diff --git a/.clang-format b/.clang-format index a42953e91256050949f994b43a376bd2ad40f3c7..0358511834d76ac653a7dec13c6277ab4f78baa7 100644 --- a/.clang-format +++ b/.clang-format @@ -11,6 +11,6 @@ AllowShortCaseLabelsOnASingleLine: false AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false IndentCaseLabels: true -Standard: Cpp03 +Standard: Cpp11 AccessModifierOffset: -4 BinPackParameters: true diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a52ff47414c8f18d4c8eb2ae136417d0cf01f82..d11abf3214bbd2ac1b87449b77106b2c6a4a3980 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,16 +1,84 @@ Changelog ========= -v4.5.0 +v5.0.0 ------ -**NekMesh**: +**Library** +- Added in sum factorisation version for pyramid expansions and orthogonal + expansion in pyramids (!750) +- Significant overhaul of CMake infrastructure (!770, !804) +- Fix ThridpartyCCM options (!802) +- Fix Windows CRLF tokens in GEO reader and improve comment handling (!805) +- Use chrono in Timer (!807) +- Fix caching of FUNCTION tags that read from file and provide the same + functionality in FUNCTIONs defined for forcings (!759) +- Added native support for csv files in addititon to pts (!760) +**NekMesh**: +- Add feature to read basic 2D geo files as CAD (!731) - Add periodic boundary condition meshing in 2D (!733) - Adjust boundary layer thickness in corners in 2D (!739) +- Add non-O BL meshing in 2D (!757) +- Add ability to compile CCIO library but tar file is not yet openly + available whist we seek permission from Simens (!799) +- Fix issue with reading CCM files due to definition of default arrays + rather than a vector (!797) +- Fix inverted triangles and small memory issue in surface meshing (!798) + +**FieldConvert**: +- Add input module for Semtex field files (!777) **Documentation**: - Added the developer-guide repository as a submodule (!751) +v4.4.2 +------ +**NekMesh**: +- Fix uninitialised memory bug in Nek5000 input module (!801) + +**Library** +- Fix ability to set 
default implementation in Collections and added an option + to set eNoCollections in FieldConvert as default (!789) + +v4.4.1 +------ +**Library** +- Remove m_offset_elmt_id and GetOffsetElmtId which fixed problems in 2D when + quad elements are listed before tri elements (!758) +- Remove the duplicate output of errorutil (!756) +- Fix BLAS CMake dependencies (!763) +- Fix interpolation issue with Lagrange basis functions (!768) +- Fix issue with average fields not working with different polynomial order + fields (!776) +- Fix rounding of integer parameters (!774) +- Fix Hdf5 output in FilterFieldConvert (!781) +- Fixed extreme memory consumption of Interpolator when interpolating from pts + to fld or between different meshes (!783) +- Fix deadlock with HDF5 input (!786) +- Fix missing entriess in LibUtilities::kPointsTypeStr (!792) +- Fix compiler warnings with CommDataType (!793) + +**FieldConvert:** +- Fix issue with field ordering in the interppointdatatofld module (!754) +- Fix issue with FieldConvert when range flag used (!761) +- Fix issue when using output-points combined with noequispaced (!775) +- Fix equispacedoutput for 3DH1D with triangles (!787) + +**NekMesh**: +- Fix memory consumption issue with Gmsh output (!747, !762) +- Rework meshing control so that if possible viewable meshes will be dumped + when some part of the system fails (!756) +- Add manifold meshing option (!756) +- Fix issue with older rea input files (!765) +- Fix memory leak in variational optimiser, add small optimisations (!785) +- Check the dimensionality of the CAD system before running the 2D generator (!780) + +**IncNavierStokesSolver** +- Fix an initialisation issue when using an additional advective field (!779) + +**Packaging** +- Added missing package for FieldUtils library (!755) + v4.4.0 ------ **Library**: @@ -52,7 +120,7 @@ v4.4.0 - Fix bug in FieldUtils when using half mode expansions (!734) - Do not read the same fld/pts files again for every variable (!670) - Fix bug 
in CMake PETSc detection for Ubuntu 16.04/Debian 9 (!735) -- Added native support for csv files in addititon to pts (!760) +- Fix warnings with Intel compiler (!742) **ADRSolver:** - Add a projection equation system for C^0 projections (!675) @@ -106,6 +174,7 @@ v4.4.0 (!712) - 2D to 3D mesh extrusion module (!715) - Add new two-dimensional mesher from NACA code or step file (!720) +- Add basic gmsh cad (.geo) reader to the meshing system (!731) - Fix inverted boundary layer in 2D (!736) - More sensible element sizing with boundary layers in 2D (!736) - Change variable names in mcf file to make more sense (!736) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6a592e6197dfcae399997e0679dd824f15cfc9d4..9067a76c88e942f3a2edf0b125d23797d975e627 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,14 +5,34 @@ RelWithDebInfo MinSizeRel.") PROJECT(Nektar++ C CXX) +# Nektar++ requires C++11. Try to infer this for older CMake versions (less than +# 3.1.0) +IF ("${CMAKE_VERSION}" VERSION_LESS "3.1") + IF (NOT MSVC) + INCLUDE(CheckCXXCompilerFlag) + CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11) + + IF (COMPILER_SUPPORTS_CXX11) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + ELSE() + MESSAGE(FATAL_ERROR "Nektar++ requires a compiler with C++11 support.") + ENDIF() + ELSEIF(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 18.0) + MESSAGE(FATAL_ERROR "Nektar++ requires full C++11 support: please upgrade to Visual Studio 2013 or later") + ENDIF() +ELSE() + SET(CMAKE_CXX_STANDARD 11) + SET(CMAKE_CXX_STANDARD_REQUIRED ON) + SET(CMAKE_CXX_EXTENSIONS OFF) +ENDIF() INCLUDE(CheckLanguage) CHECK_LANGUAGE(Fortran) IF(CMAKE_Fortran_COMPILER) - ENABLE_LANGUAGE(Fortran) + ENABLE_LANGUAGE(Fortran) ELSE() - MESSAGE(STATUS "No Fortran support") + MESSAGE(STATUS "No Fortran support") ENDIF() # Helps organize projects in IDEs. 
@@ -29,6 +49,7 @@ SET(NEKTAR_VERSION ${NEKTAR_VERSION_MAJOR}.${NEKTAR_VERSION_MINOR}.${NEKTAR_VERS # Add support for CMAKE_DEPENDENT_OPTION INCLUDE(CMakeDependentOption) +INCLUDE(CMakeParseArguments) # Enable CTest. ENABLE_TESTING() @@ -161,28 +182,24 @@ OPTION(NEKTAR_USE_EXPRESSION_TEMPLATES MARK_AS_ADVANCED(NEKTAR_USE_EXPRESSION_TEMPLATES) # BLAS Support -OPTION(NEKTAR_USE_BLAS_LAPACK "Use Blas and lapack routines." ON) +CMAKE_DEPENDENT_OPTION(NEKTAR_USE_SYSTEM_BLAS_LAPACK + "Use the system provided blas and lapack libraries" ON + "UNIX; NOT APPLE; NOT NEKTAR_USE_OPENBLAS; NOT NEKTAR_USE_MKL; NOT NEKTAR_USE_ACML; NOT NEKTAR_USE_ACCELERATE_FRAMEWORK" OFF) CMAKE_DEPENDENT_OPTION(NEKTAR_USE_OPENBLAS "Use OpenBLAS library as a substitute to native BLAS." OFF - "NEKTAR_USE_BLAS_LAPACK" OFF) -CMAKE_DEPENDENT_OPTION(NEKTAR_USE_SMV - "Use LibSMV library for faster small matrix-vector multiplies." OFF - "NEKTAR_USE_BLAS_LAPACK" OFF) + "NOT NEKTAR_USE_SYSTEM_BLAS_LAPACK" OFF) CMAKE_DEPENDENT_OPTION(NEKTAR_USE_ACML "Use the AMD Core Math Library (ACML) for BLAS and Lapack support." OFF - "NEKTAR_USE_BLAS_LAPACK" OFF) + "NOT NEKTAR_USE_SYSTEM_BLAS_LAPACK" OFF) CMAKE_DEPENDENT_OPTION(NEKTAR_USE_MKL "Use the Intel Math Kernel Library (MKL) for BLAS and Lapack support." OFF - "NEKTAR_USE_BLAS_LAPACK" OFF) -CMAKE_DEPENDENT_OPTION(NEKTAR_USE_SYSTEM_BLAS_LAPACK - "Use the system provided blas and lapack libraries" ON - "NEKTAR_USE_BLAS_LAPACK; UNIX; NOT APPLE; NOT NEKTAR_USE_OPENBLAS; NOT NEKTAR_USE_MKL; NOT NEKTAR_USE_ACML; NOT NEKTAR_USE_ACCELERATE_FRAMEWORK" OFF) + "NOT NEKTAR_USE_SYSTEM_BLAS_LAPACK" OFF) CMAKE_DEPENDENT_OPTION(NEKTAR_USE_ACCELERATE_FRAMEWORK "Use the Mac Accelerate Framework for BLAS and Lapack support." ON - "NEKTAR_USE_BLAS_LAPACK; APPLE" OFF) + "NOT NEKTAR_USE_SYSTEM_BLAS_LAPACK; APPLE" OFF) CMAKE_DEPENDENT_OPTION(NEKTAR_USE_WIN32_LAPACK "Use Win32 Lapack provided with the Third Party Distribution." 
- ON "NEKTAR_USE_BLAS_LAPACK; WIN32" OFF) + ON "NOT NEKTAR_USE_SYSTEM_BLAS_LAPACK; WIN32" OFF) # Memory pools OPTION(NEKTAR_USE_MEMORY_POOLS @@ -222,7 +239,6 @@ ENDIF () # Find ThirdParty libraries and headers. INCLUDE (ThirdPartyTinyxml) -INCLUDE (ThirdPartyLoki) INCLUDE (ThirdPartyMetis) INCLUDE (ThirdPartyHDF5) INCLUDE (ThirdPartyScotch) @@ -232,7 +248,6 @@ INCLUDE (ThirdPartyFFTW) INCLUDE (ThirdPartyArpack) INCLUDE (ThirdPartyMPI) INCLUDE (ThirdPartyVTK) -INCLUDE (ThirdPartySMV) INCLUDE (ThirdPartyOCE) INCLUDE (ThirdPartyTetGen) INCLUDE (ThirdPartyCCM) @@ -241,32 +256,18 @@ INCLUDE (Doxygen) IF( NEKTAR_USE_MKL ) INCLUDE (FindMKL) - SET(NEKTAR_USING_BLAS TRUE) - SET(NEKTAR_USING_LAPACK TRUE) - SET(NEKTAR_USING_MKL TRUE) ENDIF( NEKTAR_USE_MKL ) IF( NEKTAR_USE_OPENBLAS ) INCLUDE (FindOpenBlas) - SET(NEKTAR_USING_OPENBLAS TRUE) ENDIF( NEKTAR_USE_OPENBLAS ) -IF( NEKTAR_USE_SMV ) - INCLUDE (FindSMV) - SET(NEKTAR_USING_SMV TRUE) - ADD_DEFINITIONS(-DNEKTAR_USING_SMV) -ENDIF( NEKTAR_USE_SMV ) - IF( NEKTAR_USE_ACCELERATE_FRAMEWORK ) INCLUDE (FindAccelerateFramework) ENDIF( NEKTAR_USE_ACCELERATE_FRAMEWORK ) IF( NEKTAR_USE_ACML ) INCLUDE (FindACML) - SET(NEKTAR_USING_BLAS TRUE) - SET(NEKTAR_USING_LAPACK TRUE) - SET(NEKTAR_USING_ACML TRUE) - ADD_DEFINITIONS(-DUSE_ACML) ENDIF( NEKTAR_USE_ACML ) IF( NEKTAR_USE_WIN32_LAPACK ) @@ -275,24 +276,12 @@ ENDIF( NEKTAR_USE_WIN32_LAPACK ) IF( NEKTAR_USE_SYSTEM_BLAS_LAPACK ) INCLUDE(ThirdPartyBlasLapack) - SET(NEKTAR_USING_BLAS TRUE) - SET(NEKTAR_USING_LAPACK TRUE) ENDIF( NEKTAR_USE_SYSTEM_BLAS_LAPACK ) -IF( NEKTAR_USE_BLAS_LAPACK ) - SET(NEKTAR_USING_BLAS TRUE) - SET(NEKTAR_USING_LAPACK TRUE) - ADD_DEFINITIONS( -DNEKTAR_USING_LAPACK -DNEKTAR_USING_BLAS ) -ENDIF( NEKTAR_USE_BLAS_LAPACK ) - IF( NEKTAR_USE_TINYXML_STL ) ADD_DEFINITIONS( -DTIXML_USE_STL) ENDIF( NEKTAR_USE_TINYXML_STL ) -IF( NEKTAR_USE_DIRECT_BLAS_CALLS ) - ADD_DEFINITIONS(-DNEKTAR_USING_DIRECT_BLAS_CALLS) -ENDIF( NEKTAR_USE_DIRECT_BLAS_CALLS ) - IF( 
NEKTAR_USE_EXPRESSION_TEMPLATES ) ADD_DEFINITIONS(-DNEKTAR_USE_EXPRESSION_TEMPLATES -DNEKTAR_USING_CMAKE) ENDIF( NEKTAR_USE_EXPRESSION_TEMPLATES ) @@ -324,25 +313,26 @@ ENDIF (APPLE) INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}) -# Build active components +# Build active components. Add utilities and solvers directories first, because +# that allows us to detect library dependencies automatically. IF (NEKTAR_BUILD_LIBRARY) - SET(NEKTAR++_LIBRARIES SolverUtils LibUtilities StdRegions SpatialDomains LocalRegions - MultiRegions Collections GlobalMapping FieldUtils NekMeshUtils) INCLUDE_DIRECTORIES(library) +ENDIF() + +INCLUDE_DIRECTORIES(utilities) +ADD_SUBDIRECTORY(utilities) + +INCLUDE_DIRECTORIES(solvers) +ADD_SUBDIRECTORY(solvers) + +IF (NEKTAR_BUILD_LIBRARY) + # List of Nektar++ libraries will be rebuilt every configuration. + SET(NEKTAR++_LIBRARIES "" CACHE INTERNAL "") + ADD_SUBDIRECTORY(library) INSTALL(EXPORT Nektar++Libraries DESTINATION ${LIB_DIR}/cmake COMPONENT dev) ENDIF (NEKTAR_BUILD_LIBRARY) -IF (NEKTAR_BUILD_SOLVERS) - INCLUDE_DIRECTORIES(solvers) - ADD_SUBDIRECTORY(solvers) -ENDIF(NEKTAR_BUILD_SOLVERS) - -IF (NEKTAR_BUILD_UTILITIES) - INCLUDE_DIRECTORIES(utilities) - ADD_SUBDIRECTORY(utilities) -ENDIF (NEKTAR_BUILD_UTILITIES) - IF (NEKTAR_BUILD_TESTS) INCLUDE_DIRECTORIES(tests) ADD_SUBDIRECTORY(tests) @@ -374,17 +364,18 @@ INSTALL(FILES ${CMAKE_BINARY_DIR}/Nektar++Config.cmake # Install ThirdParty headers to subdirectory of ${NEKTAR_INCLUDE_DIR} INSTALL(DIRECTORY ${TPDIST}/include/ DESTINATION ${NEKTAR_INCLUDE_DIR}/ThirdParty - COMPONENT ThirdParty + COMPONENT dev ) -# Install ThirdParty libraries into ${NEKTAR_LIB_DIR} +# Install ThirdParty libraries into ${NEKTAR_LIB_DIR}. These are shipped with +# the LibUtilities library. 
INSTALL(DIRECTORY ${TPDIST}/lib/ DESTINATION ${NEKTAR_LIB_DIR} - COMPONENT ThirdParty + COMPONENT libutilities ) +ADD_SUBDIRECTORY(docs) + IF(NEKTAR_BUILD_PACKAGES) - add_subdirectory(pkg) + ADD_SUBDIRECTORY(pkg) ENDIF(NEKTAR_BUILD_PACKAGES) - -ADD_SUBDIRECTORY(docs) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index dd647a553d6306871c43ec0d5de99576394f5b40..12a17c17145c4e8efe6eb5c1bc6c925c6bd5bf90 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -44,7 +44,9 @@ project. It's a pretty simple process: diff and are not quite ready to merge, use the `[WIP]` tag in the title to prevent your code from being accidentally merged. 5. Put a comment in the MR saying that it's ready to be merged. -6. Respond to any comments in the code review. +6. If your branch is a minor fix that could appear in the next patch release, + then add the `Proposed patch` label to the merge request. +7. Respond to any comments in the code review. ## Submission checklist - Did you add regression tests (for fixes) or unit tests and/or normal tests for @@ -155,6 +157,78 @@ stick to the following process: - Once feedback received from the branch author (if necessary) and reviewers are happy, the branch will be merged. +## Release branches +Nektar++ releases are versioned in the standard form `x.y.z` where `x` is a +major release, `y` a minor release and `z` a patch release: + +- major releases are extremely infrequent (on the order of every 2-3 years) and + denote major changes in functionality and the API; +- minor releases occur around twice per year and contain new features with minor + API changes; +- patch releases are targeted on roughly a monthly basis and are intended to + fix minor issues in the code. + +The repository contains a number of _release branches_ named `release/x.y` for +each minor release, which are intended to contain **fixes and very minor +changes** from `master` and which form the next patch release. 
This allows us to +use `master` for the next minor release, whilst still having key fixes in patch +releases. + +### Cherry-picking process + +Any branches that are marked with the `Proposed patch` label should follow the +following additional steps to cherry pick commits into the `release/x.y` branch. + +1. If the branch is on a remote other than `nektar/nektar`, make sure that's + added to your local repository. +2. On a local terminal, run `git fetch --all` to pull the latest changes. It's + important for the commands below that you do this _before_ you merge the + branch into `master`. +3. Merge the branch into master as usual using GitLab. +4. Switch to the appropriate branch with `git checkout release/x.y` and update + with `git pull`. +5. Now check the list of commits to cherry-pick. + + ```bash + git log --oneline --no-merges --reverse origin/master..REMOTE/fix/BRANCHNAME + ``` + + where `REMOTE` is the remote on which the branch lives and `BRANCHNAME` is + the fix branch. If the list is empty, you probably did a `git fetch` after + you merged the branch into `master`; in this case use `origin/master^`. +6. If you're happy with the list (compare to the MR list on the GitLab MR if + necessary), cherry-pick the commits with the command: + + ```bash + git cherry-pick -x $(git rev-list --no-merges --reverse origin/master..REMOTE/fix/BRANCHNAME) + ``` + +7. It's likely you'll encounter some conflicts, particularly with the + `CHANGELOG`. To fix these: + - `git status` to see what's broken + - Fix appropriately + - `git commit -a` to commit your fix + - `git cherry-pick --continue` +8. If everything becomes horribly broken, `git cherry-pick --abort`. +9. Once you're happy, `git push` to send your changes back to GitLab. 
+ +Steps 5 and 6 can be simplified by creating a script +```bash +#!/bin/bash +src=$1 + +logopts="--oneline --no-merges --reverse" +commits=`git log $logopts master..$1 | cut -f 1 -d " " | xargs` + +echo "Will cherry-pick the following commits: $commits" +echo "Press ENTER to continue..." +read + +cherryopts="-x --allow-empty --allow-empty-message" +git cherry-pick $cherryopts $commits +``` +which accepts the name of the source branch as the sole argument. + ## Formatting guidelines Nektar++ uses C++, a language notorious for being easy to make obtuse and difficult to follow code. To hopefully alleviate this problem, there are a diff --git a/cmake/FindGSMPI.cmake b/cmake/FindGSMPI.cmake deleted file mode 100644 index cae5fc67aa480c5dabc5f9ba5d32ae47d0529de3..0000000000000000000000000000000000000000 --- a/cmake/FindGSMPI.cmake +++ /dev/null @@ -1,26 +0,0 @@ - -SET(GSMPI_SEARCH_PATHS - ${CMAKE_SOURCE_DIR}/ThirdParty/gsmpi-1.2/ - ${CMAKE_SOURCE_DIR}/ThirdParty/gsmpi-1.2/build/ - ${CMAKE_SOURCE_DIR}/../ThirdParty/gsmpi-1.2/ - ${CMAKE_SOURCE_DIR}/../ThirdParty/gsmpi-1.2/build - ${CMAKE_SOURCE_DIR}/ThirdParty/dist/lib - ${CMAKE_SOURCE_DIR}/../ThirdParty/dist/lib) - -FIND_LIBRARY(GSMPI_LIBRARY NAMES gsmpi PATHS ${GSMPI_SEARCH_PATHS}) - -SET(GSMPI_FOUND FALSE) -IF (GSMPI_LIBRARY) - SET(GSMPI_FOUND TRUE) - MARK_AS_ADVANCED(GSMPI_LIBRARY) -ENDIF (GSMPI_LIBRARY) - -IF (GSMPI_FOUND) - IF (NOT GSMPI_FIND_QUIETLY) - MESSAGE(STATUS "Found GSMPI") - ENDIF (NOT GSMPI_FIND_QUIETLY) -ELSE(GSMPI_FOUND) - IF (GSMPI_FIND_REQUIRED) - MESSAGE(FATAL_ERROR "Could not find GSLib") - ENDIF (GSMPI_FIND_REQUIRED) -ENDIF (GSMPI_FOUND) diff --git a/cmake/FindPETSc.cmake b/cmake/FindPETSc.cmake index f06b55df2c155b0d57abd2f42f628343d65fa2b9..cd7e1b4b4992cd1619b18fdea0ab260a1eeb1d9b 100644 --- a/cmake/FindPETSc.cmake +++ b/cmake/FindPETSc.cmake @@ -198,7 +198,7 @@ show : else (WIN32) set (libname ${name}) endif (WIN32) - find_library (PETSC_LIBRARY_${suffix} NAMES ${libname} HINTS ${petsc_lib_dir} 
NO_DEFAULT_PATH) + find_library (PETSC_LIBRARY_${suffix} NAMES ${libname} ${libname}_real HINTS ${petsc_lib_dir} NO_DEFAULT_PATH) set (PETSC_LIBRARIES_${suffix} "${PETSC_LIBRARY_${suffix}}") mark_as_advanced (PETSC_LIBRARY_${suffix}) endmacro (PETSC_FIND_LIBRARY suffix name) diff --git a/cmake/FindPackageMultipass.cmake b/cmake/FindPackageMultipass.cmake index fa350a928c9946e052b8c227f6537f434e24b902..fbf06a7f0fc3aa20a0387f091eac4f74e7ffdab2 100644 --- a/cmake/FindPackageMultipass.cmake +++ b/cmake/FindPackageMultipass.cmake @@ -25,7 +25,7 @@ # Always runs the given test, use this when you need to re-run tests # because parent variables have made old cache entries stale. The LANGUAGE # variable is either C or CXX indicating which compiler the test should -# use. +# use. # MULTIPASS_C_SOURCE_RUNS (Name INCLUDES LIBRARIES SOURCE RUNS) # DEPRECATED! This is only included for backwards compatability. Use # the more general MULTIPASS_SOURCE_RUNS instead. @@ -46,7 +46,7 @@ macro (FIND_PACKAGE_MULTIPASS _name _current) # The name of the stored value for the given state set (_stored_var PACKAGE_MULTIPASS_${_NAME}_${_state}) if (NOT "${${_stored_var}}" STREQUAL "${${_NAME}_${_state}}") - set (_states_current "NO") + set (_states_current "NO") endif (NOT "${${_stored_var}}" STREQUAL "${${_NAME}_${_state}}") set (${_stored_var} "${${_NAME}_${_state}}" CACHE INTERNAL "Stored state for ${_name}." 
FORCE) list (REMOVE_AT _args 0) @@ -68,7 +68,7 @@ macro (FIND_PACKAGE_MULTIPASS _name _current) if (_cmd STREQUAL "DEPENDENTS") list (REMOVE_AT _args 0) foreach (dep ${_args}) - set (${_NAME}_${dep} "NOTFOUND" CACHE INTERNAL "Cleared" FORCE) + set (${_NAME}_${dep} "NOTFOUND" CACHE INTERNAL "Cleared" FORCE) endforeach (dep) endif (_cmd STREQUAL "DEPENDENTS") set (${_NAME}_FOUND "NOTFOUND" CACHE INTERNAL "Cleared" FORCE) diff --git a/cmake/FindSMV.cmake b/cmake/FindSMV.cmake deleted file mode 100644 index 45b2a6d85d782132543c35be1c1f89e5bfabc267..0000000000000000000000000000000000000000 --- a/cmake/FindSMV.cmake +++ /dev/null @@ -1,28 +0,0 @@ - -SET(SMV_SEARCH_PATHS - ${CMAKE_SOURCE_DIR}/ThirdParty/libsmv/build/lib - ${CMAKE_SOURCE_DIR}/../ThirdParty/libsmv/build/lib - ${CMAKE_SOURCE_DIR}/ThirdParty/dist/build/lib - ${CMAKE_SOURCE_DIR}/../ThirdParty/dist/build/lib) - -FIND_PATH(SMV_INCLUDE_DIR NAMES smv.h PATHS ${SMV_SEARCH_PATHS}) -FIND_LIBRARY(SMV_LIBRARY NAMES smv PATHS ${SMV_SEARCH_PATHS}) - - -SET(SMV_FOUND FALSE) -IF (SMV_LIBRARY) - SET(SMV_FOUND TRUE) - INCLUDE_DIRECTORIES(${SMV_INCLUDE_DIR}) - MARK_AS_ADVANCED(SMV_LIBRARY) - MARK_AS_ADVANCED(SMV_INCLUDE_DIR) -ENDIF (SMV_LIBRARY) - -IF (SMV_FOUND) - IF (NOT SMV_FIND_QUIETLY) - MESSAGE(STATUS "Found SMV: ${SMV_INCLUDE_DIR}") - ENDIF (NOT SMV_FIND_QUIETLY) -ELSE(SMV_FOUND) - IF (SMV_FIND_REQUIRED) - MESSAGE(FATAL_ERROR "Could not find SMV") - ENDIF (SMV_FIND_REQUIRED) -ENDIF (SMV_FOUND) diff --git a/cmake/FindXXT.cmake b/cmake/FindXXT.cmake deleted file mode 100644 index 6f74874e8c0c73463751af0d4bf953cb17bf7e99..0000000000000000000000000000000000000000 --- a/cmake/FindXXT.cmake +++ /dev/null @@ -1,26 +0,0 @@ - -SET(XXT_SEARCH_PATHS - ${CMAKE_SOURCE_DIR}/ThirdParty/gsmpi-1.2/ - ${CMAKE_SOURCE_DIR}/ThirdParty/gsmpi-1.2/build/ - ${CMAKE_SOURCE_DIR}/../ThirdParty/gsmpi-1.2/ - ${CMAKE_SOURCE_DIR}/../ThirdParty/gsmpi-1.2/build - ${CMAKE_SOURCE_DIR}/ThirdParty/dist/lib - ${CMAKE_SOURCE_DIR}/../ThirdParty/dist/lib) - 
-FIND_LIBRARY(XXT_LIBRARY NAMES xxt PATHS ${XXT_SEARCH_PATHS}) - -SET(XXT_FOUND FALSE) -IF (XXT_LIBRARY) - SET(XXT_FOUND TRUE) - MARK_AS_ADVANCED(XXT_LIBRARY) -ENDIF (XXT_LIBRARY) - -IF (XXT_FOUND) - IF (NOT XXT_FIND_QUIETLY) - MESSAGE(STATUS "Found XXT") - ENDIF (NOT XXT_FIND_QUIETLY) -ELSE(XXT_FOUND) - IF (XXT_FIND_REQUIRED) - MESSAGE(FATAL_ERROR "Could not find XXT") - ENDIF (XXT_FIND_REQUIRED) -ENDIF (XXT_FOUND) diff --git a/cmake/Nektar++Config.cmake.in b/cmake/Nektar++Config.cmake.in index 3cdd99a1335e8a0ec50bc4446bded81411fa9aea..cd5a341a99e91fee2411597d56c3038f31713e92 100644 --- a/cmake/Nektar++Config.cmake.in +++ b/cmake/Nektar++Config.cmake.in @@ -50,10 +50,6 @@ SET(Boost_LIBRARY_DIRS "@Boost_CONFIG_LIBRARY_DIR@") SET(NEKTAR++_TP_LIBRARIES ${NEKTAR++_TP_LIBRARIES} ${Boost_LIBRARIES}) -SET(LOKI_INCLUDE_DIRS "@LOKI_CONFIG_INCLUDE_DIR@") -SET(NEKTAR++_TP_INCLUDE_DIRS - ${NEKTAR++_TP_INCLUDE_DIRS} ${LOKI_INCLUDE_DIRS}) - SET(TINYXML_INCLUDE_DIRS "@TINYXML_CONFIG_INCLUDE_DIR@") SET(NEKTAR++_TP_INCLUDE_DIRS ${NEKTAR++_TP_INCLUDE_DIRS} ${TINYXML_INCLUDE_DIRS}) diff --git a/cmake/NektarCommon.cmake b/cmake/NektarCommon.cmake index 230bd843e8a0992a4d8658387a0d08eec483316a..8dd00a517566c8abc0a49cd5888cd21acb4edcf0 100644 --- a/cmake/NektarCommon.cmake +++ b/cmake/NektarCommon.cmake @@ -1,10 +1,67 @@ -MACRO(CHANGE_EXTENSION output var new_ext) - GET_FILENAME_COMPONENT(FileName ${var} NAME_WE) - GET_FILENAME_COMPONENT(Path ${var} PATH) - SET(${output} ${Path}/${FileName}.${new_ext}) +## +## NektarCommon.cmake +## +## Frequently used Nektar++ CMake configuration macros and functions +## + +# +# THIRDPARTY_LIBRARY(varname DESCRIPTION [STATIC|SHARED] lib1 [lib2]...) +# +# Updates a variable containing the name of a third-party shared or static +# library to point to an absolute path defining its location instead of adding +# `-llibname` to the linker flags. This avoids the issue of e.g. linking against +# an outdated system zlib installation. 
+# +# Arguments: +# - `varname`: variable name containing the third-party library name. On +# output will be altered to update the correct path. +# - `DESCRIPTION`: a brief description of the variable (used in the SET +# command). +# - `SHARED`: if the library will be built as a shared library +# - `STATIC`: if the library will be built as a static library +# +MACRO(THIRDPARTY_LIBRARY varname) + CMAKE_PARSE_ARGUMENTS(TPLIB "" "DESCRIPTION" "STATIC;SHARED" ${ARGN}) + + IF(TPLIB_SHARED) + SET(LIBTYPE "SHARED") + SET(TPLIBS ${TPLIB_SHARED}) + ELSEIF(TPLIB_STATIC) + SET(LIBTYPE "STATIC") + SET(TPLIBS ${TPLIB_STATIC}) + ENDIF() + + FOREACH (lib ${TPLIBS}) + LIST(APPEND tmplist "${TPDIST}/lib/${CMAKE_${LIBTYPE}_LIBRARY_PREFIX}${lib}${CMAKE_${LIBTYPE}_LIBRARY_SUFFIX}") + ENDFOREACH() + + SET(${varname} ${tmplist} CACHE FILEPATH ${TPLIB_DESCRIPTION} FORCE) + UNSET(tmplist) + UNSET(LIBTYPE) + UNSET(TPLIBS) + UNSET(TPLIB_SHARED) + UNSET(TPLIB_STATIC) + UNSET(lib) ENDMACRO() - +# +# SET_COMMON_PROPERTIES(target) +# +# Sets properties that are common to either library or executable targets. This +# includes: +# +# - Name suffixes: -g for debug, -ms for minsize, -rg for release w/debug. +# - Disable some MSVC compiler warnings +# - Add -pg flag if NEKTAR_ENABLE_PROFILE is switched on and we're using gcc +# - Add compiler definitions and appropriate warning levels to gcc-like +# compilers (e.g. clang) +# - Define versions for the target +# - Make sure that -fPIC is enabled for library code if building shared +# libraries. +# +# Arguments: +# - `target`: target name +# MACRO(SET_COMMON_PROPERTIES name) SET_TARGET_PROPERTIES(${name} PROPERTIES DEBUG_POSTFIX -g) SET_TARGET_PROPERTIES(${name} PROPERTIES MINSIZEREL_POSTFIX -ms) @@ -18,8 +75,8 @@ MACRO(SET_COMMON_PROPERTIES name) # warning)" warning (4800) # 4250 - Inheritance via dominance. Nektar appears to be handling the # diamond correctly. 
- # 4373 - Overriding a virtual method with parameters that differ by const - # or volatile conforms to the standard. + # 4373 - Overriding a virtual method with parameters that differ by const + # or volatile conforms to the standard. # /Za is necessary to prevent temporaries being bound to reference # parameters. SET_TARGET_PROPERTIES(${name} PROPERTIES COMPILE_FLAGS @@ -29,14 +86,14 @@ MACRO(SET_COMMON_PROPERTIES name) SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP") ENDIF( MSVC ) - IF( ${CMAKE_COMPILER_IS_GNUCXX} ) - IF(NEKTAR_ENABLE_PROFILE) - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pg") - SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -pg") - SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -pg") + IF (${CMAKE_COMPILER_IS_GNUCXX}) + IF(NEKTAR_ENABLE_PROFILE) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pg") + SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -pg") + SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -pg") SET(LINK_FLAGS "${LINK_FLAGS} -pg") ENDIF(NEKTAR_ENABLE_PROFILE) - ENDIF( ${CMAKE_COMPILER_IS_GNUCXX} ) + ENDIF() # Prevent including these common flags multiple times. 
IF (NOT ${CMAKE_CXX_FLAGS_DEBUG} MATCHES ".*DNEKTAR_DEBUG.*") @@ -47,18 +104,18 @@ MACRO(SET_COMMON_PROPERTIES name) "${CMAKE_CXX_FLAGS_DEBUG} -DNEKTAR_FULLDEBUG") ENDIF( NEKTAR_FULL_DEBUG) - IF( NOT MSVC ) + IF(NOT MSVC) SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Wall -Wno-deprecated -Wno-sign-compare") SET(CMAKE_CXX_FLAGS_RELEASE - "${CMAKE_CXX_FLAGS_RELEASE} -Wall -Wno-deprecated -Wno-sign-compare") + "${CMAKE_CXX_FLAGS_RELEASE} -Wall -Wno-deprecated -Wno-sign-compare") SET(CMAKE_CXX_FLAGS_RELWITHDEBINFO - "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -Wall -Wno-deprecated -Wno-sign-compare") + "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -Wall -Wno-deprecated -Wno-sign-compare") IF (NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fpermissive") ENDIF() - ENDIF( NOT MSVC) + ENDIF (NOT MSVC) # Define version SET_PROPERTY(TARGET ${name} @@ -76,91 +133,119 @@ MACRO(SET_COMMON_PROPERTIES name) ENDIF( CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" ) ENDMACRO(SET_COMMON_PROPERTIES name) -MACRO(SETUP_PRECOMPILED_HEADERS sourceFiles precompiledHeader) - IF( NEKTAR_USE_PRECOMPILED_HEADERS ) - IF( MSVC ) - # /Yu"stdafx.h" - #MESSAGE(${${precompiledHeader}}) - #MESSAGE(${${sourceFiles}}) - SET_SOURCE_FILES_PROPERTIES(${${sourceFiles}} - PROPERTIES COMPILE_FLAGS "/Yu\"${${precompiledHeader}}\"") - LIST(GET ${sourceFiles} 0 OUTVAR) - #MESSAGE(${OUTVAR}) - SET_SOURCE_FILES_PROPERTIES(${OUTVAR} - PROPERTIES COMPILE_FLAGS "/Yc\"${${precompiledHeader}}\"") - - ENDIF() +# +# ADD_NEKTAR_EXECUTABLE(name COMPONENT [DEPENDS dep1 ...] [SOURCES src1 ...]) +# +# Adds a new executable to a component with the supplied component dependencies +# and sources files. +# +# Arguments: +# - `name`: target name to construct +# - `COMPONENT`: component name in which this target will live (e.g. 
demos) +# - `DEPENDS`: a list of components on which this target depends on +# - `SOURCES`: a list of source files for this target +# +MACRO(ADD_NEKTAR_EXECUTABLE name) + CMAKE_PARSE_ARGUMENTS(NEKEXE "" "COMPONENT" "DEPENDS;SOURCES" ${ARGN}) + ADD_EXECUTABLE(${name} ${NEKEXE_SOURCES}) + SET_COMMON_PROPERTIES(${name}) + + IF (${CMAKE_SYSTEM} MATCHES "Linux.*") + SET_PROPERTY(TARGET ${name} APPEND_STRING PROPERTY COMPILE_FLAGS " -pthread") + SET_PROPERTY(TARGET ${name} APPEND_STRING PROPERTY LINK_FLAGS " -pthread") ENDIF() + + STRING(TOLOWER ${NEKEXE_COMPONENT} NEKEXE_COMPONENT) + STRING(TOUPPER ${NEKEXE_COMPONENT} NEKEXE_COMPVAR) + + SET_PROPERTY(TARGET ${name} PROPERTY FOLDER ${NEKEXE_COMPONENT}) + INSTALL(TARGETS ${name} + RUNTIME DESTINATION ${NEKTAR_BIN_DIR} COMPONENT ${NEKEXE_COMPONENT} OPTIONAL + ARCHIVE DESTINATION ${NEKTAR_LIB_DIR} COMPONENT ${NEKEXE_COMPONENT} OPTIONAL + LIBRARY DESTINATION ${NEKTAR_LIB_DIR} COMPONENT ${NEKEXE_COMPONENT} OPTIONAL) + + # Add dependencies for executable. + TARGET_LINK_LIBRARIES(${name} LINK_PUBLIC ${NEKEXE_DEPENDS}) ENDMACRO() -MACRO(ADD_NEKTAR_EXECUTABLE name component sources) - IF( ${ARGC} LESS 4 ) - ADD_EXECUTABLE(${name} ${${sources}}) - ELSE( ${ARGC} LESS 4 ) - ADD_EXECUTABLE(${name} ${${sources}} ${${ARGV3}}) - ENDIF( ${ARGC} LESS 4) - - SET_COMMON_PROPERTIES(${name}) - - IF( ${CMAKE_SYSTEM} MATCHES "Linux.*" ) - # The boost thread library needs pthread on linux. - GET_TARGET_PROPERTY(THE_COMPILE_FLAGS ${name} COMPILE_FLAGS) - GET_TARGET_PROPERTY(THE_LINK_FLAGS ${name} LINK_FLAGS) - - # It is possible these flags have not been set yet. 
- IF(NOT THE_COMPILE_FLAGS) - SET(THE_COMPILE_FLAGS "") - ENDIF(NOT THE_COMPILE_FLAGS) - - IF(NOT THE_LINK_FLAGS ) - SET(THE_LINK_FLAGS "") - ENDIF(NOT THE_LINK_FLAGS) - - SET_TARGET_PROPERTIES(${name} - PROPERTIES COMPILE_FLAGS "${THE_COMPILE_FLAGS} -pthread") - SET_TARGET_PROPERTIES(${name} - PROPERTIES LINK_FLAGS "${THE_LINK_FLAGS} -pthread") - - ENDIF( ${CMAKE_SYSTEM} MATCHES "Linux.*" ) - - SET_PROPERTY(TARGET ${name} PROPERTY FOLDER ${component}) - INSTALL(TARGETS ${name} - RUNTIME DESTINATION ${NEKTAR_BIN_DIR} COMPONENT ${component} OPTIONAL - ARCHIVE DESTINATION ${NEKTAR_LIB_DIR} COMPONENT ${component} OPTIONAL - LIBRARY DESTINATION ${NEKTAR_LIB_DIR} COMPONENT ${component} OPTIONAL) - -ENDMACRO(ADD_NEKTAR_EXECUTABLE name component sources) - -MACRO(ADD_NEKTAR_LIBRARY name component type) - ADD_LIBRARY(${name} ${type} ${ARGN}) - - SET_PROPERTY(TARGET ${name} PROPERTY FOLDER ${component}) +# +# ADD_NEKTAR_LIBRARY(name +# DESCRIPTION +# SUMMARY +# DEPENDS dep1 dep2 ... +# SOURCES src1 src2 ... +# HEADERS head1 head2 ...) +# +# Adds a new library to a component with the supplied component dependencies and +# sources files. A new component will be set up automatically with a lower-case +# name: e.g. if the supplied library name is `LibUtilities` the corresponding +# component is `libutilities`. +# +# Arguments: +# - `name`: target name to construct +# - `SUMMARY`: a brief summary of the library +# - `DESCRIPTION`: a more detailed description of the library +# - `DEPENDS`: a list of components on which this target depends on +# - `SOURCES`: a list of source files for this target +# - `HEADERS`: a list of header files for this target. These will be +# automatically put into a `dev` package. 
+# +MACRO(ADD_NEKTAR_LIBRARY name) + CMAKE_PARSE_ARGUMENTS(NEKLIB "" "DESCRIPTION;SUMMARY" "DEPENDS;SOURCES;HEADERS" ${ARGN}) + + ADD_LIBRARY(${name} ${NEKTAR_LIBRARY_TYPE} ${NEKLIB_SOURCES} ${NEKLIB_HEADERS}) + + # Infer component name from lower-case library name, variables should use + # upper-case. + STRING(TOLOWER ${name} NEKLIB_COMPONENT) + STRING(TOUPPER ${name} NEKLIB_COMPVAR) + + # Add name to a list so that we know for constructing dependencies. + SET(NEKTAR++_LIBRARIES ${NEKTAR++_LIBRARIES} ${name} CACHE INTERNAL "") + + SET_PROPERTY(TARGET ${name} PROPERTY FOLDER ${NEKLIB_COMPONENT}) SET_PROPERTY(TARGET ${name} PROPERTY VERSION ${NEKTAR_VERSION}) SET_COMMON_PROPERTIES(${name}) - INSTALL(TARGETS ${name} - EXPORT Nektar++Libraries - RUNTIME DESTINATION ${NEKTAR_BIN_DIR} COMPONENT ${component} OPTIONAL - ARCHIVE DESTINATION ${NEKTAR_LIB_DIR} COMPONENT ${component} OPTIONAL - LIBRARY DESTINATION ${NEKTAR_LIB_DIR} COMPONENT ${component} OPTIONAL) - -ENDMACRO(ADD_NEKTAR_LIBRARY name component type) + INSTALL(TARGETS ${name} + EXPORT Nektar++Libraries + RUNTIME DESTINATION ${NEKTAR_BIN_DIR} COMPONENT ${NEKLIB_COMPONENT} OPTIONAL + ARCHIVE DESTINATION ${NEKTAR_LIB_DIR} COMPONENT ${NEKLIB_COMPONENT} OPTIONAL + LIBRARY DESTINATION ${NEKTAR_LIB_DIR} COMPONENT ${NEKLIB_COMPONENT} OPTIONAL) + + FOREACH(HEADER ${NEKLIB_HEADERS}) + STRING(REGEX MATCH "(.*)[/\\]" DIR ${HEADER}) + INSTALL(FILES ${HEADER} + DESTINATION ${NEKTAR_INCLUDE_DIR}/${name}/${DIR} + COMPONENT dev) + ENDFOREACH() + + # If we have dependencies then link against them. + IF(NEKLIB_DEPENDS) + TARGET_LINK_LIBRARIES(${name} LINK_PUBLIC ${NEKLIB_DEPENDS}) + ENDIF() +ENDMACRO() -# Adds a test with a given name. -# The Test Definition File should be in a subdirectory called Tests relative -# to the CMakeLists.txt file calling this macros. The test file should be called -# NAME.tst, where NAME is given as a parameter to this macro. +# +# ADD_NEKTAR_TEST(name [LENGTHY]) +# +# Adds a test with a given name. 
The Test Definition File should be in a +# subdirectory called Tests relative to the CMakeLists.txt file calling this +# macros. The test file should be called NAME.tst, where NAME is given as a +# parameter to this macro. If the LENGTHY flag is given, the test will only be +# run if `NEKTAR_TEST_ALL` is enabled. +# +# Arguments: +# - `name`: name of the test file +# - `LENGTHY`: denotes a test that requires extended runtime. +# MACRO(ADD_NEKTAR_TEST name) - GET_FILENAME_COMPONENT(dir ${CMAKE_CURRENT_SOURCE_DIR} NAME) - ADD_TEST(NAME ${dir}_${name} - COMMAND Tester ${CMAKE_CURRENT_SOURCE_DIR}/Tests/${name}.tst) -ENDMACRO(ADD_NEKTAR_TEST) + CMAKE_PARSE_ARGUMENTS(NEKTEST "LENGTHY" "" "" ${ARGN}) -MACRO(ADD_NEKTAR_TEST_LENGTHY name) - IF (NEKTAR_TEST_ALL) + IF (NOT NEKTEST_LENGTHY OR NEKTAR_TEST_ALL) GET_FILENAME_COMPONENT(dir ${CMAKE_CURRENT_SOURCE_DIR} NAME) ADD_TEST(NAME ${dir}_${name} - COMMAND Tester ${CMAKE_CURRENT_SOURCE_DIR}/Tests/${name}.tst) - ENDIF(NEKTAR_TEST_ALL) -ENDMACRO(ADD_NEKTAR_TEST_LENGTHY) + COMMAND Tester ${CMAKE_CURRENT_SOURCE_DIR}/Tests/${name}.tst) + ENDIF() +ENDMACRO(ADD_NEKTAR_TEST) diff --git a/cmake/ResolveCompilerPaths.cmake b/cmake/ResolveCompilerPaths.cmake index 6610e30ec6d28f16ea58664a120f28ef6f08c3c2..54787fa38ffa50136414e6c788c50fb3c63746b8 100644 --- a/cmake/ResolveCompilerPaths.cmake +++ b/cmake/ResolveCompilerPaths.cmake @@ -42,8 +42,8 @@ include (CorrectWindowsPaths) macro (RESOLVE_LIBRARIES LIBS LINK_LINE) string (REGEX MATCHALL "((-L|-l|-Wl)([^\" ]+|\"[^\"]+\")|[^\" ]+\\.(a|so|dll|lib))" _all_tokens "${LINK_LINE}") - set (_libs_found) - set (_directory_list) + set (_libs_found "") + set (_directory_list "") foreach (token ${_all_tokens}) if (token MATCHES "-L([^\" ]+|\"[^\"]+\")") # If it's a library path, add it to the list @@ -58,7 +58,7 @@ macro (RESOLVE_LIBRARIES LIBS LINK_LINE) else (WIN32) string (REGEX REPLACE "^-l" "" token ${token}) endif (WIN32) - set (_root) + set (_root "") if (token MATCHES "^/") # We have an 
absolute path #separate into a path and a library name: string (REGEX MATCH "[^/]*\\.(a|so|dll|lib)$" libname ${token}) diff --git a/cmake/ThirdPartyBoost.cmake b/cmake/ThirdPartyBoost.cmake index 0a3ad188155eab25ed1a3e75469af9b2fbc7ed24..98b2b9bcbe5b6e048d6c5d6be27de471dfae78be 100644 --- a/cmake/ThirdPartyBoost.cmake +++ b/cmake/ThirdPartyBoost.cmake @@ -10,7 +10,7 @@ MESSAGE(STATUS "Searching for Boost:") SET(MIN_VER "1.56.0") SET(NEEDED_BOOST_LIBS thread iostreams date_time filesystem system - program_options regex timer chrono) + program_options regex) SET(Boost_DEBUG 0) SET(Boost_NO_BOOST_CMAKE ON) IF( BOOST_ROOT ) @@ -65,9 +65,9 @@ IF (THIRDPARTY_BUILD_BOOST) INCLUDE(ExternalProject) # Only build the libraries we need - SET(BOOST_LIB_LIST --with-system --with-iostreams --with-filesystem - --with-program_options --with-date_time --with-thread - --with-regex --with-timer --with-chrono) + FOREACH(boostlib ${NEEDED_BOOST_LIBS}) + LIST(APPEND BOOST_LIB_LIST --with-${boostlib}) + ENDFOREACH() IF (NOT WIN32) # We need -fPIC for 64-bit builds @@ -89,9 +89,9 @@ IF (THIRDPARTY_BUILD_BOOST) ELSEIF (MSVC14) SET(TOOLSET msvc-14.0) ENDIF() - ELSE(APPLE) + ELSE() SET(TOOLSET gcc) - ENDIF(APPLE) + ENDIF() IF (NOT WIN32) EXTERNALPROJECT_ADD( @@ -162,40 +162,24 @@ IF (THIRDPARTY_BUILD_BOOST) ENDIF(THIRDPARTY_BUILD_ZLIB) # Set up CMake variables - SET(Boost_CHRONO_LIBRARY boost_chrono) - SET(Boost_CHRONO_LIBRARY_DEBUG boost_chrono) - SET(Boost_CHRONO_LIBRARY_RELEASE boost_chrono) - SET(Boost_DATE_TIME_LIBRARY boost_date_time) - SET(Boost_DATE_TIME_LIBRARY_DEBUG boost_date_time) - SET(Boost_DATE_TIME_LIBRARY_RELEASE boost_date_time) - SET(Boost_FILESYSTEM_LIBRARY boost_filesystem) - SET(Boost_FILESYSTEM_LIBRARY_DEBUG boost_filesystem) - SET(Boost_FILESYSTEM_LIBRARY_RELEASE boost_filesystem) - SET(Boost_IOSTREAMS_LIBRARY boost_iostreams) - SET(Boost_IOSTREAMS_LIBRARY_DEBUG boost_iostreams) - SET(Boost_IOSTREAMS_LIBRARY_RELEASE boost_iostreams) - SET(Boost_PROGRAM_OPTIONS_LIBRARY 
boost_program_options) - SET(Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG boost_program_options) - SET(Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE boost_program_options) - SET(Boost_REGEX_LIBRARY boost_regex) - SET(Boost_REGEX_LIBRARY_DEBUG boost_regex) - SET(Boost_REGEX_LIBRARY_RELEASE boost_regex) - SET(Boost_SYSTEM_LIBRARY boost_system) - SET(Boost_SYSTEM_LIBRARY_DEBUG boost_system) - SET(Boost_SYSTEM_LIBRARY_RELEASE boost_system) - SET(Boost_THREAD_LIBRARY boost_thread) - SET(Boost_THREAD_LIBRARY_DEBUG boost_thread) - SET(Boost_THREAD_LIBRARY_RELEASE boost_thread) - SET(Boost_TIMER_LIBRARY boost_timer) - SET(Boost_TIMER_LIBRARY_DEBUG boost_timer) - SET(Boost_TIMER_LIBRARY_RELEASE boost_timer) + FOREACH(BOOSTLIB ${NEEDED_BOOST_LIBS}) + STRING(TOUPPER ${BOOSTLIB} BOOSTLIB_UPPER) + THIRDPARTY_LIBRARY(Boost_${BOOSTLIB_UPPER}_LIBRARY + SHARED boost_${BOOSTLIB} DESCRIPTION "Boost ${BOOSTLIB} library") + THIRDPARTY_LIBRARY(Boost_${BOOSTLIB_UPPER}_LIBRARY_DEBUG + SHARED boost_${BOOSTLIB} DESCRIPTION "Boost ${BOOSTLIB} library, debug") + THIRDPARTY_LIBRARY(Boost_${BOOSTLIB_UPPER}_LIBRARY_RELEASE + SHARED boost_${BOOSTLIB} DESCRIPTION "Boost ${BOOSTLIB} library, release") + MARK_AS_ADVANCED(Boost_${BOOSTLIB_UPPER}_LIBRARY) + MARK_AS_ADVANCED(Boost_${BOOSTLIB_UPPER}_LIBRARY_DEBUG) + MARK_AS_ADVANCED(Boost_${BOOSTLIB_UPPER}_LIBRARY_RELEASE) + LIST(APPEND Boost_LIBRARIES ${Boost_${BOOSTLIB_UPPER}_LIBRARY}) + ENDFOREACH() SET(Boost_INCLUDE_DIRS ${TPSRC}/dist/include) SET(Boost_CONFIG_INCLUDE_DIR ${TPINC}) SET(Boost_LIBRARY_DIRS ${TPSRC}/dist/lib) SET(Boost_CONFIG_LIBRARY_DIR ${TPLIB}) - SET(Boost_LIBRARIES boost_chrono boost_date_time boost_filesystem boost_iostreams boost_program_options boost_regex boost_system boost_thread boost_timer) - LINK_DIRECTORIES(${Boost_LIBRARY_DIRS}) STRING(REPLACE ";" ", " NEEDED_BOOST_LIBS_STRING "${NEEDED_BOOST_LIBS}") MESSAGE(STATUS "Build boost libs: ${NEEDED_BOOST_LIBS_STRING}") @@ -208,4 +192,4 @@ ELSE (THIRDPARTY_BUILD_BOOST) 
SET(Boost_CONFIG_LIBRARY_DIR ${Boost_LIBRARY_DIRS}) ENDIF (THIRDPARTY_BUILD_BOOST) -INCLUDE_DIRECTORIES(SYSTEM ${Boost_INCLUDE_DIRS}) +INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIRS}) diff --git a/cmake/ThirdPartyCCM.cmake b/cmake/ThirdPartyCCM.cmake index a7ab61c02ad59bf804028227e36830e33de035fd..303bd841f60ee9c66f3db27ac48d7d20535061d5 100644 --- a/cmake/ThirdPartyCCM.cmake +++ b/cmake/ThirdPartyCCM.cmake @@ -6,28 +6,80 @@ # ######################################################################## OPTION(NEKTAR_USE_CCM - "CCM star i/o library is available." OFF) + "use CCM star i/o" OFF) -IF( NEKTAR_USE_CCM ) +IF(NEKTAR_USE_CCM) + +# First search for system ccmioL installs. Hint /usr/local +FIND_PATH (CCMIO_INCLUDE_DIR ccmio.h PATHS /usr/local/include) +FIND_LIBRARY(CCMIO_LIBRARY NAMES "ccmio" PATHS /usr/local/lib) + +# If we have our library then don't build CCMIO. +IF (CCMIO_INCLUDE_DIR AND CCMIO_LIBRARY) + SET(BUILD_CCMIO OFF) +ELSE() + SET(BUILD_CCMIO ON) +ENDIF () + +OPTION(THIRDPARTY_BUILD_CCMIO + "Build CCMIO library from ThirdParty if permitted." ${BUILD_CCMIO}) + +IF (THIRDPARTY_BUILD_CCMIO) + INCLUDE(ExternalProject) + MESSAGE(WARNING "We are seeking permission to distribute ccmio with Nektar++. 
If you are entitled to use libccmio please contact nektar-users@imperial.ac.uk and place the file ccmio-2.06.tar.bz2 in the director $NEKTAR/ThirdParty") + EXTERNALPROJECT_ADD( + ccmio-2.06 + PREFIX ${TPSRC} + URL ${TPURL}/ccmio-2.06.tar.bz2 + URL_MD5 809ee34a983cbc8931ca23879d92b4d0 + STAMP_DIR ${TPBUILD}/stamp + DOWNLOAD_DIR ${TPSRC} + SOURCE_DIR ${TPSRC}/ccmio-2.06 + BINARY_DIR ${TPBUILD}/ccmio-2.06 + TMP_DIR ${TPBUILD}/ccmio-2.06-tmp + INSTALL_DIR ${TPDIST} + CONFIGURE_COMMAND ${CMAKE_COMMAND} + -G ${CMAKE_GENERATOR} + -DCMAKE_C_COMPILER:FILEPATH=${CMAKE_C_COMPILER} + -DCMAKE_CXX_COMPILER:FILEPATH=${CMAKE_CXX_COMPILER} + -DCMAKE_INSTALL_PREFIX:PATH=${TPDIST} + ${TPSRC}/ccmio-2.06 + ) + SET(CCMIO_LIBRARY ccmio CACHE FILEPATH + "CCMIO library" FORCE) + SET(CCMIO_INCLUDE_DIR ${TPDIST}/include CACHE FILEPATH + "CCMIO include" FORCE) + + LINK_DIRECTORIES(${TPDIST}/lib) + + INCLUDE_DIRECTORIES(NekMesh ${CCMIO_INCLUDE_DIR}) + + IF (WIN32) + MESSAGE(STATUS + "Build CCMIO: ${TPDIST}/${LIB_DIR}/${CCMIO_LIBRARY}.dll") + ELSE () + MESSAGE(STATUS + "Build CCMIO: ${TPDIST}/${LIB_DIR}/lib${CCMIO_LIBRARY}.a") + ENDIF () + + SET(CCMIO_CONFIG_INCLUDE_DIR ${TPINC}) set(CCMIO_LIBRARIES ccmio adf ) +ELSE() + ADD_CUSTOM_TARGET(ccmio-2.06 ALL) + MESSAGE(STATUS "Found CCMIO: ${CCMIO_LIBRARY}") + SET(CCMIO_CONFIG_INCLUDE_DIR ${CCMIO_INCLUDE_DIR}) + INCLUDE_DIRECTORIES(NekMesh ${CCMIO_INCLUDE_DIR}) + LINK_DIRECTORIES(${CCMIO_LIBRARY_DIR}) + +ENDIF (THIRDPARTY_BUILD_CCMIO) + +SET(CCMIO_LIBRARIES ccmio adf) - FIND_LIBRARY(CCMIO_LIBRARY NAMES "ccmio" PATHS /usr/local/lib ${Nektar++_TP_LIBRARY_DIRS}) - - IF( CCMIO_LIBRARY ) - MESSAGE(STATUS "Found Ccmio: ${CCMIO_LIBRARY}") - MARK_AS_ADVANCED(CCMIO_LIBRARY) - ADD_DEFINITIONS(-DNEKTAR_USE_CCM) - FIND_PATH (CCMIO_INCLUDE_DIR ccmio.h) - GET_FILENAME_COMPONENT(CCMIO_LIBRARY_DIR ${CCMIO_LIBRARY} PATH) - INCLUDE_DIRECTORIES(NekMesh ${CCMIO_INCLUDE_DIR}) - LINK_DIRECTORIES(${CCMIO_LIBRARY_DIR}) - MESSAGE(STATUS ${CCMIO_LIBRARY_DIR}) - ELSE() - 
MESSAGE(FATAL_ERROR "Cound not find ccmio library") - ENDIF() -ENDIF( NEKTAR_USE_CCM ) +MARK_AS_ADVANCED(CCMIO_INCLUDE_DIR) +MARK_AS_ADVANCED(CCMIO_LIBRARY) +ENDIF(NEKTAR_USE_CCM) diff --git a/cmake/ThirdPartyFFTW.cmake b/cmake/ThirdPartyFFTW.cmake index 5edbd25fe7932431484a463e4d6e4f19010e199d..1961bf90b4baca46d85ee5837fe2b5e9dff0be1c 100644 --- a/cmake/ThirdPartyFFTW.cmake +++ b/cmake/ThirdPartyFFTW.cmake @@ -10,13 +10,14 @@ OPTION(NEKTAR_USE_FFTW "Use FFTW routines for performing the Fast Fourier Transform." OFF) IF (NEKTAR_USE_FFTW) - # Set some common FFTW search paths. + # Set some common FFTW search paths for the library. SET(FFTW_SEARCH_PATHS $ENV{LD_LIBRARY_PATH} $ENV{FFTW_HOME}/lib) FIND_LIBRARY(FFTW_LIBRARY NAMES fftw3 fftw3f PATHS ${FFTW_SEARCH_PATHS}) - IF (FFTW_LIBRARY) - GET_FILENAME_COMPONENT(FFTW_PATH ${FFTW_LIBRARY} PATH) - SET(FFTW_INCLUDE_DIR ${FFTW_PATH}/../include CACHE FILEPATH "FFTW include directory.") + FIND_PATH(FFTW_INCLUDE_DIR NAMES fftw3.h CACHE FILEPATH + "FFTW include directory.") + + IF (FFTW_LIBRARY AND FFTW_INCLUDE_DIR) SET(BUILD_FFTW OFF) ELSE() SET(BUILD_FFTW ON) @@ -55,9 +56,16 @@ IF (NEKTAR_USE_FFTW) MESSAGE(STATUS "Found FFTW: ${FFTW_LIBRARY}") SET(FFTW_CONFIG_INCLUDE_DIR ${FFTW_INCLUDE_DIR}) ENDIF() -ENDIF( NEKTAR_USE_FFTW ) -INCLUDE_DIRECTORIES(SYSTEM ${FFTW_INCLUDE_DIR}) + # Test if FFTW path is a system path. Only add to include path if not an + # implicitly defined CXX include path (due to GCC 6.x now providing its own + # version of some C header files and -isystem reorders include paths). 
+ GET_FILENAME_COMPONENT(X ${CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES} ABSOLUTE) + GET_FILENAME_COMPONENT(Y ${FFTW_INCLUDE_DIR} ABSOLUTE) + IF (NOT Y MATCHES ".*${X}.*") + INCLUDE_DIRECTORIES(SYSTEM ${FFTW_INCLUDE_DIR}) + ENDIF() -MARK_AS_ADVANCED(FFTW_LIBRARY) -MARK_AS_ADVANCED(FFTW_INCLUDE_DIR) + MARK_AS_ADVANCED(FFTW_LIBRARY) + MARK_AS_ADVANCED(FFTW_INCLUDE_DIR) +ENDIF( NEKTAR_USE_FFTW ) diff --git a/cmake/ThirdPartyHDF5.cmake b/cmake/ThirdPartyHDF5.cmake index 7164a25bb5ec7213c1a998f2beedc5f9e9ffd5a5..89feb885bae4c2ec6b02eb35b96d4aaaf50d1b8f 100644 --- a/cmake/ThirdPartyHDF5.cmake +++ b/cmake/ThirdPartyHDF5.cmake @@ -61,13 +61,11 @@ IF (NEKTAR_USE_HDF5) ${TPSRC}/hdf5-1.8.16 ) - SET(HDF5_LIBRARIES hdf5-shared CACHE FILEPATH - "HDF5 libraries" FORCE) + THIRDPARTY_LIBRARY(HDF5_LIBRARIES SHARED hdf5-shared + DESCRIPTION "HDF5 library") SET(HDF5_INCLUDE_DIRS ${TPDIST}/include CACHE FILEPATH "HDF5 include directory" FORCE) - LINK_DIRECTORIES(${TPDIST}/lib) - MESSAGE(STATUS "Build HDF5: ${HDF5_LIBRARIES}") SET(HDF5_CONFIG_INCLUDE_DIR ${TPINC}) diff --git a/cmake/ThirdPartyLoki.cmake b/cmake/ThirdPartyLoki.cmake deleted file mode 100644 index 9b32269f19a2439f4fc8ed896857ade7dbb95177..0000000000000000000000000000000000000000 --- a/cmake/ThirdPartyLoki.cmake +++ /dev/null @@ -1,48 +0,0 @@ -######################################################################## -# -# ThirdParty configuration for Nektar++ -# -# Loki headers library -# -######################################################################## - -# Try to find system Loki headers. Hint /opt/local/include for MacPorts -# (although there is no Portfile for Loki currently). -FIND_PATH(LOKI_INCLUDE_DIR loki/Singleton.h PATHS /opt/local/include) - -IF (LOKI_INCLUDE_DIR) - SET(BUILD_LOKI OFF) -ELSE() - SET(BUILD_LOKI ON) -ENDIF() - -OPTION(THIRDPARTY_BUILD_LOKI - "Download and extract Loki library to ThirdParty." ${BUILD_LOKI}) - -IF (THIRDPARTY_BUILD_LOKI) - # Download Loki if it doesn't already exist. 
- IF (NOT EXISTS ${TPSRC}/loki-0.1.3.tar.bz2) - FILE(DOWNLOAD ${TPURL}/loki-0.1.3.tar.bz2 ${TPSRC}/loki-0.1.3.tar.bz2) - ENDIF() - - # TODO: Check hashes. - - # Extract. - EXECUTE_PROCESS( - COMMAND ${CMAKE_COMMAND} -E tar jxf ${TPSRC}/loki-0.1.3.tar.bz2 - WORKING_DIRECTORY ${TPSRC} - ) - - # Set LOKI_INCLUDE_DIR. - FILE(COPY ${TPSRC}/loki-0.1.3/include/loki/ DESTINATION ${TPDIST}/include/loki/) - SET(LOKI_INCLUDE_DIR ${TPDIST}/include CACHE PATH "" FORCE) - - MESSAGE(STATUS "Build Loki: ${LOKI_INCLUDE_DIR}") - SET(LOKI_CONFIG_INCLUDE_DIR ${TPINC}) -ELSE() - MESSAGE(STATUS "Found Loki: ${LOKI_INCLUDE_DIR}") - SET(LOKI_CONFIG_INCLUDE_DIR ${LOKI_INCLUDE_DIR}) -ENDIF() - -INCLUDE_DIRECTORIES(SYSTEM ${LOKI_INCLUDE_DIR}) -MARK_AS_ADVANCED(LOKI_INCLUDE_DIR) diff --git a/cmake/ThirdPartyMPI.cmake b/cmake/ThirdPartyMPI.cmake index 47bcd76ca91f55ef9df74c188a5fc11bb8a5f17c..4388bf119c62922e3e1e297b5bc3afb3c28b9a7e 100644 --- a/cmake/ThirdPartyMPI.cmake +++ b/cmake/ThirdPartyMPI.cmake @@ -25,7 +25,7 @@ IF( NEKTAR_USE_MPI ) IF (NOT "${HAVE_MPI_H}" OR NOT "${HAVE_MPI_SEND}") FIND_PACKAGE(MPI REQUIRED) - INCLUDE_DIRECTORIES( ${MPI_CXX_INCLUDE_PATH} ) + INCLUDE_DIRECTORIES(SYSTEM ${MPI_CXX_INCLUDE_PATH} ) MESSAGE(STATUS "Found MPI: ${MPI_CXX_LIBRARIES}") ELSE() SET(MPI_BUILTIN ON) diff --git a/cmake/ThirdPartyMetis.cmake b/cmake/ThirdPartyMetis.cmake index 6df77e374cc33bbc9d014775b2bd62afb7eea9da..1399de49e9c65820dda003962125b0a1c81266aa 100644 --- a/cmake/ThirdPartyMetis.cmake +++ b/cmake/ThirdPartyMetis.cmake @@ -40,14 +40,8 @@ IF (CMAKE_CXX_COMPILER_ID MATCHES "Clang") ENDIF() ENDIF() -SET(METIS_LIB metis CACHE FILEPATH "METIS library" FORCE) +THIRDPARTY_LIBRARY(METIS_LIB STATIC metis DESCRIPTION "Metis library") MARK_AS_ADVANCED(METIS_LIB) +MESSAGE(STATUS "Build Metis: ${METIS_LIB}") -LINK_DIRECTORIES(${TPDIST}/lib) -INCLUDE_DIRECTORIES(${TPDIST}/include) - -IF (WIN32) - MESSAGE(STATUS "Build Metis: ${TPDIST}/${LIB_DIR}/${METIS_LIB}.dll") -ELSE () - MESSAGE(STATUS "Build 
Metis: ${TPDIST}/${LIB_DIR}/lib${METIS_LIB}.a") -ENDIF() +INCLUDE_DIRECTORIES(${TPDIST}/include) \ No newline at end of file diff --git a/cmake/ThirdPartyOCE.cmake b/cmake/ThirdPartyOCE.cmake index 5a6f726295ff752286bf6281063fe1b18f1b3ca6..f4a32e72703ad735a94f6fba9040f81b097bc416 100644 --- a/cmake/ThirdPartyOCE.cmake +++ b/cmake/ThirdPartyOCE.cmake @@ -65,10 +65,10 @@ IF(NEKTAR_USE_MESHGEN) MESSAGE(STATUS "Build OpenCascade community edition: ${TPDIST}/lib") LINK_DIRECTORIES(${TPDIST}/lib) - INCLUDE_DIRECTORIES(SYSTEM ${TPDIST}/include/oce) + INCLUDE_DIRECTORIES(${TPDIST}/include/oce) ELSE() ADD_CUSTOM_TARGET(oce-0.17 ALL) SET(OPENCASCADE_CONFIG_INCLUDE_DIR ${OCC_INCLUDE_DIR}) - INCLUDE_DIRECTORIES(SYSTEM ${OCC_INCLUDE_DIR}) + INCLUDE_DIRECTORIES(${OCC_INCLUDE_DIR}) ENDIF() ENDIF() diff --git a/cmake/ThirdPartyPETSc.cmake b/cmake/ThirdPartyPETSc.cmake index ea3abc1c9346ed61b2552523530834eb76dfd597..4f68d0c826ec360e8ad5a80528221f2f544c78db 100644 --- a/cmake/ThirdPartyPETSc.cmake +++ b/cmake/ThirdPartyPETSc.cmake @@ -106,13 +106,11 @@ IF (NEKTAR_USE_PETSC) ${PETSC_NO_MPI} BUILD_COMMAND MAKEFLAGS= make) - SET(PETSC_LIBRARIES petsc CACHE FILEPATH - "PETSc library" FORCE) + THIRDPARTY_LIBRARY(PETSC_LIBRARIES SHARED petsc + DESCRIPTION "PETSc library") SET(PETSC_INCLUDES ${TPDIST}/include CACHE FILEPATH "PETSc includes" FORCE) - - LINK_DIRECTORIES(${TPDIST}/lib) - MESSAGE(STATUS "Build PETSc: ${TPDIST}/${LIB_DIR}/lib${PETSC_LIBRARIES}.so") + MESSAGE(STATUS "Build PETSc: ${PETSC_LIBRARIES}") SET(PETSC_CONFIG_INCLUDE_DIR ${TPINC}) ELSE (THIRDPARTY_BUILD_PETSC) INCLUDE(FindPETSc) @@ -127,9 +125,9 @@ IF (NEKTAR_USE_PETSC) ENDIF (THIRDPARTY_BUILD_PETSC) ADD_DEFINITIONS(-DNEKTAR_USING_PETSC) - INCLUDE_DIRECTORIES(SYSTEM ${PETSC_INCLUDES}) + INCLUDE_DIRECTORIES(${PETSC_INCLUDES}) IF (NOT NEKTAR_USE_MPI) - INCLUDE_DIRECTORIES(SYSTEM ${PETSC_INCLUDES}/petsc/mpiuni) + INCLUDE_DIRECTORIES(${PETSC_INCLUDES}/petsc/mpiuni) ENDIF (NOT NEKTAR_USE_MPI) MARK_AS_ADVANCED(PETSC_CURRENT 
PETSC_DIR PETSC_LIBRARIES PETSC_INCLUDES) diff --git a/cmake/ThirdPartySMV.cmake b/cmake/ThirdPartySMV.cmake deleted file mode 100644 index b88ffd13e2d450614d0ac34195a4f134d2ae8c26..0000000000000000000000000000000000000000 --- a/cmake/ThirdPartySMV.cmake +++ /dev/null @@ -1,34 +0,0 @@ -OPTION(THIRDPARTY_BUILD_SMV "Build LibSMV" OFF) - -IF (THIRDPARTY_BUILD_SMV) - INCLUDE(ExternalProject) - EXTERNALPROJECT_ADD( - libsmvf1.0 - URL ${TPURL}/libsmvf1.0.tar.gz - URL_MD5 "40cad0538acebd4aa83136ef9319150e" - STAMP_DIR ${TPBUILD}/stamp - DOWNLOAD_DIR ${TPSRC} - SOURCE_DIR ${TPSRC}/libsmvf1.0 - BINARY_DIR ${TPBUILD}/libsmvf1.0 - TMP_DIR ${TPBUILD}/libsmvf1.0-tmp - INSTALL_DIR ${TPDIST} - CONFIGURE_COMMAND ${CMAKE_COMMAND} - -DCMAKE_INSTALL_PREFIX:PATH=${TPDIST} ${TPSRC}/libsmvf1.0 - INSTALL_COMMAND echo "LibSMV compiled successfully" - ) - SET(SMV smv CACHE FILEPATH "Path to LibSMV." FORCE) - - MARK_AS_ADVANCED(SMV) - LINK_DIRECTORIES(${TPDIST}/lib) - MESSAGE(STATUS "Build LibSMV: ${TPDIST}/lib/lib${SMV}.a") - SET(NEKTAR_USING_SMV TRUE) - ADD_DEFINITIONS(-DNEKTAR_USING_SMV) -ELSE (THIRDPARTY_BUILD_SMV) - IF (NEKTAR_USE_SMV) - INCLUDE(FindSMV) - IF (SMV_FOUND) - MESSAGE(STATUS "Found LibSMV: ${SMV_LIBRARY}") - ENDIF(SMV_FOUND) - ENDIF (NEKTAR_USE_SMV) -ENDIF (THIRDPARTY_BUILD_SMV) - diff --git a/cmake/ThirdPartyScotch.cmake b/cmake/ThirdPartyScotch.cmake index 3f257ec1c63e31216a374e30a14ea320f1167f71..b02603f52150af9013b1011692e15245538adfcf 100644 --- a/cmake/ThirdPartyScotch.cmake +++ b/cmake/ThirdPartyScotch.cmake @@ -79,16 +79,13 @@ IF (NEKTAR_USE_SCOTCH) prefix=${TPDIST} install ) - SET(SCOTCH_LIBRARY scotch CACHE FILEPATH - "Scotch library" FORCE) - SET(SCOTCHERR_LIBRARY scotcherr CACHE FILEPATH - "Scotch error library" FORCE) + THIRDPARTY_LIBRARY(SCOTCH_LIBRARY STATIC scotch + DESCRIPTION "Scotch library") + THIRDPARTY_LIBRARY(SCOTCHERR_LIBRARY STATIC scotcherr + DESCRIPTION "Scotch error library") SET(SCOTCH_INCLUDE_DIR ${TPDIST}/include CACHE FILEPATH "Scotch 
include directory" FORCE) - - LINK_DIRECTORIES(${TPDIST}/lib) - - MESSAGE(STATUS "Build Scotch: ${TPDIST}/lib/lib${SCOTCH_LIBRARY}.a") + MESSAGE(STATUS "Build Scotch: ${SCOTCH_LIBRARY}") SET(SCOTCH_CONFIG_INCLUDE_DIR ${TPINC}) ELSE (THIRDPARTY_BUILD_SCOTCH) ADD_CUSTOM_TARGET(scotch-6.0.0 ALL) diff --git a/cmake/ThirdPartyTetGen.cmake b/cmake/ThirdPartyTetGen.cmake index cc5d26e44a19ef7e69d50ea0e9b2465732720ad4..770f7fa256f29705a2cad69704b0bc1bfec46949 100644 --- a/cmake/ThirdPartyTetGen.cmake +++ b/cmake/ThirdPartyTetGen.cmake @@ -32,21 +32,11 @@ IF(NEKTAR_USE_MESHGEN) -DCMAKE_INSTALL_PREFIX:PATH=${TPDIST} ${TPSRC}/tetgen-1.5 ) - SET(TETGEN_LIBRARY tetgen CACHE FILEPATH - "TetGen library" FORCE) + THIRDPARTY_LIBRARY(TETGEN_LIBRARY STATIC tetgen + DESCRIPTION "Tetgen library") SET(TETGEN_INCLUDE_DIR ${TPDIST}/include CACHE FILEPATH "TetGen include" FORCE) - - LINK_DIRECTORIES(${TPDIST}/lib) - - IF (WIN32) - MESSAGE(STATUS - "Build TetGen: ${TPDIST}/${LIB_DIR}/${TETGEN_LIBRARY}.dll") - ELSE () - MESSAGE(STATUS - "Build TetGen: ${TPDIST}/${LIB_DIR}/lib${TETGEN_LIBRARY}.a") - ENDIF () - + MESSAGE(STATUS "Build TetGen: ${TETGEN_LIBRARY}") SET(TETGEN_CONFIG_INCLUDE_DIR ${TPINC}) ELSE() ADD_CUSTOM_TARGET(tetgen-1.5 ALL) @@ -54,5 +44,7 @@ IF(NEKTAR_USE_MESHGEN) SET(TRIANGLE_CONFIG_INCLUDE_DIR ${TETGEN_INCLUDE_DIR}) ENDIF (THIRDPARTY_BUILD_TETGEN) - INCLUDE_DIRECTORIES(SYSTEM ${TETGEN_INCLUDE_DIR}) + MARK_AS_ADVANCED(TETGEN_LIBRARY) + MARK_AS_ADVANCED(TETGEN_INCLUDE_DIR) + INCLUDE_DIRECTORIES(${TETGEN_INCLUDE_DIR}) ENDIF(NEKTAR_USE_MESHGEN) diff --git a/cmake/ThirdPartyTinyxml.cmake b/cmake/ThirdPartyTinyxml.cmake index bf0e63ab047745bb042019ca9791b86d56a000b0..a19532ac92469ff3a2c90536728e6e7744d83b4f 100644 --- a/cmake/ThirdPartyTinyxml.cmake +++ b/cmake/ThirdPartyTinyxml.cmake @@ -43,22 +43,13 @@ IF (THIRDPARTY_BUILD_TINYXML) -DCMAKE_INSTALL_PREFIX:PATH=${TPDIST} -DCMAKE_CXX_FLAGS:STRING=-DTIXML_USE_STL ${TPSRC}/tinyxml-2.6.2 - ) - SET(TINYXML_LIBRARY tinyxml CACHE 
FILEPATH - "TinyXML library" FORCE) + ) + + THIRDPARTY_LIBRARY(TINYXML_LIBRARY STATIC tinyxml + DESCRIPTION "TinyXML library") SET(TINYXML_INCLUDE_DIR ${TPDIST}/include CACHE FILEPATH "TinyXML include" FORCE) - - LINK_DIRECTORIES(${TPDIST}/lib) - - IF (WIN32) - MESSAGE(STATUS - "Build TinyXML: ${TPDIST}/${LIB_DIR}/${TINYXML_LIBRARY}.dll") - ELSE () - MESSAGE(STATUS - "Build TinyXML: ${TPDIST}/${LIB_DIR}/lib${TINYXML_LIBRARY}.a") - ENDIF () - + MESSAGE(STATUS "Build TinyXML: ${TINYXML_LIBRARY}") SET(TINYXML_CONFIG_INCLUDE_DIR ${TPINC}) ELSE() ADD_CUSTOM_TARGET(tinyxml-2.6.2 ALL) @@ -66,7 +57,7 @@ ELSE() SET(TINYXML_CONFIG_INCLUDE_DIR ${TINYXML_INCLUDE_DIR}) ENDIF (THIRDPARTY_BUILD_TINYXML) -INCLUDE_DIRECTORIES(SYSTEM ${TINYXML_INCLUDE_DIR}) +INCLUDE_DIRECTORIES(${TINYXML_INCLUDE_DIR}) MARK_AS_ADVANCED(TINYXML_INCLUDE_DIR) MARK_AS_ADVANCED(TINYXML_LIBRARY) diff --git a/cmake/ThirdPartyVTK.cmake b/cmake/ThirdPartyVTK.cmake index fa0d7e8b9e030fc06c5e2797859f182ed0470ed5..cd1baf4db89b39554342c20f1c3dab6d6613be1e 100644 --- a/cmake/ThirdPartyVTK.cmake +++ b/cmake/ThirdPartyVTK.cmake @@ -36,6 +36,9 @@ IF( NEKTAR_USE_VTK ) FIND_PACKAGE(VTK) IF (VTK_FOUND) MESSAGE(STATUS "Found VTK: ${VTK_USE_FILE}") + IF (VTK_MAJOR_VERSION EQUAL 6 AND VTK_MINOR_VERSION EQUAL 0 AND VTK_BUILD_VERSION EQUAL 0) + ADD_DEFINITIONS(-DNEKTAR_HAS_VTK_6_0_0) + ENDIF() ELSE (VTK_FOUND) MESSAGE(FATAL_ERROR "VTK not found") ENDIF (VTK_FOUND) diff --git a/cmake/ThirdPartyZlib.cmake b/cmake/ThirdPartyZlib.cmake index c50d5e48ab32c67d46947981aacf1f614677c848..25551715e1155a12e4e489e959a0114ad40481f2 100644 --- a/cmake/ThirdPartyZlib.cmake +++ b/cmake/ThirdPartyZlib.cmake @@ -62,25 +62,23 @@ IF (THIRDPARTY_BUILD_ZLIB) ENDIF () IF (WIN32) - SET(ZLIB_LIBRARY zlib CACHE FILEPATH - "Zlib library" FORCE) - SET(ZLIB_LIBRARY_DEBUG zlibd CACHE FILEPATH - "Zlib library" FORCE) - SET(ZLIB_LIBRARY_RELEASE zlib CACHE FILEPATH - "Zlib library" FORCE) - MESSAGE(STATUS "Build Zlib: 
${TPDIST}/${LIB_DIR}/${ZLIB_LIBRARY}.dll") + THIRDPARTY_LIBRARY(ZLIB_LIBRARY STATIC zlib + DESCRIPTION "Zlib library") + THIRDPARTY_LIBRARY(ZLIB_LIBRARY_DEBUG STATIC zlibd + DESCRIPTION "Zlib library") + THIRDPARTY_LIBRARY(ZLIB_LIBRARY_RELEASE STATIC zlib + DESCRIPTION "Zlib library") ELSE () - SET(ZLIB_LIBRARY z CACHE FILEPATH - "Zlib library" FORCE) - SET(ZLIB_LIBRARY_DEBUG z CACHE FILEPATH - "Zlib library" FORCE) - SET(ZLIB_LIBRARY_RELEASE z CACHE FILEPATH - "Zlib library" FORCE) - MESSAGE(STATUS "Build Zlib: ${TPDIST}/${LIB_DIR}/lib${ZLIB_LIBRARY}.a") + THIRDPARTY_LIBRARY(ZLIB_LIBRARY SHARED z + DESCRIPTION "Zlib library") + THIRDPARTY_LIBRARY(ZLIB_LIBRARY_DEBUG SHARED z + DESCRIPTION "Zlib library") + THIRDPARTY_LIBRARY(ZLIB_LIBRARY_RELEASE SHARED z + DESCRIPTION "Zlib library") ENDIF () + MESSAGE(STATUS "Build Zlib: ${ZLIB_LIBRARY}") SET(ZLIB_INCLUDE_DIR ${TPDIST}/include CACHE PATH "Zlib include" FORCE) - LINK_DIRECTORIES(${TPDIST}/lib) SET(ZLIB_CONFIG_INCLUDE_DIR ${TPINC}) ELSE (THIRDPARTY_BUILD_ZLIB) ADD_CUSTOM_TARGET(zlib-1.2.7 ALL) @@ -88,4 +86,8 @@ ELSE (THIRDPARTY_BUILD_ZLIB) SET(ZLIB_CONFIG_INCLUDE_DIR ${ZLIB_INCLUDE_DIR}) ENDIF (THIRDPARTY_BUILD_ZLIB) -INCLUDE_DIRECTORIES(SYSTEM ${ZLIB_INCLUDE_DIR}) +MARK_AS_ADVANCED(ZLIB_LIBRARY) +MARK_AS_ADVANCED(ZLIB_LIBRARY_DEBUG) +MARK_AS_ADVANCED(ZLIB_LIBRARY_RELEASE) +MARK_AS_ADVANCED(ZLIB_INCLUDE_DIR) +INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR}) diff --git a/docs/doxygen/Doxyfile.in b/docs/doxygen/Doxyfile.in index 7555aee0ae2e40e91c728fbd47756856a0ed26d9..13f6460e10bfba341159a6d5945590086e8d6cb6 100644 --- a/docs/doxygen/Doxyfile.in +++ b/docs/doxygen/Doxyfile.in @@ -757,6 +757,7 @@ INPUT = @CMAKE_SOURCE_DIR@/docs/doxygen/ \ @CMAKE_SOURCE_DIR@/library/LibUtilities/ \ @CMAKE_SOURCE_DIR@/library/StdRegions/ \ @CMAKE_SOURCE_DIR@/library/SpatialDomains/ \ + @CMAKE_SOURCE_DIR@/library/Collections/ \ @CMAKE_SOURCE_DIR@/library/LocalRegions/ \ @CMAKE_SOURCE_DIR@/library/MultiRegions/ \ 
@CMAKE_SOURCE_DIR@/library/GlobalMapping/ \ diff --git a/docs/user-guide/installation/source.tex b/docs/user-guide/installation/source.tex index 3f69045c5fc940774bfb4c01ce8abd4de873eb59..d347afd2b5c772cc061a4337a8efe8afb6529505 100644 --- a/docs/user-guide/installation/source.tex +++ b/docs/user-guide/installation/source.tex @@ -640,10 +640,6 @@ automatically built during the \nekpp build process. Below are the choices of X: (MPI-only) Parallel communication library. - \item \inlsh{LOKI} - - An implementation of a singleton. - \item \inlsh{METIS} A graph partitioning library used for substructuring of matrices and mesh diff --git a/docs/user-guide/solvers/compressible-flow.tex b/docs/user-guide/solvers/compressible-flow.tex index a65009ac6bdc7984fb9444ae4ca3c15de0570f77..774002280e2f19d3430c6edf97756747a83b3f6a 100644 --- a/docs/user-guide/solvers/compressible-flow.tex +++ b/docs/user-guide/solvers/compressible-flow.tex @@ -221,7 +221,7 @@ to screen; \item \inltt{TInf} farfield temperature (i.e. $T_{\infty}$). Default value = 288.15 $K$; \item \inltt{Twall} temperature at the wall when isothermal boundary conditions are employed (i.e. $T_{w}$). Default value = 300.15$K$; -\item \inltt{uint} farfield $X$-component of the velocity (i.e. $u_{\infty}$). Default value = 0.1 $m/s$; +\item \inltt{uInf} farfield $X$-component of the velocity (i.e. $u_{\infty}$). Default value = 0.1 $m/s$; \item \inltt{vInf} farfield $Y$-component of the velocity (i.e. $v_{\infty}$). Default value = 0.0 $m/s$; \item \inltt{wInf} farfield $Z$-component of the velocity (i.e. $w_{\infty}$). Default value = 0.0 $m/s$; \item \inltt{mu} dynamic viscosity (i.e. $\mu_{\infty}$). 
Default value = 1.78e-05 $Pa s$; @@ -437,6 +437,7 @@ Compressible flow is characterised by abrupt changes in density within the flow \begin{equation}\label{eq:sensor} S_e=\frac{||\rho^p_e-\rho^{p-1}_e||_{L_2}}{||\rho_e^p||_{L_2}} \end{equation} +by default the comparison is made with the $p-1$ solution, but this can be changed by setting the parameter \inltt{SensorOffset}. An artificial diffusion term is introduced locally to the Euler equations to deal with flow discontinuity and the consequential numerical oscillations. Two models are implemented, a non-smooth and a smooth artificial viscosity model. \subsubsection{Non-smooth artificial viscosity model} For the non-smooth artificial viscosity model the added artificial viscosity is constant in each element and discontinuous between the elements. The Euler system is augmented by an added laplacian term on right hand side of equation \ref{eq:euler}. The diffusivity of the system is controlled by a variable viscosity coefficient $\epsilon$. The value of $\epsilon$ is dependent on $\epsilon_0$, which is the maximum viscosity that is dependent on the polynomial order ($p$), the mesh size ($h$) and the maximum wave speed and the local sensor value. Based on pre-defined sensor threshold values, the variable viscosity is set accordingly @@ -512,4 +513,53 @@ The polynomial order in each element can be adjusted based on the sensor value t \end{equation} For now, the threshold values $s_e$, $s_{ds}$, $s_{sm}$ and $s_{fl}$ are determined empirically by looking at the sensor distribution in the domain. Once these values are set, two .txt files are outputted, one that has the composites called VariablePComposites.txt and one with the expansions called VariablePExpansions.txt. These values have to copied into a new .xml file to create the adapted mesh. 
+\subsection{Quasi-1D nozzle flow} +A quasi-1D inviscid flow (flow with area variation) can be obtained using the +\inltt{Quasi1D} forcing in a 1D solution of the Euler equations: +\begin{lstlisting}[style=XMLStyle] + + + Area + + +\end{lstlisting} +in this case a function named \inltt{Area} must be specified in the \inltt{CONDITIONS} section of the session file. + +In this case, it is possible to prescribe the inflow conditions in terms of stagnation properties (density and pressure) +by using the following boundary condition +\begin{lstlisting}[style=XmlStyle] + + + + + + + +\end{lstlisting} + +\subsection{Axi-symmetric flow} +An axi-symmetric inviscid flow (with symmetry axis on x=0) can be obtained using +the \inltt{AxiSymmetric} forcing in a 2D solution of the Euler equations: +\begin{lstlisting}[style=XMLStyle] + + + + +\end{lstlisting} +The \inltt{StagnationInflow} boundary condition can also be used in this case. + +Also, by defining the geometry with \inltt{} (i.e. a two-dimensional +mesh in three-dimensional space) and adding the \inltt{rhow} variable, we obtain an axi-symmetric +flow with swirl, in which case the \inltt{StagnationInflow} boundary condition allows prescribing \inltt{rhow}: +\begin{lstlisting}[style=XmlStyle] + + + + + + + + + +\end{lstlisting} diff --git a/docs/user-guide/utilities/fieldconvert.tex b/docs/user-guide/utilities/fieldconvert.tex index 30619ff4214eea59c73dbfe6c934ce0751b07711..17c3c0e4dbf1855aa1edf2928f04a10891ddedfb 100644 --- a/docs/user-guide/utilities/fieldconvert.tex +++ b/docs/user-guide/utilities/fieldconvert.tex @@ -1,16 +1,16 @@ \chapter{FieldConvert} \label{s:utilities:fieldconvert} -FieldConvert is a utility embedded in \nekpp with the primary aim of -allowing the user to convert the \nekpp output binary files (.chk and -.fld) into a format which can be read by two common visualisation -softwares: Paraview/VisIt (.vtu format) or Tecplot/VisIt (.dat or .plt -formats). 
FieldConvert also allows the user to manipulate the \nekpp -output binary files by using some additional modules which can be -called with the option \inltt{-m} which stands for -\inltt{m}odule. Note that another flag, \inltt{-r} (which stand for -\inltt{r}ange) allows the user to specify a sub-range of the domain on -which the conversion or manipulation of the \nekpp output binary files -will be performed. +FieldConvert is a utility embedded in \nekpp with the primary aim of allowing +the user to convert the \nekpp output binary files (\inltt{.chk} and +\inltt{.fld}) into formats which can be read by common visualisation and +post-processing software, primarily Paraview/VisIt (in unstructured VTK +\inltt{.vtu} format) or Tecplot/VisIt (in ASCII \inltt{.dat} or binary +\inltt{.plt} formats). FieldConvert also allows the user to manipulate the +\nekpp output binary files by using some additional modules which can be called +with the option \inltt{-m} which stands for \inltt{m}odule. Note that another +flag, \inltt{-r} (which stand for \inltt{r}ange) allows the user to specify a +sub-range of the domain on which the conversion or manipulation of the \nekpp +output binary files will be performed. Almost all of the FieldConvert functionalities can be run in parallel if \nekpp is compiled using MPI (see the installation documentation for additional info on @@ -19,6 +19,49 @@ how to implement \nekpp using MPI). \footnote{Modules that do not have parallel % % % +\section{Basic usage} +FieldConvert expects at least one input specification (such as a session file +and its corresponding field file) and one output specification. These are +specified on the command line as +% +\begin{lstlisting}[style=BashInputStyle] + FieldConvert in1.xml in2.fld out.dat +\end{lstlisting} +% +These can be combined with a processing module by adding the \inltt{-m} command +line option. 
There can be more than one module specified, and they can appear +anywhere in the command line arguments, although the order of execution is +inferred from their order in the command line. For example, the command +% +\begin{lstlisting}[style=BashInputStyle] + FieldConvert in1.xml -m module1 in2.fld -m module2 out.dat +\end{lstlisting} +% +causes \inltt{in1.xml} and \inltt{in2.fld} to be read, followed by the +\inltt{module1} processing module, the \inltt{module2} processing module, and +finally output to the \inltt{out.dat} Tecplot file. + +\subsection{Input formats} + +FieldConvert supports XML and FLD-format files as produced by \nekpp. It also +supports the reading of data files from two external spectral element codes: +\emph{Semtex}\footnote{http://users.monash.edu.au/~bburn/semtex.html} and +\emph{Nek5000}\footnote{https://nek5000.mcs.anl.gov}. These files can be +directly converted to \nekpp format files by using the command +% +\begin{lstlisting}[style=BashInputStyle] + FieldConvert input.fld output.fld +\end{lstlisting} +% +Note that even though the \inltt{.fld} extension is typically associated with +\nekpp files, FieldConvert can automatically identify \emph{Semtex} and +\emph{Nek5000} input field files. + +To use these files in a simulation, or to post-process the results of a +simulation, an appropriate mesh must also be defined in the \nekpp XML format. +\nm can be used to convert these input files to XML, as outlined in +section~\ref{s:utilities:nekmesh}. + \section{Convert .fld / .chk files into Paraview, VisIt or Tecplot format} \label{s:utilities:fieldconvert:sub:convert} To convert the \nekpp output binary files (.chk and .fld) into a @@ -106,7 +149,7 @@ to define the $z$ range. \section{FieldConvert modules \textit{-m}} FieldConvert allows the user to manipulate the \nekpp output binary files (.chk and .fld) by using the flag \inltt{-m} (which -stands for \inltt{m}odule).. +stands for \inltt{m}odule). 
Specifically, FieldConvert has these additional functionalities % \begin{enumerate} @@ -116,7 +159,7 @@ Specifically, FieldConvert has these additional functionalities \item \inltt{addFld}: Sum two .fld files; \item \inltt{combineAvg}: Combine two \nekpp binary output (.chk or .fld) field file containing averages of fields (and possibly also Reynolds stresses) into single file; -\item \inltt{concatenate}: Concatenate a \nekpp binary output (.chk or .fld) field file into single file; +\item \inltt{concatenate}: Concatenate a \nekpp binary output (.chk or .fld) field file into single file (deprecated); \item \inltt{equispacedoutput}: Write data as equi-spaced output using simplices to represent the data for connecting points; \item \inltt{extract}: Extract a boundary field; \item \inltt{homplane}: Extract a plane from 3DH1D expansions; @@ -260,17 +303,19 @@ To concatenate \inltt{file1.fld} and \inltt{file2.fld} into \inltt{file-conc.fld one can run the following command % \begin{lstlisting}[style=BashInputStyle] -FieldConvert -m concatenate file.xml file1.fld file2.fld file-conc.fld +FieldConvert file.xml file1.fld file2.fld file-conc.fld \end{lstlisting} % where the file \inltt{file-conc.fld} can be processed in a similar way as described in section \ref{s:utilities:fieldconvert:sub:convert} -to visualise the result either in Tecplot, Paraview or VisIt. +to visualise the result either in Tecplot, Paraview or VisIt. The +\inltt{concatenate} module previously used for this purpose is not +required anymore, and will be removed in a future release. % % % \subsection{Equi-spaced output of data: \textit{equispacedoutput} module} -This module interpolates the output data to an truly equispaced set of +This module interpolates the output data to a truly equispaced set of points (not equispaced along the collapsed coordinate system). Therefore a tetrahedron is represented by a tetrahedral number of poinst. This produces much smaller output files. 
The points @@ -297,24 +342,21 @@ The boundary region of a domain can be extracted from the output data using the following command line % \begin{lstlisting}[style=BashInputStyle] -FieldConvert -m extract:bnd=2:fldtoboundary=1 test.xml \ +FieldConvert -m extract:bnd=2 test.xml \ test.fld test-boundary.fld \end{lstlisting} % The option \inltt{bnd} specifies which boundary region to extract. Note this is different to NekMesh where the parameter \inltt{surf} is specified and corresponds to composites rather boundaries. If \inltt{bnd} -is not provided, all boundaries are extracted to different fields. The \inltt{fldtoboundary} -is an optional command argument which copies the expansion of test.fld into -the boundary region before outputting the .fld file. This option is on by default. -If it turned off using \inltt{fldtoboundary=0} the extraction will only evaluate the -boundary condition from the xml file. The output will be placed in test-boundary-b2.fld. -If more than one boundary region is specified the extension -b0.fld, -b1.fld etc will be +is not provided, all boundaries are extracted to different fields. +The output will be placed in test-boundary\_b2.fld. +If more than one boundary region is specified the extension \_b0.fld, \_b1.fld etc will be outputted. To process this file you will need an xml file of the same region. This can be generated using the command: % \begin{lstlisting}[style=BashInputStyle] -NekMesh -m extract:surf=5 test.xml test-b0.xml +NekMesh -m extract:surf=5 test.xml test\_b0.xml \end{lstlisting} % The surface to be extracted in this command is the composite @@ -322,7 +364,7 @@ number and so needs to correspond to the boundary region of interest. 
Finally to process the surface file one can use % \begin{lstlisting}[style=BashInputStyle] -FieldConvert test-b0.xml test-b0.fld test-b0.dat +FieldConvert test\_b0.xml test\_b0.fld test\_b0.dat \end{lstlisting} % This will obviously generate a Tecplot output if a .dat file @@ -370,9 +412,7 @@ To stretch a 3DH1D expansion in the z-direction, use the command: FieldConvert -m homstretch:factor=value file.xml file.fld file-stretch.fld \end{lstlisting} The number of modes in the resulting field can be chosen using the command-line -parameter \inltt{output-points-hom-z}. Note that the output for -this module should always be a \inltt{.fld} file and this should not -be used in combination with other modules using a single command. +parameter \inltt{output-points-hom-z}. The output file \inltt{file-stretch.fld} can be processed in a similar way as described in section \ref{s:utilities:fieldconvert:sub:convert} @@ -394,7 +434,7 @@ determine the inner product of these fields. The input option Optional arguments for this module are \inltt{fields} which allow you to specify the fields that you wish to use for the inner product, i.e. \begin{lstlisting}[style=BashInputStyle] - FieldConvert -m innerproduct:fromfld=file1.fld:fields=''0,1,2'' file2.xml \ + FieldConvert -m innerproduct:fromfld=file1.fld:fields="0,1,2" file2.xml \ file2.fld out.stdout \end{lstlisting} will only take the inner product between the variables 0,1 and 2 in @@ -405,7 +445,7 @@ Additional options include \inltt{multifldids} and \inltt{allfromflds} which allow for a series of fields to be evaluated in the following manner: \begin{lstlisting}[style=BashInputStyle] - FieldConvert -m innerproduct:fromfld=file1.fld:multifldids=''0-3''\ + FieldConvert -m innerproduct:fromfld=file1.fld:multifldids="0-3"\ file2.xml file2.fld out.stdout \end{lstlisting} will take the inner product between a file names @@ -414,7 +454,7 @@ respect to field2.fld. Analogously including the options \inltt{allfromflds}, i.e. 
\begin{lstlisting}[style=BashInputStyle] - FieldConvert -m innerproduct:fromfld=file1.fld:multifldids=''0-3'':\ + FieldConvert -m innerproduct:fromfld=file1.fld:multifldids="0-3":\ allfromflds file2.xml file2.fld out.stdout \end{lstlisting} Will take the inner product of all the from fields, @@ -460,7 +500,7 @@ faster. To interpolate discrete point data to a field, use the interppointdatatofld module: % \begin{lstlisting}[style=BashInputStyle] -FieldConvert -m interppointdatatofld file1.xml file1.pts file1.fld +FieldConvert -m interppointdatatofld:frompts=file1.pts file1.xml file1.fld \end{lstlisting} or alternatively for csv data: \begin{lstlisting}[style=BashInputStyle] @@ -503,8 +543,8 @@ In order to interpolate 1D data to a $n$D field, specify the matching coordinate the output field using the \inltt{interpcoord} argument: % \begin{lstlisting}[style=BashInputStyle] -FieldConvert -m interppointdatatofld:interppointdatatofld=1 3D-file1.xml \ - 1D-file1.pts 3D-file1.fld +FieldConvert -m interppointdatatofld:frompts=1D-file1.pts:interppointdatatofld=1 \ + 3D-file1.xml 3D-file1.fld \end{lstlisting} % This will interpolate the 1D scattered point data from \inltt{1D-file1.pts} to the @@ -522,8 +562,8 @@ The Inverse Distance implementation has no such requirement. 
\subsection{Interpolate a field to a series of points: \textit{interppoints} module} You can interpolate one field to a series of given points using the following command: \begin{lstlisting}[style=BashInputStyle] -FieldConvert -m interppoints:fromxml=file1.xml:fromfld=file1.fld \ - file2.pts file2.dat +FieldConvert -m interppoints:fromxml=file1.xml:fromfld=\ + file1.fld:topts=file2.pts file2.dat \end{lstlisting} This command will interpolate the field defined by \inltt{file1.xml} and \inltt{file1.fld} to the points defined in \inltt{file2.pts} and output it to @@ -583,15 +623,14 @@ where \inltt{npts1,npts2,npts3} is the number of equispaced points in each direction and $(xmin,ymin,zmin)$ and $(xmax,ymax,zmax3)$ define the limits of the box of points. -For the plane and box interpolation there is an additional optional -argument \inltt{cp=p0,q} which adds to the interpolated fields the value of -$c_p=(p-p0)/q$ and $c_{p0}=(p-p0+0.5 u^2)/q$ where $p0$ is a reference -pressure and $q$ is the free stream dynamics pressure. If the input -does not contain a field ``p'' or a velocity field ``u,v,w'' then $cp$ +There is also an additional optional argument \inltt{cp=p0,q} which adds to the +interpolated fields the value of $c_p=(p-p0)/q$ and $c_{p0}=(p-p0+0.5 u^2)/q$ +where $p0$ is a reference pressure and $q$ is the free stream dynamics pressure. +If the input does not contain a field ``p'' or a velocity field ``u,v,w'' then $cp$ and $cp0$ are not evaluated accordingly % \begin{notebox} -This module runs in parallel for the plane and box extraction of points. In this case a series of .dat files are generated that can be concatinated together. Other options do not run in parallel. +This module runs in parallel for the line, plane and box extraction of points. \end{notebox} % % @@ -611,9 +650,9 @@ isocontour to be extracted. test-isocontour.dat \end{lstlisting} -Alternatively \inltt{fieldstr}=''u+v'' can be specified to calculate -the field $u^2$ and extract its isocontour. 
You can also specify -\inltt{fieldname}=''UplusV'' to define the name of the isocontour in +Alternatively \inltt{fieldstr="u+v"} can be specified to calculate +the field $u+v$ and extract its isocontour. You can also specify +\inltt{fieldname="UplusV"} to define the name of the isocontour in the .dat file, i.e. \begin{lstlisting}[style=BashInputStyle] FieldConvert -m isocontour:fieldstr="u+v":fieldvalue=0.5:\ @@ -713,7 +752,7 @@ to visualise the result either in Tecplot or in Paraview or VisIt. To project a series of points given at the same quadrature distribution as the .xml file and write out a .fld file use the pointdatatofld module: % \begin{lstlisting}[style=BashInputStyle] -FieldConvert --noequispaced -m pointdatatofld file.pts file.xml file.fld +FieldConvert -m pointdatatofld:frompts=file.pts file.xml file.fld \end{lstlisting} % This command will read in the points provided in the \inltt{file.pts} @@ -745,19 +784,19 @@ data, the $n$ coordinates are specified in the first $n$ columns accordingly followed by the field data. Alternatively, the \inltt{file.pts} can be interchanged with a csv file. -The default argument is to use the equipapced (but potentially +The default argument is to use the equispaced (but potentially collapsed) coordinates which can be obtained from the command. \begin{lstlisting}[style=BashInputStyle] FieldConvert file.xml file.dat \end{lstlisting} -In this case the pointdatatofld module shoudl be used without the +In this case the pointdatatofld module should be used without the \inltt{--noequispaced} option. However this can lead to problems when peforming an elemental forward projection/transform since the mass matrix in a deformed element can be singular as the equispaced points do not have a sufficiently accurate quadrature rule that spans the -polynomial space. Therefore it is adviseable to use the set of points +polynomial space. 
Therefore it is advisable to use the set of points given by \begin{lstlisting}[style=BashInputStyle] @@ -765,8 +804,7 @@ FieldConvert --noequispaced file.xml file.dat \end{lstlisting} which produces a set of points at the gaussian collapsed -coordinates. In this case one must also use the \inltt{--noequispaced} -option when projecting to a field. +coordinates. Finally the option \inltt{setnantovalue=0} can also be used which sets any nan values in the interpolation to zero or any specified value in @@ -803,11 +841,11 @@ The option \inltt{bnd} specifies which boundary region to extract. Note this is \subsection{Scale a given .fld: \textit{scaleinputfld} module} To scale a .fld file by a given scalar quantity, the user can run: \begin{lstlisting}[style=BashInputStyle] -FieldConvert -m scaleinputfld:scale=value test.xml test.fld test-scal.fld +FieldConvert -m scaleinputfld:scale=value test.fld test-scal.fld \end{lstlisting} The argument \inltt{scale=value} rescales of a factor \inltt{value} \inltt{test.fld} by the factor value. -The output file \inltt{file-conc.fld} can be processed in a similar +The output file \inltt{file-scal.fld} can be processed in a similar way as described in section \ref{s:utilities:fieldconvert:sub:convert} to visualise the result either in Tecplot, Paraview or VisIt. 
@@ -828,7 +866,8 @@ Time-dependent wall shear stress derived metrics relevant to cardiovascular flui To compute these, the user can run: \begin{lstlisting}[style=BashInputStyle] -FieldConvert -m shear:N=value:fromfld=test_id_b0.fld test.xml test-multishear.fld +FieldConvert -m shear:N=value:fromfld=test_id_b0.fld \ + test.xml test-multishear.fld \end{lstlisting} The argument \inltt{N} and \inltt{fromfld} are compulsory arguments that respectively define the number of \inltt{fld} files corresponding to the number of discrete equispaced time-steps, and the first \inltt{fld} file which should have the form of \inltt{test\_id\_b0.fld} where the first underscore in the name marks the starting time-step file ID. @@ -840,7 +879,8 @@ The input \inltt{.fld} files are the outputs of the \textit{wss} module. If they % \subsection{Boundary layer height calculation: \textit{surfdistance} module} -The surface distance module computes the height of a prismatic boundary layer +The surface distance module computes the height of a boundary layer formed by +quadrilaterals (in 2D) or prisms and hexahedrons (in 3D) and projects this value onto the surface of the boundary, in a similar fashion to the \inltt{extract} module. In conjunction with a mesh of the surface, which can be obtained with \inltt{NekMesh}, and a value of the average wall shear @@ -877,7 +917,13 @@ To obtain the wall shear stres vector and magnitude, the user can run: \begin{lstlisting}[style=BashInputStyle] FieldConvert -m wss:bnd=0:addnormals=1 test.xml test.fld test-wss.fld \end{lstlisting} -The option \inltt{bnd} specifies which boundary region to extract. Note this is different to NekMesh where the parameter \inltt{surf} is specified and corresponds to composites rather boundaries. If \inltt{bnd} is not provided, all boundaries are extracted to different fields. 
The \inltt{addnormals} is an optional command argument which, when turned on, outputs the normal vector of the extracted boundary region as well as the shear stress vector and magnitude. This option is off by default. To process the output file(s) you will need an xml file of the same region.
+The option \inltt{bnd} specifies which boundary region to extract. Note this is
+different to NekMesh where the parameter \inltt{surf} is specified and corresponds
+to composites rather than boundaries. If \inltt{bnd} is not provided, all boundaries
+are extracted to different fields. The \inltt{addnormals} is an optional command
+argument which, when turned on, outputs the normal vector of the extracted boundary
+region as well as the shear stress vector and magnitude. This option is off by default.
+To process the output file(s) you will need an xml file of the same region.
 %
 %
 %
@@ -954,9 +1000,9 @@ can be done by using the \inltt{writemultiplefiles} option, i.e.
 \end{lstlisting}
 
 For the \inltt{.vtu} format multiple files will by default be produced
-of the form \inltt{test\_P0.vtu}, \inltt{test\_P1.vtu},
-\inltt{test\_P2.vtu}. For this format an additional file called
-\inltt{.pvtu} is written out which allows for parallel reading of the
+of the form \inltt{test\_vtu/P0000000.vtu}, \inltt{test\_vtu/P0000001.vtu},
+\inltt{test\_vtu/P0000002.vtu}. For this format an additional file called
+\inltt{test.pvtu} is written out which allows for parallel reading of the
 individual \inltt{.vtu} files.
 
 FieldConvert functions that produce a \inltt{.fld} file output will
@@ -970,55 +1016,81 @@ within the directory.
 
 \section{Processing large files in serial}
 When processing large files, it is not always convenient to run in parallel
 but process each parallel partition in serial, for example when interpolating a
-solution field from one mesh to another.
+solution field from one mesh to another or creating an output file for
+visualization.
-\subsection{Using the \texttt{nprocs} and \texttt{procid} options} +\subsection{Using the \textit{nparts} options} -One option is to use the \inltt{--nprocs} and \inltt{--procid} command line -options. For example, the following command will interpolate partition 2 of a -decomposition into 10 partitions of \inltt{fiile2.xml} from \inltt{file1.fld} +One option is to use the \inltt{nparts} command line +option. For example, the following command will create a +\inltt{.vtu} file using 10 partitions of \inltt{file1.xml}: \begin{lstlisting}[style=BashInputStyle] -FieldConvert --nprocs 10 --procid 2 \ - -m interpfield:fromxml=file1.xml:fromfld=file1.fld \ - file2.xml file2.fld +FieldConvert --nparts 10 file1.xml file1.fld file1.vtu \end{lstlisting} -This call will only therefore consider the interpolation process across one -partition (namely, partition 2). To create the full interpolated field requires -a loop over each of the partitions, which, in a bash shell can be run as +Note this will create a parallel vtu file as it processes each partition. + + +Another example is to interpolate \inltt{file1.fld} from one mesh +\inltt{file1.xml} to another \inltt{file2.xml}. If the mesh files are +large we can do this by partitioning \inltt{file2.xml} into 10 (or more) +partitions and interpolating each partition one by one using the +command: \begin{lstlisting}[style=BashInputStyle] -for n in `seq 0 9`; do - FieldConvert --nprocs 10 --procid $n \ - -m interpfield:fromxml=file1.xml:fromfld=file1.fld \ - file2.xml file2.fld -done + FieldConvert --nparts 10 -m interpfield:fromxml=file1.xml:fromfld=file1.fld \ + file2.xml file2.fld \end{lstlisting} +Note that internally the routine uses the range option so that it +only has to load the part of \inltt{file1.xml} that overlaps with each +partition of \inltt{file2.xml}. 
The resulting output will lie in a directory called \inltt{file2.fld}, with each
of the different parallel partitions in files with names \inltt{P0000000.fld},
\inltt{P0000001.fld}, \dots, \inltt{P0000009.fld}. This is nearly a complete
-parallel field file. However, the \inltt{Info.xml} file, which contains the
-information about which elements lie in each partition, is missing. This can be
-generated by using the command
+parallel field file. However, when the output file is in the .fld format,
+the \inltt{Info.xml} file, which contains the information about which elements
+lie in each partition, is not correct since it will only contain the information for one of the partitions. The correct \inltt{Info.xml} file can be generated by using the command
 \begin{lstlisting}[style=BashInputStyle]
-FieldConvert --nprocs 10 file2.xml file2.fld/Info.xml:info
+FieldConvert file2.xml file2.fld/Info.xml:info:nparts=10
 \end{lstlisting}
-Note the final \inltt{:info} extension on the last argument is necessary to tell
+Note the \inltt{:info} extension on the last argument is necessary to tell
 FieldConvert that you wish to generate an info file, but with the extension
 \inltt{.xml}. This syntax allows the routine not to get confused with the
 input/output XML files.
 
-\subsection{Using the --part-only and --part-only-overlapping options}
+\subsection{Running in parallel with the \textit{ nparts} option}
+
+The examples above will process each partition serially which may now
+take a while for many partitions. You can however run this option in
+parallel using a smaller number of cores than the nparts.
+
+For the example of creating a vtu file above you can use 4 processors concurrently with the command line:
+\begin{lstlisting}[style=BashInputStyle]
+mpirun -n 4 FieldConvert --nparts 10 file1.xml file1.fld file1.vtu
+\end{lstlisting}
+
+Obviously the executable will have to have been compiled with the MPI option for this to work.
-Another approach to serially proessing a large file is to initially process the -file into multiple partitions. This can be done with the \inltt{--part-only} +\subsection{Using the \textit{ part-only} and \textit{ part-only-overlapping} options} + +The options above will all load in the full \inltt{file1.xml}, partition +it into \inltt{nparts} files in a director called \inltt{file1\_xml}. +This can be expensive if the \inltt{file1.xml} is already large. So instead you can +pre-partition the file using the using the \inltt{--part-only} option. So the command \begin{lstlisting}[style=BashInputStyle] FieldConvert --part-only 10 file.xml file.fld \end{lstlisting} -will partition the mesh into 10 paritions and write each partition into a +will partition the mesh into 10 partitions and write each partition into a directory called \inltt{file\_xml}. If you enter this directory you will find partitioned XML files \inltt{P0000000.xml}, \inltt{P0000001.xml}, \dots, \inltt{P0000009.xml} which can then be processed individually as outlined above. +If you have a partitioned directory either from a parallel run or using the \inltt{--part-only} option you can now run the \inltt{FieldConvert} option using the command +\begin{lstlisting}[style=BashInputStyle] +mpirun -n 4 FieldConvert --nparts 10 file1\_xml:xml file1.fld file1.vtu +\end{lstlisting} + +Note the form \inltt{file1\_xml:xml} option tells the code it is a parallel partition which should be treated as an \inltt{xml} type file. + There is also a \inltt{--part-only-overlapping} option, which can be run in the same fashion. \begin{lstlisting}[style=BashInputStyle] diff --git a/docs/user-guide/utilities/nekmesh.tex b/docs/user-guide/utilities/nekmesh.tex index f7baf6af74bf1390dc897f922a0b6546807a9dd5..0e193e503bf2a1e4645f271ebf605e82553dd8bd 100644 --- a/docs/user-guide/utilities/nekmesh.tex +++ b/docs/user-guide/utilities/nekmesh.tex @@ -842,22 +842,22 @@ example which generates a 2D NACA wing. - - + + -

-

-

-

+

+

+

+

-

-

-

-

-

+

+

+

+

+

@@ -867,10 +867,11 @@ In all cases the mesh generator needs two pieces of information and four parameters. It firstly needs to know the CAD file with which to work. In the example above this is listed as a 4 digit number, this is because the mesh generator is equiped with a NACA wing generator. In all other cases this -parameter would be a STEP file. Secondly, what type of mesh to make, the options -are \inltt{EULER} and \inltt{BL} for 3D meshes and \inltt{2D} and \inltt{2DBL} +parameter would be the name of a CAD file (in either STEP or GEO format). +Secondly, what type of mesh to make, the options +are \inltt{EULER} and \inltt{BndLayer} for 3D meshes and \inltt{2D} and \inltt{2DBndLayer} for 2D meshes. In the case of \inltt{EULER} the mesh will be made with only -tetrahedra. For \inltt{BL} the mesh generator will attempt to insert a single +tetrahedra. For \inltt{BndLayer} the mesh generator will attempt to insert a single macro prism layer onto the geometry surface. This option requires additional parameters. This is similar for the 2D scenarios. The automatic mesh specification system requires three parameters to build the specification of a @@ -889,35 +890,35 @@ An example is shown. - - + + -

-

-

-

+

+

+

+

-

-

-

-

+

+

+

+

-

-

-

-

-

+

+

+

+

+

\end{lstlisting} A list of the CAD surfaces which will have a prism generated on is described by -\inltt{BLSurfs} and the thickness of the boundary to aim for is \inltt{BLThick}. +\inltt{BndLayerSurfaces} and the thickness of the boundary to aim for is \inltt{BndLayerThickness}. % The mesh generator has been created with a range of error messages to aid in debugging. If you encounter an error and the mesh generator fails, run \nm with @@ -925,6 +926,32 @@ the verbose \inltt{-v} flag and send the stdout with the .mcf and .step files to \inltt{m.turner14@imperial.ac.uk}. Without the feedback this functionality cannot improve. +\subsubsection{GEO format} + +Recent developments have been made to facilitate the generation of meshes from +simple 2D geometries. The GEO file format, used by Gmsh, is a popular option +that allows the user to script geometrical and meshing operations without the +need of a GUI. A simplified reader has been implemented in NekMesh for 2D geometries. +Although very basic this reader may be extended in the future to cover a wider +range of geometrical features. + +For a full description of the GEO format the user should refer to Gmsh's +documentation. The following commands are currently supported: +\begin{itemize} + \item \inltt{//} (comments) + \item \inltt{Point} + \item \inltt{Line} + \item \inltt{Spline} + \item \inltt{Line Loop} + \item \inltt{Plane Surface} +\end{itemize} + +At the present time, NekMesh does not support the full scripting capabilities of the +GEO format. The used GEO files should be a straightforward succession of entity +creations (see list above). This should however allow for the creation of quite +a wide range of 2D geometries by transformation of arbitrary curves into generic +splines. 
+ %%% Local Variables: %%% mode: latex diff --git a/docs/user-guide/xml/xml-filters.tex b/docs/user-guide/xml/xml-filters.tex index 85f5945ab80a763bb72a0311ddf7ff9ccd4086f8..19bed6e4f55faf7ca20a734c7646a0647095eb12 100644 --- a/docs/user-guide/xml/xml-filters.tex +++ b/docs/user-guide/xml/xml-filters.tex @@ -69,9 +69,7 @@ As an example, consider: \end{lstlisting} This will create a sequence of files named \inltt{MyFile\_*\_fc.vtu} containing isocontours. -The result will be output every 100 time steps. Output directly to -\inltt{.vtu} or \inltt{.dat} is currently only supported for isocontours. -In other cases, the output should be a \inltt{.fld} file. +The result will be output every 100 time steps. \subsection{Time-averaged fields} diff --git a/library/CMakeLists.txt b/library/CMakeLists.txt index 7c8861dbc649f78eef212456de1b3405bc64a0eb..0622db07de727e5eaed97a0f6efebf4064d5d96a 100644 --- a/library/CMakeLists.txt +++ b/library/CMakeLists.txt @@ -1,31 +1,28 @@ -SET(LibrarySubDirs FieldUtils GlobalMapping LibUtilities LocalRegions - Collections MultiRegions SpatialDomains StdRegions) -SET(UnitTestSubDirs UnitTests) -SET(DemoSubDirs Demos) -SET(TimingsSubDirs Timings) -SET(SolverUtilsSubDirs SolverUtils) -SET(NekMeshUtilsSubDirs NekMeshUtils) - -SUBDIRS(${LibrarySubDirs}) - +# Main library sub-directories, required by all of Nektar++. 
+SUBDIRS(GlobalMapping LibUtilities LocalRegions Collections MultiRegions + SpatialDomains StdRegions) INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/library) -IF( NEKTAR_BUILD_UNIT_TESTS ) - SUBDIRS(${UnitTestSubDirs}) -ENDIF( NEKTAR_BUILD_UNIT_TESTS ) +IF (NEKTAR_BUILD_UNIT_TESTS) + SUBDIRS(UnitTests) +ENDIF() + +IF (NEKTAR_BUILD_DEMOS) + SUBDIRS(Demos) +ENDIF() -IF( NEKTAR_BUILD_DEMOS ) - SUBDIRS(${DemoSubDirs}) -ENDIF( NEKTAR_BUILD_DEMOS ) +IF (NEKTAR_BUILD_TIMINGS) + SUBDIRS(Timings) +ENDIF() -IF( NEKTAR_BUILD_TIMINGS ) - SUBDIRS(${TimingsSubDirs}) -ENDIF( NEKTAR_BUILD_TIMINGS ) +IF (NEKTAR_BUILD_SOLVERS) + SUBDIRS(SolverUtils) +ENDIF() -IF( NEKTAR_BUILD_SOLVERS ) - SUBDIRS(${SolverUtilsSubDirs}) -ENDIF( NEKTAR_BUILD_SOLVERS ) +IF (NEKTAR_UTILITY_NEKMESH) + SUBDIRS(NekMeshUtils) +ENDIF() -IF( NEKTAR_BUILD_UTILITIES ) - SUBDIRS(${NekMeshUtilsSubDirs}) -ENDIF( NEKTAR_BUILD_UTILITIES ) +IF (NEKTAR_UTILITY_FIELDCONVERT OR NEKTAR_UTILITY_NEKMESH OR NEKTAR_BUILD_SOLVERS) + SUBDIRS(FieldUtils) +ENDIF() diff --git a/library/Collections/BwdTrans.cpp b/library/Collections/BwdTrans.cpp index fbc0683d8da6612b08420f4ba899c356019f18f6..3239725b2c8387a049c1880fa773dd20399d3e72 100644 --- a/library/Collections/BwdTrans.cpp +++ b/library/Collections/BwdTrans.cpp @@ -33,7 +33,6 @@ // /////////////////////////////////////////////////////////////////////////////// -#include #include #include @@ -918,7 +917,7 @@ class BwdTrans_SumFac_Prism : public Operator { Blas::Dgemm('N', 'N', m_nquad2, m_numElmt, m_nmodes2-i, 1.0, m_base2.get()+mode*m_nquad2, m_nquad2, - &input[0]+mode1, totmodes, 0.0, + input.get()+mode1, totmodes, 0.0, &wsp[j*m_nquad2*m_numElmt*m_nmodes0+ cnt], m_nquad2); mode1 += m_nmodes2-i; @@ -1018,5 +1017,168 @@ OperatorKey BwdTrans_SumFac_Prism::m_type = GetOperatorFactory(). 
OperatorKey(ePrism, eBwdTrans, eSumFac,false), BwdTrans_SumFac_Prism::create, "BwdTrans_SumFac_Prism"); +/** + * @brief Backward transform operator using sum-factorisation (Pyr) + */ +class BwdTrans_SumFac_Pyr : public Operator +{ + public: + OPERATOR_CREATE(BwdTrans_SumFac_Pyr) + + virtual ~BwdTrans_SumFac_Pyr() + { + } + + virtual void operator()( + const Array &input, + Array &output, + Array &output1, + Array &output2, + Array &wsp) + { + ASSERTL1(wsp.num_elements() == m_wspSize, + "Incorrect workspace size"); + + // Assign second half of workspace for 2nd DGEMM operation. + int totmodes = m_stdExp->GetNcoeffs(); + + Array wsp2 + = wsp + m_nmodes0*m_nmodes1*m_nquad2*m_numElmt; + + Vmath::Zero(m_nmodes0*m_nmodes1*m_nquad2*m_numElmt, wsp, 1); + int i = 0; + int j = 0; + int mode = 0; + int mode1 = 0; + int cnt = 0; + for (i = 0; i < m_nmodes0; ++i) + { + for (j = 0; j < m_nmodes1; ++j, ++cnt) + { + int ijmax = max(i,j); + Blas::Dgemm('N', 'N', m_nquad2, m_numElmt, m_nmodes2-ijmax, + 1.0, m_base2.get()+mode*m_nquad2, m_nquad2, + input.get()+mode1, totmodes, 0.0, + wsp.get() + cnt*m_nquad2*m_numElmt, m_nquad2); + mode += m_nmodes2-ijmax; + mode1 += m_nmodes2-ijmax; + } + + //increment mode in case order1!=order2 + for(j = m_nmodes1; j < m_nmodes2-i; ++j) + { + int ijmax = max(i,j); + mode += m_nmodes2-ijmax; + } + } + + // vertex mode - currently (1+c)/2 x (1-b)/2 x (1-a)/2 + // component is evaluated + if(m_sortTopVertex) + { + for(i = 0; i < m_numElmt; ++i) + { + // top singular vertex + // (1+c)/2 x (1+b)/2 x (1-a)/2 component + Blas::Daxpy(m_nquad2, input[1+i*totmodes], + m_base2.get() + m_nquad2, 1, + &wsp[m_nquad2*m_numElmt] + i*m_nquad2, 1); + + // top singular vertex + // (1+c)/2 x (1-b)/2 x (1+a)/2 component + Blas::Daxpy(m_nquad2, input[1+i*totmodes], + m_base2.get() + m_nquad2, 1, + &wsp[m_nmodes1*m_nquad2*m_numElmt] + + i*m_nquad2, 1); + + // top singular vertex + // (1+c)/2 x (1+b)/2 x (1+a)/2 component + Blas::Daxpy(m_nquad2, input[1+i*totmodes], + 
m_base2.get() + m_nquad2, 1, + &wsp[(m_nmodes1+1)*m_nquad2*m_numElmt] + + i*m_nquad2, 1); + + } + } + + // Perform summation over '1' direction + mode = 0; + for(i = 0; i < m_nmodes0; ++i) + { + Blas::Dgemm('N', 'T', m_nquad1, m_nquad2*m_numElmt, m_nmodes1, + 1.0, m_base1.get(), m_nquad1, + wsp.get() + mode*m_nquad2*m_numElmt, + m_nquad2*m_numElmt, + 0.0, wsp2.get() + i*m_nquad1*m_nquad2*m_numElmt, + m_nquad1); + mode += m_nmodes1; + } + + // Perform summation over '0' direction + Blas::Dgemm('N', 'T', m_nquad0, m_nquad1*m_nquad2*m_numElmt, + m_nmodes0, 1.0, m_base0.get(), m_nquad0, + wsp2.get(), m_nquad1*m_nquad2*m_numElmt, + 0.0, output.get(), m_nquad0); + } + + virtual void operator()( + int dir, + const Array &input, + Array &output, + Array &wsp) + { + ASSERTL0(false, "Not valid for this operator."); + } + + + protected: + const int m_nquad0; + const int m_nquad1; + const int m_nquad2; + const int m_nmodes0; + const int m_nmodes1; + const int m_nmodes2; + Array m_base0; + Array m_base1; + Array m_base2; + bool m_sortTopVertex; + + private: + BwdTrans_SumFac_Pyr( + vector pCollExp, + CoalescedGeomDataSharedPtr pGeomData) + : Operator (pCollExp, pGeomData), + m_nquad0 (m_stdExp->GetNumPoints(0)), + m_nquad1 (m_stdExp->GetNumPoints(1)), + m_nquad2 (m_stdExp->GetNumPoints(2)), + m_nmodes0 (m_stdExp->GetBasisNumModes(0)), + m_nmodes1 (m_stdExp->GetBasisNumModes(1)), + m_nmodes2 (m_stdExp->GetBasisNumModes(2)), + m_base0 (m_stdExp->GetBasis(0)->GetBdata()), + m_base1 (m_stdExp->GetBasis(1)->GetBdata()), + m_base2 (m_stdExp->GetBasis(2)->GetBdata()) + { + m_wspSize = m_numElmt*m_nmodes0*m_nquad2*(m_nmodes1 + m_nquad1); + + if(m_stdExp->GetBasis(0)->GetBasisType() + == LibUtilities::eModified_A) + { + m_sortTopVertex = true; + } + else + { + m_sortTopVertex = false; + } + + } +}; + +/// Factory initialisation for the BwdTrans_SumFac_Pyr operator +OperatorKey BwdTrans_SumFac_Pyr::m_type = GetOperatorFactory(). 
+ RegisterCreatorFunction( + OperatorKey(ePyramid, eBwdTrans, eSumFac,false), + BwdTrans_SumFac_Pyr::create, "BwdTrans_SumFac_Pyr"); + } + } diff --git a/library/Collections/CMakeLists.txt b/library/Collections/CMakeLists.txt index fac078ef8489b9d118f873c906ef992d8953eacd..c7c1348b79e8706d3ae8404cbd6c50bf700bc4d4 100644 --- a/library/Collections/CMakeLists.txt +++ b/library/Collections/CMakeLists.txt @@ -10,7 +10,7 @@ SET(COLLECTIONS_SOURCES IProduct.cpp ) -SET(SOLVER_UTILS_HEADERS +SET(COLLECTIONS_HEADERS CoalescedGeomData.h Collection.h CollectionOptimisation.h @@ -18,9 +18,12 @@ SET(SOLVER_UTILS_HEADERS Operator.h ) -ADD_NEKTAR_LIBRARY(Collections lib ${NEKTAR_LIBRARY_TYPE} - ${COLLECTIONS_SOURCES} ${COLLECTIONS_HEADERS}) -TARGET_LINK_LIBRARIES(Collections LINK_PUBLIC LocalRegions) +ADD_NEKTAR_LIBRARY(Collections + SOURCES ${COLLECTIONS_SOURCES} + HEADERS ${COLLECTIONS_HEADERS} + DEPENDS LocalRegions + SUMMARY "Nektar++ Collections library" + DESCRIPTION "This library provides efficient multi-expansion implementations of operators on the various supported regions.") INSTALL(DIRECTORY ./ DESTINATION ${NEKTAR_INCLUDE_DIR}/Collections diff --git a/library/Collections/CollectionOptimisation.cpp b/library/Collections/CollectionOptimisation.cpp index 453faaeca51fdadd88c171f7e6e922238ff2c60e..53f9930b6fe0aacfa96c74e003f6e06f87f177ea 100644 --- a/library/Collections/CollectionOptimisation.cpp +++ b/library/Collections/CollectionOptimisation.cpp @@ -84,11 +84,16 @@ CollectionOptimisation::CollectionOptimisation( { for (it2 = elTypes.begin(); it2 != elTypes.end(); ++it2) { - defaultsPhysDeriv [ElmtOrder(it2->second, -1)] = eNoCollection; + // For 1<=N<=5 use StdMat otherwise IterPerExp or given default type for (int i = 1; i < 5; ++i) { defaults[ElmtOrder(it2->second, i)] = eStdMat; } + + // For 1<=N<=3 use SumFac otherwise NoCollection. 
Note that + // default is not currently overwritten by given default + // type + defaultsPhysDeriv [ElmtOrder(it2->second, -1)] = eNoCollection; for (int i = 1; i < 3; ++i) { defaultsPhysDeriv[ElmtOrder(it2->second, i)] = eSumFac; @@ -157,6 +162,7 @@ CollectionOptimisation::CollectionOptimisation( ASSERTL0(i != Collections::SIZE_ImplementationType, "Unknown default collection scheme: "+collinfo); + defaults.clear(); // Override default types for (it2 = elTypes.begin(); it2 != elTypes.end(); ++it2) { @@ -341,7 +347,7 @@ OperatorImpMap CollectionOptimisation::SetWithTimings( Array outarray2(maxsize); Array outarray3(maxsize); - Timer t; + LibUtilities::Timer t; if(verbose) { diff --git a/library/Collections/IProduct.cpp b/library/Collections/IProduct.cpp index c33d5e48b810fea124e6e368229ff61b8cc2beb3..ec2cd3309f36b7d1639178501777c725fb146dde 100644 --- a/library/Collections/IProduct.cpp +++ b/library/Collections/IProduct.cpp @@ -422,6 +422,101 @@ void PrismIProduct(bool sortTopVertex, int numElmt, } +/** + * + */ +void PyrIProduct(bool sortTopVertex, int numElmt, + int nquad0, int nquad1, int nquad2, + int nmodes0, int nmodes1, int nmodes2, + const Array &base0, + const Array &base1, + const Array &base2, + const Array &jac, + const Array &input, + Array &output, + Array &wsp) +{ + int totmodes = LibUtilities::StdPyrData::getNumberOfCoefficients( + nmodes0,nmodes1,nmodes2); + int totpoints = nquad0*nquad1*nquad2; + int cnt; + int mode, mode1; + + ASSERTL1(wsp.num_elements() >= numElmt*(nquad1*nquad2*nmodes0 + + nquad2*max(nquad0*nquad1,nmodes0*nmodes1)), + "Insufficient workspace size"); + + Vmath::Vmul(numElmt*totpoints,jac,1,input,1,wsp,1); + + Array wsp1 = wsp + numElmt * nquad2 + * (max(nquad0*nquad1, + nmodes0*nmodes1)); + + // Perform iproduct with respect to the '0' direction + Blas::Dgemm('T', 'N', nquad1*nquad2*numElmt, nmodes0, nquad0, + 1.0, wsp.get(), nquad0, base0.get(), + nquad0, 0.0, wsp1.get(), nquad1*nquad2*numElmt); + + // Inner product with respect 
to the '1' direction + mode = 0; + for(int i=0; i < nmodes0; ++i) + { + Blas::Dgemm('T', 'N', nquad2*numElmt, nmodes1, nquad1, + 1.0, wsp1.get()+ i*nquad1*nquad2*numElmt, nquad1, + base1.get(), nquad1, + 0.0, wsp.get() + mode*nquad2*numElmt,nquad2*numElmt); + mode += nmodes1; + } + + // Inner product with respect to the '2' direction + mode = mode1 = cnt = 0; + for(int i = 0; i < nmodes0; ++i) + { + for(int j = 0; j < nmodes1; ++j, ++cnt) + { + int ijmax = max(i,j); + Blas::Dgemm('T', 'N', nmodes2-ijmax, numElmt, nquad2, + 1.0, base2.get()+mode*nquad2, nquad2, + wsp.get()+cnt*nquad2*numElmt, nquad2, + 0.0, output.get()+mode1, totmodes); + mode += nmodes2-ijmax; + mode1 += nmodes2-ijmax; + } + + //increment mode in case order1!=order2 + for(int j = nmodes1; j < nmodes2; ++j) + { + int ijmax = max(i,j); + mode += nmodes2-ijmax; + } + } + + // fix for modified basis for top singular vertex component + // Already have evaluated (1+c)/2 (1-b)/2 (1-a)/2 + if(sortTopVertex) + { + for(int n = 0; n < numElmt; ++n) + { + // add in (1+c)/2 (1+b)/2 component + output[1+n*totmodes] += Blas::Ddot(nquad2, + base2.get()+nquad2,1, + &wsp[nquad2*numElmt + n*nquad2],1); + + // add in (1+c)/2 (1-b)/2 (1+a)/2 component + output[1+n*totmodes] += Blas::Ddot(nquad2, + base2.get()+nquad2,1, + &wsp[nquad2*nmodes1*numElmt+n*nquad2],1); + + // add in (1+c)/2 (1+b)/2 (1+a)/2 component + output[1+n*totmodes] += Blas::Ddot(nquad2, + base2.get()+nquad2,1, + &wsp[nquad2*(nmodes1+1)*numElmt+n*nquad2],1); + } + } +} + + + /** * */ diff --git a/library/Collections/IProduct.h b/library/Collections/IProduct.h index 6aa96ca3b61b159ca06ab22fde27216a375a4583..792021340d93df7b4f46e9d7cac558d1d4f5e455 100644 --- a/library/Collections/IProduct.h +++ b/library/Collections/IProduct.h @@ -82,6 +82,19 @@ void PrismIProduct(bool sortTopVert, int numElmt, Array &output, Array &wsp); + +void PyrIProduct(bool sortTopVert, int numElmt, + int nquad0, int nquad1, int nquad2, + int nmodes0, int nmodes1, int nmodes2, + 
const Array &base0, + const Array &base1, + const Array &base2, + const Array &jac, + const Array &input, + Array &output, + Array &wsp); + + void TetIProduct(bool sortTopEdge, int numElmt, int nquad0, int nquad1, int nquad2, int nmodes0, int nmodes1, int nmodes2, diff --git a/library/Collections/IProductWRTBase.cpp b/library/Collections/IProductWRTBase.cpp index 87e3e18777b2d2e0b88e886113399446a0569a80..3c08dd4cbe688fe7af3c1402332149f78c0bc880 100644 --- a/library/Collections/IProductWRTBase.cpp +++ b/library/Collections/IProductWRTBase.cpp @@ -33,7 +33,6 @@ // /////////////////////////////////////////////////////////////////////////////// -#include #include #include #include @@ -586,7 +585,7 @@ OperatorKey IProductWRTBase_SumFac_Tri::m_type = GetOperatorFactory(). /** - * @brief Backward transform operator using sum-factorisation (Hex) + * @brief Inner Product operator using sum-factorisation (Hex) */ class IProductWRTBase_SumFac_Hex : public Operator { @@ -765,7 +764,7 @@ OperatorKey IProductWRTBase_SumFac_Tet::m_type = GetOperatorFactory(). /** - * @brief Backward transform operator using sum-factorisation (Prism) + * @brief Inner Product operator using sum-factorisation (Prism) */ class IProductWRTBase_SumFac_Prism : public Operator { @@ -856,5 +855,99 @@ OperatorKey IProductWRTBase_SumFac_Prism::m_type = GetOperatorFactory(). 
OperatorKey(ePrism, eIProductWRTBase, eSumFac,false), IProductWRTBase_SumFac_Prism::create, "IProductWRTBase_SumFac_Prism"); + +/** + * @brief Inner Product operator using sum-factorisation (Pyr) + */ +class IProductWRTBase_SumFac_Pyr : public Operator +{ + public: + OPERATOR_CREATE(IProductWRTBase_SumFac_Pyr) + + virtual ~IProductWRTBase_SumFac_Pyr() + { + } + + virtual void operator()( + const Array &input, + Array &output, + Array &output1, + Array &output2, + Array &wsp) + { + + ASSERTL1(wsp.num_elements() == m_wspSize, + "Incorrect workspace size"); + + PyrIProduct(m_sortTopVertex, m_numElmt, + m_nquad0, m_nquad1, m_nquad2, + m_nmodes0, m_nmodes1, m_nmodes2, + m_base0, m_base1, m_base2, + m_jac,input,output,wsp); + } + + virtual void operator()( + int dir, + const Array &input, + Array &output, + Array &wsp) + { + ASSERTL0(false, "Not valid for this operator."); + } + + protected: + const int m_nquad0; + const int m_nquad1; + const int m_nquad2; + const int m_nmodes0; + const int m_nmodes1; + const int m_nmodes2; + Array m_jac; + Array m_base0; + Array m_base1; + Array m_base2; + bool m_sortTopVertex; + + private: + IProductWRTBase_SumFac_Pyr( + vector pCollExp, + CoalescedGeomDataSharedPtr pGeomData) + : Operator (pCollExp, pGeomData), + m_nquad0 (m_stdExp->GetNumPoints(0)), + m_nquad1 (m_stdExp->GetNumPoints(1)), + m_nquad2 (m_stdExp->GetNumPoints(2)), + m_nmodes0 (m_stdExp->GetBasisNumModes(0)), + m_nmodes1 (m_stdExp->GetBasisNumModes(1)), + m_nmodes2 (m_stdExp->GetBasisNumModes(2)), + m_base0 (m_stdExp->GetBasis(0)->GetBdata()), + m_base1 (m_stdExp->GetBasis(1)->GetBdata()), + m_base2 (m_stdExp->GetBasis(2)->GetBdata()) + + { + m_jac = pGeomData->GetJacWithStdWeights(pCollExp); + + m_wspSize = m_numElmt * m_nquad2 + *(max(m_nquad0*m_nquad1,m_nmodes0*m_nmodes1)) + + m_nquad1*m_nquad2*m_numElmt*m_nmodes0; + + if(m_stdExp->GetBasis(0)->GetBasisType() + == LibUtilities::eModified_A) + { + m_sortTopVertex = true; + } + else + { + m_sortTopVertex = false; + } + 
} +}; + +/// Factory initialisation for the IProductWRTBase_SumFac_Pyr operator +OperatorKey IProductWRTBase_SumFac_Pyr::m_type = GetOperatorFactory(). + RegisterCreatorFunction( + OperatorKey(ePyramid, eIProductWRTBase, eSumFac,false), + IProductWRTBase_SumFac_Pyr::create, "IProductWRTBase_SumFac_Pyr"); + + } } diff --git a/library/Collections/IProductWRTDerivBase.cpp b/library/Collections/IProductWRTDerivBase.cpp index ee1557a118ddf69502cf508c0f9bd9e2aac84db7..768d917345050bbf310f159f36e3bbac6f924ce3 100644 --- a/library/Collections/IProductWRTDerivBase.cpp +++ b/library/Collections/IProductWRTDerivBase.cpp @@ -33,7 +33,6 @@ // /////////////////////////////////////////////////////////////////////////////// -#include #include #include #include @@ -148,7 +147,7 @@ class IProductWRTDerivBase_StdMat : public Operator { LibUtilities::PointsKeyVector PtsKey = m_stdExp->GetPointsKeys(); m_dim = PtsKey.size(); - m_coordim = m_stdExp->GetCoordim(); + m_coordim = pCollExp[0]->GetCoordim(); int nqtot = m_stdExp->GetTotPoints(); int nmodes = m_stdExp->GetNcoeffs(); @@ -697,7 +696,38 @@ class IProductWRTDerivBase_SumFac_Tri : public Operator { } - virtual void operator()( + /** + * This method calculates: + * + * \f[ (d\phi/dx,in[0]) + (d\phi/dy,in[1]) \f] + * + * which can be represented in terms of local cartesian + * derivaties as: + * + * \f[ ((d\phi/d\xi_0\, d\xi_0/dx + + * d\phi/d\xi_1\, d\xi_1/dx),in[0]) + \f] + * + * \f[ ((d\phi/d\xi_0\, d\xi_0/dy + + * d\phi/d\xi_1\, d\xi_1/dy),in[1]) + \f] + * + * where we note that + * + * \f[ d\phi/d\xi_0 = d\phi/d\eta_0\, d\eta_0/d\xi_0 = + * d\phi/d\eta_0 2/(1-\eta_1) \f] + * + * \f[ d\phi/d\xi_1 = d\phi/d\eta_1\, d\eta_1/d\xi_1 + + * d\phi/d\eta_1\, d\eta_1/d\xi_1 = d\phi/d\eta_0 (1+\eta_0)/(1-\eta_1) + * + d\phi/d\eta_1 \f] + * + * and so the full inner products are + * + * \f[ (d\phi/dx,in[0]) + (dphi/dy,in[1]) = + * (d\phi/d\eta_0, ((2/(1-\eta_1) (d\xi_0/dx in[0] + d\xi_0/dy in[1]) + * + (1-\eta_0)/(1-\eta_1) (d\xi_1/dx 
in[0]+d\xi_1/dy in[1])) + * + (d\phi/d\eta_1, (d\xi_1/dx in[0] + d\xi_1/dy in[1])) \f] + * + */ + virtual void operator()( const Array &entry0, Array &entry1, Array &entry2, @@ -719,26 +749,6 @@ class IProductWRTDerivBase_SumFac_Tri : public Operator tmp[0] = wsp; tmp[1] = wsp + nmax; wsp1 = wsp + 2*nmax; - - // calculate (dphi/dx,in[0]) = ((dphi/dxi_0 dxi_0/dx + - // dphi/dxi_1 dxi_1/dx),in[0]) - // + (dphi/dy,in[1]) = ((dphi/dxi_0 dxi_0/dy + - // dphi/dxi_1 dxi_1/dy),in[1]) - // - // Note dphi/dxi_0 = - // dphi/deta_0 deta_0/dxi_0 = dphi/deta_0 2/(1-eta_1) - // - // dphi/dxi_1 = - // dphi/deta_1 deta_1/dxi_1 + dphi/deta_1 deta_1/dxi_1 = - // dphi/deta_0 (1+eta_0)/(1-eta_1) + dphi/deta_1 - // - // and so the full inner products are - // - // (dphi/dx,in[0]) + (dphi/dy,in[1]) - // = (dphi/deta_0, ((2/(1-eta_1) (dxi_0/dx in[0]+dxi_0/dy in[1]) - // + (1_eta_0)/(1-eta_1) (dxi_1/dx in[0]+dxi_1/dy in[1])) - // + (dphi/deta_1, (dxi_1/dx in[0] + dxi_1/dy in[1])) - for(int i = 0; i < 2; ++i) { Vmath::Vmul (ntot,m_derivFac[i],1, in[0],1, tmp[i],1); @@ -1017,6 +1027,60 @@ class IProductWRTDerivBase_SumFac_Tet : public Operator public: OPERATOR_CREATE(IProductWRTDerivBase_SumFac_Tet) + + /** + * This method calculates: + * + * \f[ (d\phi/dx,in[0]) + (d\phi/dy,in[1]) + (d\phi/dz,in[2]) \f] + * + * which can be represented in terms of local cartesian + * derivaties as: + * + * \f[ ((d\phi/d\xi_0\, d\xi_0/dx + + * d\phi/d\xi_1\, d\xi_1/dx + + * d\phi/d\xi_2\, d\xi_2/dx),in[0]) + \f] + * + * \f[ ((d\phi/d\xi_0\, d\xi_0/dy + + * d\phi/d\xi_1\, d\xi_1/dy + + * d\phi/d\xi_2\, d\xi_2/dy),in[1]) + \f] + * + * \f[ ((d\phi/d\xi_0\, d\xi_0/dz + + * d\phi/d\xi_1\, d\xi_1/dz + + * d\phi/d\xi_2\, d\xi_2/dz),in[2]) \, \f] + * + * where we note that + * + * \f[ d\phi/d\xi_0 = d\phi/d\eta_0 4/((1-\eta_1)(1-\eta_2)) /f] + * + * \f[ d\phi/d\xi_1 = d\phi/d\eta_0 2(1+\eta_0)/((1-\eta_1)(1-\eta_2)) + * + d\phi/d\eta_1 2/(1-\eta_2) \f] + * + * \f[ d\phi/d\xi_2 = d\phi/d\eta_0 
2(1+\eta_0)/((1-\eta_1)(1-\eta_2)) + * + d\phi/d\eta_1 (1+\eta_1)/(1-\eta_2) + d\phi/d\eta_2 \f] + * + * and so the full inner products are + * + * \f[ (d\phi/dx,in[0]) + (d\phi/dy,in[1]) + (d\phi/dz,in[2]) = \f] + * + * \f[ (d\phi/d\eta_0, fac0 (tmp0 + fac1(tmp1 + tmp2))) + * + (d\phi/d\eta_1, fac2 (tmp1 + fac3 tmp2)) + * + (d\phi/d\eta_2, tmp2) \f] + * + * where + * + * \f[ \begin{array}{lcl} + * tmp0 &=& (d\xi_0/dx in[0] + d\xi_0/dy in[1] + d\xi_0/dz in[2]) \\ + * tmp1 &=& (d\xi_1/dx in[0] + d\xi_1/dy in[1] + d\xi_1/dz in[2]) \\ + * tmp2 &=& (d\xi_2/dx in[0] + d\xi_2/dy in[1] + d\xi_2/dz in[2]) + * \end{array} \f] + * + * \f[ \begin{array}{lcl} + * fac0 &= & 4/((1-\eta_1)(1-\eta_2)) \\ + * fac1 &= & (1+\eta_0)/2 \\ + * fac2 &= & 2/(1-\eta_2) \\ + * fac3 &= & (1+\eta_1)/2 \end{array} \f] + * + */ virtual void operator()( const Array &entry0, Array &entry1, @@ -1042,44 +1106,6 @@ class IProductWRTDerivBase_SumFac_Tet : public Operator tmp[i] = wsp + i*nmax; } - - // calculate (dphi/dx,in[0]) = ((dphi/dxi_0 dxi_0/dx + - // dphi/dxi_1 dxi_1/dx + - // dphi/dxi_2 dxi_2/dx),in[0]) - // + (dphi/dy,in[1]) = ((dphi/dxi_0 dxi_0/dy + - // dphi/dxi_1 dxi_1/dy + - // dphi/dxi_2 dxi_2/dy),in[1]) - // + (dphi/dz,in[2]) = ((dphi/dxi_0 dxi_0/dz + - // dphi/dxi_1 dxi_1/dz + - // dphi/dxi_2 dxi_2/dz),in[1]) - // - // Note dphi/dxi_0 = - // dphi/deta_0 4/((1-eta_1)(1-eta2)) - // - // dphi/dxi_1 = - // dphi/deta_0 2(1+eta_0)/((1-eta_1)(1-eta_2)) + - // dphi/deta_1 2/(1-eta_2) - // - // dphi/dxi_2 = - // dphi/deta_0 2(1+eta_0)/((1-eta_1)(1-eta_2)) + - // dphi/deta_1 (1+eta_1)/(1-eta_2) + dphi/deta_2 - // - // and so the full inner products are - // - // (dphi/dx,in[0]) + (dphi/dy,in[1]) + (dphi/dz,in[2]) - // = (dphi/deta_0, fac0 (tmp0 + fac1(tmp1 + tmp2))) - // + (dphi/deta_1, fac2 (tmp1 + fac3 tmp2)) - // + (dphi/deta_2, tmp2) - // - // tmp0 = (dxi_0/dx in[0] + dxi_0/dy in[1] + dxi_0/dz in[2]) - // tmp1 = (dxi_1/dx in[0] + dxi_1/dy in[1] + dxi_1/dz in[2]) - // tmp2 = (dxi_2/dx in[0] 
+ dxi_2/dy in[1] + dxi_2/dz in[2]) - - // fac0 = 4/((1-eta_1)(1-eta2)) - // fac1 = (1+eta_0)/2 - // fac2 = 2/(1-eta_2) - // fac3 = (1+eta_1)/2 - for(int i = 0; i < 3; ++i) { Vmath::Vmul (ntot,m_derivFac[i],1, in[0],1,tmp[i],1); @@ -1259,6 +1285,52 @@ class IProductWRTDerivBase_SumFac_Prism : public Operator { } + /** + * This method calculates: + * + * \f[ (d\phi/dx,in[0]) + (d\phi/dy,in[1]) + (d\phi/dz,in[2]) \f] + * + * which can be represented in terms of local cartesian + * derivaties as: + * + * \f[ ((d\phi/d\xi_0\, d\xi_0/dx + + * d\phi/d\xi_1\, d\xi_1/dx + + * d\phi/d\xi_2\, d\xi_2/dx),in[0]) + \f] + * + * \f[ ((d\phi/d\xi_0\, d\xi_0/dy + + * d\phi/d\xi_1\, d\xi_1/dy + + * d\phi/d\xi_2\, d\xi_2/dy),in[1]) + \f] + * + * \f[ ((d\phi/d\xi_0\, d\xi_0/dz + + * d\phi/d\xi_1\, d\xi_1/dz + + * d\phi/d\xi_2\, d\xi_2/dz),in[2]) \, \f] + * + * where we note that + * + * \f[ d\phi/d\xi_0 = + * d\phi/d\eta_0 d\eta_0/d\xi_0 = d\phi/d\eta_0 2/(1-\eta_2) \f] + * + * \f[ d\phi/d\xi_2 = + * d\phi/d\eta_0 d\eta_0/d\xi_2 + d\phi/d\eta_2 d\eta_2/d\xi_2 = + * d\phi/d\eta_0 (1+\eta_0)/(1-\eta_2) + d\phi/d\eta_2 \f] + * + * + * and so the full inner products are + * + * \f[ (d\phi/dx,in[0]) + (d\phi/dy,in[1]) + (d\phi/dz,in[2]) = \f] + * + * \f[ (d\phi/d\eta_0, ((2/(1-\eta_2) (d\xi_0/dx in[0] + d\xi_0/dy in[1] + * + d\xi_0/dz in[2]) + * + (1-\eta_0)/(1-\eta_2) (d\xi_2/dx in[0] + d\xi_2/dy in[1] + * + d\xi_2/dz in[2] )) + \f] + * + * \f[ (d\phi/d\eta_1, (d\xi_1/dx in[0] + d\xi_1/dy in[1] + * + d\xi_1/dz in[2])) + \f] + * + * \f[ (d\phi/d\eta_2, (d\xi_2/dx in[0] + d\xi_2/dy in[1] + * + d\xi_2/dz in[2])) \f] + * + */ virtual void operator()( const Array &entry0, Array &entry1, @@ -1284,32 +1356,6 @@ class IProductWRTDerivBase_SumFac_Prism : public Operator tmp[i] = wsp + i*nmax; } - // calculate (dphi/dx,in[0]) = ((dphi/dxi_0 dxi_0/dx + - // dphi/dxi_1 dxi_1/dx),in[0]) - // + (dphi/dy,in[1]) = ((dphi/dxi_0 dxi_0/dy + - // dphi/dxi_1 dxi_1/dy),in[1]) - // + (dphi/dz,in[2]) = 
((dphi/dxi_0 dxi_0/dz + - // dphi/dxi_1 dxi_1/dz),in[2]) - // - // Note dphi/dxi_0 = - // dphi/deta_0 deta_0/dxi_0 = dphi/deta_0 2/(1-eta_2) - // - // dphi/dxi_2 = - // dphi/deta_0 deta_0/dxi_2 + dphi/deta_2 deta_2/dxi_2 = - // dphi/deta_0 (1+eta_0)/(1-eta_2) + dphi/deta_2 - // - // and so the full inner products are - // - // (dphi/dx,in[0]) + (dphi/dy,in[1]) + (dphi/dz,in[2]) - // = (dphi/deta_0, ((2/(1-eta_2) (dxi_0/dx in[0] + dxi_0/dy in[1] - // + dxi_0/dz in[2]) - // + (1_eta_0)/(1-eta_2) (dxi_2/dx in[0] + dxi_2/dy in[1] - // + dxi_2/dz in[2] )) - // + (dphi/deta_1, (dxi_1/dx in[0] + dxi_1/dy in[1] - // + dxi_1/dz in[2])) - // + (dphi/deta_2, (dxi_2/dx in[0] + dxi_2/dy in[1] - // + dxi_2/dz in[2])) - for(int i = 0; i < 3; ++i) { Vmath::Vmul (ntot,m_derivFac[i],1, in[0],1, @@ -1453,5 +1499,256 @@ OperatorKey IProductWRTDerivBase_SumFac_Prism::m_type = GetOperatorFactory(). IProductWRTDerivBase_SumFac_Prism::create, "IProductWRTDerivBase_SumFac_Prism"); + + +/** + * @brief Inner product WRT deriv base operator using sum-factorisation (Pyr) + */ +class IProductWRTDerivBase_SumFac_Pyr : public Operator +{ + public: + OPERATOR_CREATE(IProductWRTDerivBase_SumFac_Pyr) + + virtual ~IProductWRTDerivBase_SumFac_Pyr() + { + } + + + /** + * This method calculates: + * + * \f[ (d\phi/dx,in[0]) + (d\phi/dy,in[1]) + (d\phi/dz,in[2]) \f] + * + * which can be represented in terms of local cartesian + * derivaties as: + * + * \f[ ((d\phi/d\xi_0\, d\xi_0/dx + + * d\phi/d\xi_1\, d\xi_1/dx + + * d\phi/d\xi_2\, d\xi_2/dx),in[0]) + \f] + * + * \f[ ((d\phi/d\xi_0\, d\xi_0/dy + + * d\phi/d\xi_1\, d\xi_1/dy + + * d\phi/d\xi_2\, d\xi_2/dy),in[1]) + \f] + * + * \f[ ((d\phi/d\xi_0\, d\xi_0/dz + + * d\phi/d\xi_1\, d\xi_1/dz + + * d\phi/d\xi_2\, d\xi_2/dz),in[2]) \, \f] + * + * where we note that + * + * \f[ d\phi/d\xi_0 = + * d\phi/d\eta_0\, d\eta_0/d\xi_0 = + * d\phi/d\eta_0\, 2/(1-\eta_2). 
\f] + * + * \f[ d\phi/d\xi_1 = + * d\phi/d\eta_1\, d\eta_1/d\xi_1 = + * d\phi/d\eta_1\, 2/(1-\eta_2) \f] + * + * \f[ d\phi/d\xi_2 = + * d\phi/d\eta_0\, d\eta_0/d\xi_2 + + * d\phi/d\eta_1\, d\eta_1/d\xi_2 + + * d\phi/d\eta_2\, d\eta_2/d\xi_2 = + * d\phi/d\eta_0 (1+\eta_0)/(1-\eta_2) + + * d\phi/d\eta_1 (1+\eta_1)/(1-\eta_2) + d\phi/d\eta_2 \f] + * + * and so the full inner products are + * + * \f[ (d\phi/dx,in[0]) + (d\phi/dy,in[1]) + (d\phi/dz,in[2]) = \f] + * + * \f[ (d\phi/d\eta_0, ((2/(1-\eta_2) (d\xi_0/dx in[0] + + * d\xi_0/dy in[1] + + * (1-\eta_0)/(1-\eta_2) (d\xi_2/dx in[0] + d\xi_2/dy in[1] + * + d\xi_2/dz in[2] )) + \f] + * \f[ (d\phi/d\eta_1, ((2/(1-\eta_2) (d\xi_1/dx in[0] + + * d\xi_0/dy in[1] + d\xi_0/dz in[2]) + + * (1-\eta_1)/(1-\eta_2) (d\xi_2/dx in[0] + d\xi_2/dy in[1] + + * d\xi_2/dz in[2] )) \f] + * + * \f[ (d\phi/d\eta_2, (d\xi_2/dx in[0] + d\xi_2/dy in[1] + + * d\xi_2/dz in[2])) \f] + */ + virtual void operator()( + const Array &entry0, + Array &entry1, + Array &entry2, + Array &entry3, + Array &wsp) + { + unsigned int nPhys = m_stdExp->GetTotPoints(); + unsigned int ntot = m_numElmt*nPhys; + unsigned int nmodes = m_stdExp->GetNcoeffs(); + unsigned int nmax = max(ntot,m_numElmt*nmodes); + Array > in(3); + Array output, wsp1; + Array > tmp(3); + + in[0] = entry0; in[1] = entry1; + in[2] = entry2; + + output = entry3; + + for(int i = 0; i < 3; ++i) + { + tmp[i] = wsp + i*nmax; + } + + for(int i = 0; i < 3; ++i) + { + Vmath::Vmul (ntot,m_derivFac[i],1, in[0],1, tmp[i],1); + for(int j = 1; j < 3; ++j) + { + Vmath::Vvtvp (ntot,m_derivFac[i+3*j],1, + in[j],1, tmp[i], 1, tmp[i],1); + } + } + wsp1 = wsp + 3*nmax; + + // Sort into eta factors + for (int i = 0; i < m_numElmt; ++i) + { + // scale tmp[0] by fac0 + Vmath::Vmul(nPhys,&m_fac0[0],1,tmp[0].get()+i*nPhys,1, + tmp[0].get()+i*nPhys,1); + + // scale tmp[2] by fac1 and add to tmp0 + Vmath::Vvtvp(nPhys,&m_fac1[0],1,tmp[2].get()+i*nPhys,1, + tmp[0].get()+i*nPhys,1,tmp[0].get()+i*nPhys,1); + + // scale 
tmp[1] by fac0 + Vmath::Vmul(nPhys,&m_fac0[0],1,tmp[1].get()+i*nPhys,1, + tmp[1].get()+i*nPhys,1); + + // scale tmp[2] by fac2 and add to tmp1 + Vmath::Vvtvp(nPhys,&m_fac2[0],1,tmp[2].get()+i*nPhys,1, + tmp[1].get()+i*nPhys,1,tmp[1].get()+i*nPhys,1); + } + + // calculate Iproduct WRT Std Deriv + PyrIProduct(m_sortTopVertex, m_numElmt, + m_nquad0, m_nquad1, m_nquad2, + m_nmodes0, m_nmodes1, m_nmodes2, + m_derbase0, m_base1, m_base2, + m_jac,tmp[0],output,wsp1); + + PyrIProduct(m_sortTopVertex, m_numElmt, + m_nquad0, m_nquad1, m_nquad2, + m_nmodes0, m_nmodes1, m_nmodes2, + m_base0, m_derbase1, m_base2, + m_jac,tmp[1],tmp[0],wsp1); + Vmath::Vadd(m_numElmt*nmodes,tmp[0],1,output,1,output,1); + + PyrIProduct(m_sortTopVertex, m_numElmt, + m_nquad0, m_nquad1, m_nquad2, + m_nmodes0, m_nmodes1, m_nmodes2, + m_base0, m_base1, m_derbase2, + m_jac,tmp[2],tmp[0],wsp1); + Vmath::Vadd(m_numElmt*nmodes,tmp[0],1,output,1,output,1); + } + + virtual void operator()( + int dir, + const Array &input, + Array &output, + Array &wsp) + { + ASSERTL0(false, "Not valid for this operator."); + } + + + protected: + const int m_nquad0; + const int m_nquad1; + const int m_nquad2; + const int m_nmodes0; + const int m_nmodes1; + const int m_nmodes2; + Array m_jac; + Array m_base0; + Array m_base1; + Array m_base2; + Array m_derbase0; + Array m_derbase1; + Array m_derbase2; + Array m_derivFac; + Array m_fac0; + Array m_fac1; + Array m_fac2; + bool m_sortTopVertex; + + private: + IProductWRTDerivBase_SumFac_Pyr( + vector pCollExp, + CoalescedGeomDataSharedPtr pGeomData) + : Operator (pCollExp, pGeomData), + m_nquad0 (m_stdExp->GetNumPoints(0)), + m_nquad1 (m_stdExp->GetNumPoints(1)), + m_nquad2 (m_stdExp->GetNumPoints(2)), + m_nmodes0 (m_stdExp->GetBasisNumModes(0)), + m_nmodes1 (m_stdExp->GetBasisNumModes(1)), + m_nmodes2 (m_stdExp->GetBasisNumModes(2)), + m_base0 (m_stdExp->GetBasis(0)->GetBdata()), + m_base1 (m_stdExp->GetBasis(1)->GetBdata()), + m_base2 (m_stdExp->GetBasis(2)->GetBdata()), + 
m_derbase0(m_stdExp->GetBasis(0)->GetDbdata()), + m_derbase1(m_stdExp->GetBasis(1)->GetDbdata()), + m_derbase2(m_stdExp->GetBasis(2)->GetDbdata()) + + { + m_jac = pGeomData->GetJacWithStdWeights(pCollExp); + m_wspSize = 6 * m_numElmt * (max(m_nquad0*m_nquad1*m_nquad2, + m_nmodes0*m_nmodes1*m_nmodes2)); + m_derivFac = pGeomData->GetDerivFactors(pCollExp); + + if(m_stdExp->GetBasis(0)->GetBasisType() + == LibUtilities::eModified_A) + { + m_sortTopVertex = true; + } + else + { + m_sortTopVertex = false; + } + + const Array& z0 + = m_stdExp->GetBasis(0)->GetZ(); + const Array& z1 + = m_stdExp->GetBasis(1)->GetZ(); + const Array& z2 + = m_stdExp->GetBasis(2)->GetZ(); + + m_fac0 = Array(m_nquad0*m_nquad1*m_nquad2); + m_fac1 = Array(m_nquad0*m_nquad1*m_nquad2); + m_fac2 = Array(m_nquad0*m_nquad1*m_nquad2); + + for (int i = 0; i < m_nquad0; ++i) + { + for(int j = 0; j < m_nquad1; ++j) + { + for(int k = 0; k < m_nquad2; ++k) + { + // set up geometric factor: 2/(1-z2) + m_fac0[i + j*m_nquad0 + k*m_nquad0*m_nquad1] + = 2.0/(1-z2[k]); + // set up geometric factor: (1+z0)/(1-z2) + m_fac1[i + j*m_nquad0 + k*m_nquad0*m_nquad1] + = (1+z0[i])/(1-z2[k]); + // set up geometric factor: (1+z1)/(1-z2) + m_fac2[i + j*m_nquad0 + k*m_nquad0*m_nquad1] + = (1+z1[j])/(1-z2[k]); + + } + } + } + } +}; + +/// Factory initialisation for the IProductWRTDerivBase_SumFac_Pyr operator +OperatorKey IProductWRTDerivBase_SumFac_Pyr::m_type = GetOperatorFactory(). 
+ RegisterCreatorFunction( + OperatorKey(ePyramid, eIProductWRTDerivBase, eSumFac, false), + IProductWRTDerivBase_SumFac_Pyr::create, + "IProductWRTDerivBase_SumFac_Pyr"); + + } } diff --git a/library/Collections/Operator.cpp b/library/Collections/Operator.cpp index 48821b19a176d1a1bdea0a4b0aec53886627efb3..3b9cfe326bf4716731ccefe47b718463167baf22 100644 --- a/library/Collections/Operator.cpp +++ b/library/Collections/Operator.cpp @@ -33,7 +33,6 @@ // /////////////////////////////////////////////////////////////////////////////// -#include #include #include @@ -109,11 +108,8 @@ Operator::~Operator() */ OperatorFactory& GetOperatorFactory() { - typedef Loki::SingletonHolder Type; - return Type::Instance(); + static OperatorFactory instance; + return instance; } diff --git a/library/Collections/PhysDeriv.cpp b/library/Collections/PhysDeriv.cpp index da8eb73631f879b5a60e49782d11a83f97b6d9d1..bef96383d14f20a6229f94c1fca2547f0a24ed7f 100644 --- a/library/Collections/PhysDeriv.cpp +++ b/library/Collections/PhysDeriv.cpp @@ -33,7 +33,6 @@ // /////////////////////////////////////////////////////////////////////////////// -#include #include #include @@ -938,7 +937,7 @@ class PhysDeriv_SumFac_Hex : public Operator Array > out(3); out[0] = output0; out[1] = output1; out[2] = output2; - for(int i = 0; i < m_dim; ++i) + for(int i = 0; i < 3; ++i) { Diff[i] = wsp + i*ntot; } @@ -967,10 +966,10 @@ class PhysDeriv_SumFac_Hex : public Operator // calculate full derivative for(int i = 0; i < m_coordim; ++i) { - Vmath::Vmul(ntot,m_derivFac[i*m_dim],1,Diff[0],1,out[i],1); - for(int j = 1; j < m_dim; ++j) + Vmath::Vmul(ntot,m_derivFac[i*3],1,Diff[0],1,out[i],1); + for(int j = 1; j < 3; ++j) { - Vmath::Vvtvp (ntot, m_derivFac[i*m_dim+j], 1, + Vmath::Vvtvp (ntot, m_derivFac[i*3+j], 1, Diff[j], 1, out[i], 1, out[i], 1); @@ -989,7 +988,7 @@ class PhysDeriv_SumFac_Hex : public Operator Array tmp0,tmp1,tmp2; Array > Diff(3); - for(int i = 0; i < m_dim; ++i) + for(int i = 0; i < 3; ++i) { 
Diff[i] = wsp + i*ntot; } @@ -1016,10 +1015,10 @@ class PhysDeriv_SumFac_Hex : public Operator } // calculate full derivative - Vmath::Vmul(ntot,m_derivFac[dir*m_dim],1,Diff[0],1,output,1); - for(int j = 1; j < m_dim; ++j) + Vmath::Vmul(ntot,m_derivFac[dir*3],1,Diff[0],1,output,1); + for(int j = 1; j < 3; ++j) { - Vmath::Vvtvp (ntot, m_derivFac[dir*m_dim+j], 1, + Vmath::Vvtvp (ntot, m_derivFac[dir*3+j], 1, Diff[j], 1, output, 1, output, 1); @@ -1028,7 +1027,6 @@ class PhysDeriv_SumFac_Hex : public Operator protected: Array m_derivFac; - int m_dim; int m_coordim; const int m_nquad0; const int m_nquad1; @@ -1048,7 +1046,6 @@ class PhysDeriv_SumFac_Hex : public Operator { LibUtilities::PointsKeyVector PtsKey = m_stdExp->GetPointsKeys(); - m_dim = PtsKey.size(); m_coordim = m_stdExp->GetCoordim(); m_derivFac = pGeomData->GetDerivFactors(pCollExp); @@ -1096,7 +1093,7 @@ class PhysDeriv_SumFac_Tet : public Operator Array > out(3); out[0] = output0; out[1] = output1; out[2] = output2; - for(int i = 0; i < m_dim; ++i) + for(int i = 0; i < 3; ++i) { Diff[i] = wsp + i*ntot; } @@ -1161,10 +1158,10 @@ class PhysDeriv_SumFac_Tet : public Operator // calculate full derivative for(int i = 0; i < m_coordim; ++i) { - Vmath::Vmul(ntot,m_derivFac[i*m_dim],1,Diff[0],1,out[i],1); - for(int j = 1; j < m_dim; ++j) + Vmath::Vmul(ntot,m_derivFac[i*3],1,Diff[0],1,out[i],1); + for(int j = 1; j < 3; ++j) { - Vmath::Vvtvp (ntot, m_derivFac[i*m_dim+j], 1, + Vmath::Vvtvp (ntot, m_derivFac[i*3+j], 1, Diff[j], 1, out[i], 1, out[i], 1); } } @@ -1181,7 +1178,7 @@ class PhysDeriv_SumFac_Tet : public Operator Array tmp0,tmp1,tmp2; Array > Diff(3); - for(int i = 0; i < m_dim; ++i) + for(int i = 0; i < 3; ++i) { Diff[i] = wsp + i*ntot; } @@ -1244,17 +1241,16 @@ class PhysDeriv_SumFac_Tet : public Operator } // calculate full derivative - Vmath::Vmul(ntot,m_derivFac[dir*m_dim],1,Diff[0],1,output,1); - for(int j = 1; j < m_dim; ++j) + Vmath::Vmul(ntot,m_derivFac[dir*3],1,Diff[0],1,output,1); + for(int j = 
1; j < 3; ++j) { - Vmath::Vvtvp (ntot, m_derivFac[dir*m_dim+j], 1, + Vmath::Vvtvp (ntot, m_derivFac[dir*3+j], 1, Diff[j], 1, output, 1, output, 1); } } protected: Array m_derivFac; - int m_dim; int m_coordim; const int m_nquad0; const int m_nquad1; @@ -1278,7 +1274,6 @@ class PhysDeriv_SumFac_Tet : public Operator { LibUtilities::PointsKeyVector PtsKey = m_stdExp->GetPointsKeys(); - m_dim = PtsKey.size(); m_coordim = m_stdExp->GetCoordim(); m_derivFac = pGeomData->GetDerivFactors(pCollExp); @@ -1358,7 +1353,7 @@ class PhysDeriv_SumFac_Prism : public Operator Array > out(3); out[0] = output0; out[1] = output1; out[2] = output2; - for(int i = 0; i < m_dim; ++i) + for(int i = 0; i < 3; ++i) { Diff[i] = wsp + i*ntot; } @@ -1401,10 +1396,10 @@ class PhysDeriv_SumFac_Prism : public Operator // calculate full derivative for(int i = 0; i < m_coordim; ++i) { - Vmath::Vmul(ntot,m_derivFac[i*m_dim],1,Diff[0],1,out[i],1); - for(int j = 1; j < m_dim; ++j) + Vmath::Vmul(ntot,m_derivFac[i*3],1,Diff[0],1,out[i],1); + for(int j = 1; j < 3; ++j) { - Vmath::Vvtvp (ntot, m_derivFac[i*m_dim+j], 1, + Vmath::Vvtvp (ntot, m_derivFac[i*3+j], 1, Diff[j], 1, out[i], 1, out[i], 1); } } @@ -1421,7 +1416,7 @@ class PhysDeriv_SumFac_Prism : public Operator Array tmp0,tmp1,tmp2; Array > Diff(3); - for(int i = 0; i < m_dim; ++i) + for(int i = 0; i < 3; ++i) { Diff[i] = wsp + i*ntot; } @@ -1462,17 +1457,16 @@ class PhysDeriv_SumFac_Prism : public Operator } // calculate full derivative - Vmath::Vmul(ntot,m_derivFac[dir*m_dim],1,Diff[0],1,output,1); - for(int j = 1; j < m_dim; ++j) + Vmath::Vmul(ntot,m_derivFac[dir*3],1,Diff[0],1,output,1); + for(int j = 1; j < 3; ++j) { - Vmath::Vvtvp (ntot, m_derivFac[dir*m_dim+j], 1, + Vmath::Vvtvp (ntot, m_derivFac[dir*3+j], 1, Diff[j], 1, output, 1, output, 1); } } protected: Array m_derivFac; - int m_dim; int m_coordim; const int m_nquad0; const int m_nquad1; @@ -1494,7 +1488,6 @@ class PhysDeriv_SumFac_Prism : public Operator { LibUtilities::PointsKeyVector 
PtsKey = m_stdExp->GetPointsKeys(); - m_dim = PtsKey.size(); m_coordim = m_stdExp->GetCoordim(); m_derivFac = pGeomData->GetDerivFactors(pCollExp); @@ -1537,5 +1530,228 @@ OperatorKey PhysDeriv_SumFac_Prism::m_typeArr[] = { }; +/** + * @brief Phys deriv operator using sum-factorisation (Pyramid) + */ +class PhysDeriv_SumFac_Pyr : public Operator +{ + public: + OPERATOR_CREATE(PhysDeriv_SumFac_Pyr) + + virtual ~PhysDeriv_SumFac_Pyr() + { + } + + virtual void operator()( + const Array &input, + Array &output0, + Array &output1, + Array &output2, + Array &wsp) + { + int nPhys = m_stdExp->GetTotPoints(); + int ntot = m_numElmt*nPhys; + Array tmp0,tmp1,tmp2; + Array > Diff(3); + Array > out(3); + out[0] = output0; out[1] = output1; out[2] = output2; + + for(int i = 0; i < 3; ++i) + { + Diff[i] = wsp + i*ntot; + } + + // dEta0 + Blas::Dgemm('N','N', m_nquad0,m_nquad1*m_nquad2*m_numElmt, + m_nquad0,1.0, m_Deriv0,m_nquad0,&input[0], + m_nquad0,0.0,&Diff[0][0],m_nquad0); + + int cnt = 0; + for(int i = 0; i < m_numElmt; ++i) + { + + // dEta 1 + for (int j = 0; j < m_nquad2; ++j) + { + Blas::Dgemm('N', 'T', m_nquad0, m_nquad1, m_nquad1, + 1.0, &input[i*nPhys+j*m_nquad0*m_nquad1], + m_nquad0, m_Deriv1, m_nquad1, 0.0, + &Diff[1][i*nPhys+j*m_nquad0*m_nquad1], + m_nquad0); + } + + // dEta 2 + Blas::Dgemm('N','T',m_nquad0*m_nquad1,m_nquad2,m_nquad2, + 1.0, &input[i*nPhys],m_nquad0*m_nquad1, + m_Deriv2,m_nquad2, 0.0,&Diff[2][i*nPhys], + m_nquad0*m_nquad1); + + // dxi0 = 2/(1-eta_2) d Eta_0 + Vmath::Vmul(nPhys,&m_fac0[0],1,Diff[0].get()+cnt,1, + Diff[0].get()+cnt,1); + + // dxi1 = 2/(1-eta_2) d Eta_1 + Vmath::Vmul(nPhys,&m_fac0[0],1,Diff[1].get()+cnt,1, + Diff[1].get()+cnt,1); + + // dxi2 = (1+eta0)/(1-eta_2) d Eta_0 + d/dEta2; + Vmath::Vvtvp(nPhys,&m_fac1[0],1,Diff[0].get()+cnt,1, + Diff[2].get()+cnt,1,Diff[2].get()+cnt,1); + // dxi2 += (1+eta1)/(1-eta_2) d Eta_1 + Vmath::Vvtvp(nPhys,&m_fac2[0],1,Diff[1].get()+cnt,1, + Diff[2].get()+cnt,1,Diff[2].get()+cnt,1); + cnt += nPhys; + } + 
+ // calculate full derivative + for(int i = 0; i < m_coordim; ++i) + { + Vmath::Vmul(ntot,m_derivFac[i*3],1,Diff[0],1,out[i],1); + for(int j = 1; j < 3; ++j) + { + Vmath::Vvtvp (ntot, m_derivFac[i*3+j], 1, + Diff[j], 1, out[i], 1, out[i], 1); + } + } + } + + virtual void operator()( + int dir, + const Array &input, + Array &output, + Array &wsp) + { + int nPhys = m_stdExp->GetTotPoints(); + int ntot = m_numElmt*nPhys; + Array tmp0,tmp1,tmp2; + Array > Diff(3); + + for(int i = 0; i < 3; ++i) + { + Diff[i] = wsp + i*ntot; + } + + // dEta0 + Blas::Dgemm('N','N', m_nquad0,m_nquad1*m_nquad2*m_numElmt, + m_nquad0,1.0, m_Deriv0,m_nquad0,&input[0], + m_nquad0,0.0,&Diff[0][0],m_nquad0); + + int cnt = 0; + for(int i = 0; i < m_numElmt; ++i) + { + // dEta 1 + for (int j = 0; j < m_nquad2; ++j) + { + Blas::Dgemm('N', 'T', m_nquad0, m_nquad1, m_nquad1, + 1.0, &input[i*nPhys+j*m_nquad0*m_nquad1], + m_nquad0, m_Deriv1, m_nquad1, 0.0, + &Diff[1][i*nPhys+j*m_nquad0*m_nquad1], + m_nquad0); + } + + // dEta 2 + Blas::Dgemm('N','T',m_nquad0*m_nquad1,m_nquad2,m_nquad2, + 1.0, &input[i*nPhys],m_nquad0*m_nquad1, + m_Deriv2,m_nquad2, 0.0,&Diff[2][i*nPhys], + m_nquad0*m_nquad1); + + // dxi0 = 2/(1-eta_2) d Eta_0 + Vmath::Vmul(nPhys,&m_fac0[0],1,Diff[0].get()+cnt,1, + Diff[0].get()+cnt,1); + + // dxi1 = 2/(1-eta_2) d Eta_1 + Vmath::Vmul(nPhys,&m_fac0[0],1,Diff[1].get()+cnt,1, + Diff[1].get()+cnt,1); + + // dxi2 = (1+eta0)/(1-eta_2) d Eta_0 + d/dEta2; + Vmath::Vvtvp(nPhys,&m_fac1[0],1,Diff[0].get()+cnt,1, + Diff[2].get()+cnt,1,Diff[2].get()+cnt,1); + // dxi2 = (1+eta1)/(1-eta_2) d Eta_1 + d/dEta2; + Vmath::Vvtvp(nPhys,&m_fac2[0],1,Diff[1].get()+cnt,1, + Diff[2].get()+cnt,1,Diff[2].get()+cnt,1); + cnt += nPhys; + } + + // calculate full derivative + Vmath::Vmul(ntot,m_derivFac[dir*3],1,Diff[0],1,output,1); + for(int j = 1; j < 3; ++j) + { + Vmath::Vvtvp (ntot, m_derivFac[dir*3+j], 1, + Diff[j], 1, output, 1, output, 1); + } + } + + protected: + Array m_derivFac; + int m_coordim; + const int 
m_nquad0; + const int m_nquad1; + const int m_nquad2; + NekDouble *m_Deriv0; + NekDouble *m_Deriv1; + NekDouble *m_Deriv2; + Array m_fac0; + Array m_fac1; + Array m_fac2; + + private: + PhysDeriv_SumFac_Pyr( + vector pCollExp, + CoalescedGeomDataSharedPtr pGeomData) + : Operator(pCollExp, pGeomData), + m_nquad0 (m_stdExp->GetNumPoints(0)), + m_nquad1 (m_stdExp->GetNumPoints(1)), + m_nquad2 (m_stdExp->GetNumPoints(2)) + { + LibUtilities::PointsKeyVector PtsKey = m_stdExp->GetPointsKeys(); + + m_coordim = m_stdExp->GetCoordim(); + + m_derivFac = pGeomData->GetDerivFactors(pCollExp); + + const Array& z0 + = m_stdExp->GetBasis(0)->GetZ(); + const Array& z1 + = m_stdExp->GetBasis(1)->GetZ(); + const Array& z2 + = m_stdExp->GetBasis(2)->GetZ(); + m_fac0 = Array(m_nquad0*m_nquad1*m_nquad2); + m_fac1 = Array(m_nquad0*m_nquad1*m_nquad2); + m_fac2 = Array(m_nquad0*m_nquad1*m_nquad2); + + int nq0_nq1 = m_nquad0*m_nquad1; + for (int i = 0; i < m_nquad0; ++i) + { + for(int j = 0; j < m_nquad1; ++j) + { + int ifac = i+j*m_nquad0; + for(int k = 0; k < m_nquad2; ++k) + { + m_fac0[ifac + k*nq0_nq1] = + 2.0/(1-z2[k]); + m_fac1[ifac + k*nq0_nq1] = + 0.5*(1+z0[i]); + m_fac2[ifac + k*nq0_nq1] = + 0.5*(1+z1[j]); + } + } + } + + m_Deriv0 = &((m_stdExp->GetBasis(0)->GetD())->GetPtr())[0]; + m_Deriv1 = &((m_stdExp->GetBasis(1)->GetD())->GetPtr())[0]; + m_Deriv2 = &((m_stdExp->GetBasis(2)->GetD())->GetPtr())[0]; + + m_wspSize = 3*m_nquad0*m_nquad1*m_nquad2*m_numElmt; + } +}; + +/// Factory initialisation for the PhysDeriv_SumFac_Pyr operators +OperatorKey PhysDeriv_SumFac_Pyr::m_typeArr[] = { + GetOperatorFactory().RegisterCreatorFunction( + OperatorKey(ePyramid, ePhysDeriv, eSumFac, false), + PhysDeriv_SumFac_Pyr::create, "PhysDeriv_SumFac_Pyr") +}; + + } } diff --git a/library/Demos/BlockMat/SCdemo.cpp b/library/Demos/BlockMat/SCdemo.cpp deleted file mode 100644 index 414027ea625e66d5610361e52d49b59ec0487d98..0000000000000000000000000000000000000000 --- a/library/Demos/BlockMat/SCdemo.cpp 
+++ /dev/null @@ -1,114 +0,0 @@ -#include "BlockMat.h" - -/* - g++ -I../../../include -g -o SCdemo SCdemo.cpp -L../../ -lBlockMat -lblas -llapack -lg2c -*/ - -using namespace blockmat; - -/* declare matrix - - | 1 2 - - | - A = | 3 4 - - | using submatrix mat = | 1 2 | - | - - 1 2 | | 3 4 | - | - - 3 4 | - - | 1 2 3 - - - - - - | - B = | 4 5 6 - - - - - - | using submatrix mat = | 1 2 3 | - | 1 2 3 1 2 3 1 2 3 | | 4 5 6 | - | 4 5 6 4 5 6 4 5 6 | - - | 1 2 3 - - - - - - | - | 4 5 6 - - - - - - | - C = | 7 0 0 - - - - - - | using submatrix mat = | 1 2 3 | - | - - - 1 2 3 - - - | | 4 5 6 | - | - - - 4 5 6 - - - | | 7 0 0 | - | - - - 7 0 0 - - - | - | - - - - - - 1 2 3 | - | - - - - - - 4 5 6 | - | - - - - - - 7 0 0 | - - | 1 2 1 2 | - D = | 3 4 3 4 | using submatrix mat = | 1 2 | - | 5 6 5 6 | | 3 4 | - | - - 1 2 | | 5 6 | - | - - 3 4 | - | - - 5 6 | - | - - 1 2 | - | - - 3 4 | - | - - 5 6 | - - Calculate DC = A - B*C*D - */ - -main(){ - BlockMat *A,*B,*C,*D,*T,*SC; - double *mat; - - mat = new double [9]; - mat[0] = 1; mat[1] = 2; mat[2] = 3; - mat[3] = 4; mat[4] = 5; mat[5] = 6; - mat[6] = 7; mat[7] = 0; mat[8] = 0; - - cout << "A: " << endl; - A = new BlockMat(2,2); - A->GenBlock(0,0,2,2,mat); - A->GenBlock(1,1,2,2,mat); - A->PrintBlocks(); - - cout << endl << "B: " << endl; - B = new BlockMat(2,3); - B->GenBlock(0,0,2,3,mat); - B->GenBlock(1,0,2,3,mat); - B->GenBlock(1,1,2,3,mat); - B->GenBlock(1,2,2,3,mat); - B->PrintBlocks(); - - cout << endl << "C: " << endl; - C = new BlockMat(3,3); - C->GenBlock(0,0,3,3,mat); - C->GenBlock(1,1,3,3,mat); - C->GenBlock(2,2,3,3,mat); - C->PrintBlocks(); - - cout << endl << "C^{-1}: " << endl; - C->invert_diag(); - C->PrintBlocks(); - - cout << endl << "D: " << endl; - D = new BlockMat(3,2); - D->GenBlock(0,0,3,2,mat); - D->GenBlock(0,1,3,2,mat); - D->GenBlock(1,1,3,2,mat); - D->GenBlock(2,1,3,2,mat); - D->PrintBlocks(); - - cout << endl << "SC=A-B*C*D: " << endl; - SC = new BlockMat(2,2); - T = new BlockMat(3,2); - // 
T->geMxM(RowMajor,RowMajor,1,*C,*D,0); - T->MxM(*C,*D); - SC->sub(*A,SC->MxM(*B,*T)); - SC->PrintBlocks(); - - cout << endl << "SC=A-D^T*C*D: " << endl; - T->MxM(*C,*D); - SC->sub(*A,SC->MtxM(*D,*T)); - SC->PrintBlocks(); - - cout << endl << "SC=A-B*C*B^T: " << endl; - T->MxMt(*C,*B); - //T->geMxM(RowMajor,ColMajor,1,*C,*B,0); - SC->sub(*A,SC->MxM(*B,*T)); - SC->PrintBlocks(); - - delete A; - delete B; - delete C; - delete D; - delete T; - delete SC; - delete[] mat; - - return 0; -} diff --git a/library/Demos/BlockMat/demo.cpp b/library/Demos/BlockMat/demo.cpp deleted file mode 100644 index 7ffb6ef29e38ad14d608aca52f936024b25956ae..0000000000000000000000000000000000000000 --- a/library/Demos/BlockMat/demo.cpp +++ /dev/null @@ -1,79 +0,0 @@ -#include "BlockMat.h" - -/* - g++ -I../../../include -g -o demo demo.cpp -L../../ -lBlockMat -lblas -lg2c -llapack -*/ - -using namespace blockmat; - -main(){ - int i; - BlockMat *A,*B,*C; - double *mat; - - /* declare matrix - | 1 2 - - | - A = | 3 4 - - | using submatrix mat = | 1 2 | - | - - 1 2 | | 3 4 | - | - - 3 4 | - */ - - mat = new double [4]; - mat[0] = 1; mat[1] = 2; mat[2] = 3; mat[3] = 4; - - cout << "A: " << endl; - A = new BlockMat(2,2); - A->GenBlock(0,0,2,2,mat); - A->GenBlock(0,1,2,2,mat); - A->GenBlock(1,1,2,2,mat); - A->PrintBlocks(); - - cout << endl << "B: " << endl; - B = new BlockMat(2,2); - B->GenBlock(0,0,2,2,mat); - B->GenBlock(1,1,2,2,mat); - B->PrintBlocks(); - - cout << endl << "C=A+B: " << endl; - C = new BlockMat(2,2); - C->add(*A,*B); - C->PrintBlocks(); - - - cout << endl << "C=A-B: " << endl; - C->sub(*A,*B); - C->PrintBlocks(); - - cout << endl << "C=A*B: " << endl; - C->MxM(*A,*B); - C->PrintBlocks(); - - - double *y = new(double)[4]; - double *v = new(double)[4]; - Vmath::fill(4,1.0,v,1); - Vmath::zero(4,y,1); - - cout << endl << "y = A*v: "<< endl; - C->Mxvpy(v,y); - for(i = 0; i < 4; ++i) - cout << y[i] << " "; - cout << endl; - - Vmath::zero(4,y,1); - cout << endl << "y = A^T*v: "<< endl; 
- //C->Mtxvpy(v,y); - C->geMxv(ColMajor,1,v,1,y); - for(i = 0; i < 4; ++i) - cout << y[i] << " "; - cout << endl; - - delete A; - delete B; - delete C; - delete[] y; - delete[] v; - delete[] mat; - - return 0; -} diff --git a/library/Demos/CMakeLists.txt b/library/Demos/CMakeLists.txt index d013d36ac7bff8078da7a3a3856d6ba30fdf0315..9d6d76960022829601a8e4110e59aab7867f907a 100644 --- a/library/Demos/CMakeLists.txt +++ b/library/Demos/CMakeLists.txt @@ -1 +1 @@ -SUBDIRS(LibUtilities StdRegions SpatialDomains LocalRegions Collections MultiRegions) +SUBDIRS(LibUtilities StdRegions LocalRegions Collections MultiRegions) diff --git a/library/Demos/Collections/CMakeLists.txt b/library/Demos/Collections/CMakeLists.txt index 53d38730ecdb1544eaa74ea7aa77760941049edf..16705a85fb43648ba37297ccf6c0bd5a83eed497 100644 --- a/library/Demos/Collections/CMakeLists.txt +++ b/library/Demos/Collections/CMakeLists.txt @@ -1,9 +1,2 @@ -SET(LinkLibraries MultiRegions) - -SET(CollectionTimingSource CollectionTiming.cpp) -ADD_NEKTAR_EXECUTABLE(CollectionTiming demos CollectionTimingSource) -TARGET_LINK_LIBRARIES(CollectionTiming ${LinkLibraries}) - -IF (NEKTAR_USE_MPI) -# ADD_NEKTAR_TEST(Helmholtz2D_CG_P7_Modes_AllBCs_xxt_full) -ENDIF (NEKTAR_USE_MPI) +ADD_NEKTAR_EXECUTABLE(CollectionTiming + COMPONENT demos DEPENDS MultiRegions SOURCES CollectionTiming.cpp) diff --git a/library/Demos/Collections/CollectionTiming.cpp b/library/Demos/Collections/CollectionTiming.cpp index 3877041bf08824ce9149241f55b61912c40cf4e7..4a1a4b2a5870d899140583ab185288be5775cda8 100644 --- a/library/Demos/Collections/CollectionTiming.cpp +++ b/library/Demos/Collections/CollectionTiming.cpp @@ -37,10 +37,9 @@ #include #include -#include - #include #include +#include #include #include #include @@ -49,11 +48,6 @@ using namespace std; using namespace Nektar; -using boost::timer::cpu_timer; -using boost::timer::cpu_times; -using boost::timer::nanosecond_type; -using boost::timer::format; - MultiRegions::ExpListSharedPtr 
SetupExpList( int N, LibUtilities::SessionReaderSharedPtr session, @@ -71,33 +65,21 @@ MultiRegions::ExpListSharedPtr SetupExpList( return expList; } -void printOutput(int N, int Ntest, cpu_timer &timer, bool fmt) +void printOutput(int N, int Ntest, LibUtilities::Timer &timer, bool fmt) { - cpu_times times = timer.elapsed(); - nanosecond_type total = times.user + times.system; - - const double sec = 1000000000.0L; - - // Normalize timings - double wall_sec = times.wall / sec; - double total_sec = total / sec; - - wall_sec /= Ntest; - total_sec /= Ntest; + // Get timings + NekDouble total_sec = timer.TimePerTest(Ntest); if (fmt) { cout << setw(6) << N-1 - << setw(18) << wall_sec << setw(18) << total_sec << endl; } else { cout << "P = " << N-1 << ": " - << wall_sec << " (wall) " - << total_sec << " (total), " - << (100.0 * total_sec / wall_sec) << "% CPU" + << total_sec << " s, " << endl; } } @@ -113,7 +95,7 @@ int main(int argc, char *argv[]) MultiRegions::ExpListSharedPtr expList; - cpu_timer timer; + LibUtilities::Timer timer; int Ntest, maxOrder; session->LoadParameter("Ntest", Ntest, 1000); @@ -141,12 +123,12 @@ int main(int argc, char *argv[]) Array input (expList->GetNcoeffs()); Array output(expList->GetNpoints()); - timer.start(); + timer.Start(); for (int i = 0; i < Ntest; ++i) { expList->BwdTrans(input, output); } - timer.stop(); + timer.Stop(); printOutput(N, Ntest, timer, fmt); } @@ -158,12 +140,12 @@ int main(int argc, char *argv[]) Array input (expList->GetNpoints()); Array output(expList->GetNcoeffs()); - timer.start(); + timer.Start(); for (int i = 0; i < Ntest; ++i) { expList->IProductWRTBase(input, output); } - timer.stop(); + timer.Stop(); printOutput(N, Ntest, timer, fmt); } @@ -181,12 +163,12 @@ int main(int argc, char *argv[]) input[i] = Array(expList->GetNpoints()); } - timer.start(); + timer.Start(); for (int i = 0; i < Ntest; ++i) { expList->IProductWRTDerivBase(input, output); } - timer.stop(); + timer.Stop(); printOutput(N, Ntest, timer, 
fmt); } @@ -200,12 +182,12 @@ int main(int argc, char *argv[]) Array output1(expList->GetNpoints()); Array output2(expList->GetNpoints()); - timer.start(); + timer.Start(); for (int i = 0; i < Ntest; ++i) { expList->PhysDeriv(input, output0, output1, output2); } - timer.stop(); + timer.Stop(); printOutput(N, Ntest, timer, fmt); } diff --git a/library/Demos/LibUtilities/CMakeLists.txt b/library/Demos/LibUtilities/CMakeLists.txt index 8f05075c26c78659097c2356cff88c7c60fcd140..e5c5051077c29b5772325c1d7954f8c9862029a4 100644 --- a/library/Demos/LibUtilities/CMakeLists.txt +++ b/library/Demos/LibUtilities/CMakeLists.txt @@ -1,40 +1,16 @@ - -#SET(GraphSources -# GraphExample.cpp) - -SET(MemoryManagerSources - MemoryManager.cpp) - -SET(PartitionAnalyseSources - PartitionAnalyse.cpp) - -SET(FoundationSources - FoundationDemo.cpp) - -SET(NodalDemoSources NodalDemo.cpp) - -SET(TimeIntegrationDemoSources - TimeIntegrationDemo.cpp) - -SET(FieldIOBenchmarkerSources - FieldIOBenchmarker.cpp) - -#ADD_NEKTAR_EXECUTABLE(Graph demos GraphSources ) -#SET_LAPACK_LINK_LIBRARIES(Graph) - -#ADD_NEKTAR_EXECUTABLE(MemoryManager MemoryManagerSources ) - -ADD_NEKTAR_EXECUTABLE(PartitionAnalyse demos PartitionAnalyseSources) -TARGET_LINK_LIBRARIES(PartitionAnalyse LibUtilities) - -ADD_NEKTAR_EXECUTABLE(FoundationDemo demos FoundationSources ) -TARGET_LINK_LIBRARIES(FoundationDemo LibUtilities) - -ADD_NEKTAR_EXECUTABLE(NodalDemo demos NodalDemoSources) -TARGET_LINK_LIBRARIES(NodalDemo LibUtilities) - -ADD_NEKTAR_EXECUTABLE(TimeIntegrationDemo demos TimeIntegrationDemoSources) -TARGET_LINK_LIBRARIES(TimeIntegrationDemo LibUtilities) +ADD_NEKTAR_EXECUTABLE(PartitionAnalyse + COMPONENT demos DEPENDS LibUtilities SOURCES PartitionAnalyse.cpp) +ADD_NEKTAR_EXECUTABLE(FoundationDemo + COMPONENT demos DEPENDS LibUtilities SOURCES FoundationDemo.cpp) +ADD_NEKTAR_EXECUTABLE(NodalDemo + COMPONENT demos DEPENDS LibUtilities SOURCES NodalDemo.cpp) +ADD_NEKTAR_EXECUTABLE(TimeIntegrationDemo + COMPONENT 
demos DEPENDS LibUtilities SOURCES TimeIntegrationDemo.cpp) + +IF(NEKTAR_USE_MPI) + ADD_NEKTAR_EXECUTABLE(FieldIOBenchmarker + COMPONENT demos DEPENDS LibUtilities SOURCES FieldIOBenchmarker.cpp) +ENDIF() ADD_NEKTAR_TEST(NodalDemo_Tri_Deriv_P8) ADD_NEKTAR_TEST(NodalDemo_Tri_Integral_P6) @@ -45,8 +21,3 @@ ADD_NEKTAR_TEST(NodalDemo_Prism_Interp_P7) ADD_NEKTAR_TEST(NodalDemo_Tet_Deriv_P8) ADD_NEKTAR_TEST(NodalDemo_Tet_Integral_P6) ADD_NEKTAR_TEST(NodalDemo_Tet_Interp_P7) - -IF(NEKTAR_USE_HDF5) - ADD_NEKTAR_EXECUTABLE(FieldIOBenchmarker demos FieldIOBenchmarkerSources) - TARGET_LINK_LIBRARIES(FieldIOBenchmarker LibUtilities) -ENDIF() diff --git a/library/Demos/LibUtilities/GraphExample.cpp b/library/Demos/LibUtilities/GraphExample.cpp deleted file mode 100644 index 72c18f90ad2fff291d3c1732f8544f0bb4652c96..0000000000000000000000000000000000000000 --- a/library/Demos/LibUtilities/GraphExample.cpp +++ /dev/null @@ -1,7 +0,0 @@ -#include -#include - -int main(int argc, char *argv[]) -{ - return 0; -} diff --git a/library/Demos/LibUtilities/Makefile.am b/library/Demos/LibUtilities/Makefile.am deleted file mode 100644 index baff32e528ec60f24a2d11e62fef0f859a63628f..0000000000000000000000000000000000000000 --- a/library/Demos/LibUtilities/Makefile.am +++ /dev/null @@ -1,13 +0,0 @@ - -bin_PROGRAMS = MemoryManagerExample - -#GraphExample_SOURCES = GraphExample.cpp -#VecMatExample_SOURCES = VecMatExample.cpp -MemoryManagerExample_SOURCES = MemoryManager.cpp - -#GraphExample_CPPFLAGS = -I$(srcdir)/../.. -#VecMatExample_CPPFLAGS = -I$(srcdir)/../.. -I$(srcdir)/../../../ThirdParty/met -I$(srcdir)/../../../ThirdParty/met/common -MemoryManagerExample_CPPFLAGS = -I$(srcdir)/../.. 
-MemoryManagerExample_LDADD = -lboost_thread-gcc-mt - - diff --git a/library/Demos/LibUtilities/Makefile.in b/library/Demos/LibUtilities/Makefile.in deleted file mode 100644 index 64bf78561177abcf328fae1fbb0fff1ab583778a..0000000000000000000000000000000000000000 --- a/library/Demos/LibUtilities/Makefile.in +++ /dev/null @@ -1,415 +0,0 @@ -# Makefile.in generated by automake 1.9.5 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005 Free Software Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -SOURCES = $(MemoryManagerExample_SOURCES) - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ -pkgdatadir = $(datadir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -top_builddir = ../.. 
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -INSTALL = @INSTALL@ -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -bin_PROGRAMS = MemoryManagerExample$(EXEEXT) -subdir = Demos/LibUtilities -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -mkinstalldirs = $(install_sh) -d -CONFIG_CLEAN_FILES = -am__installdirs = "$(DESTDIR)$(bindir)" -binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) -PROGRAMS = $(bin_PROGRAMS) -am_MemoryManagerExample_OBJECTS = \ - MemoryManagerExample-MemoryManager.$(OBJEXT) -MemoryManagerExample_OBJECTS = $(am_MemoryManagerExample_OBJECTS) -MemoryManagerExample_DEPENDENCIES = -DEFAULT_INCLUDES = -I. 
-I$(srcdir) -depcomp = $(SHELL) $(top_srcdir)/depcomp -am__depfiles_maybe = depfiles -CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ - $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -CXXLD = $(CXX) -CXXLINK = $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) \ - -o $@ -SOURCES = $(MemoryManagerExample_SOURCES) -DIST_SOURCES = $(MemoryManagerExample_SOURCES) -ETAGS = etags -CTAGS = ctags -DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) -ACLOCAL = @ACLOCAL@ -AMDEP_FALSE = @AMDEP_FALSE@ -AMDEP_TRUE = @AMDEP_TRUE@ -AMTAR = @AMTAR@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -CPPFLAGS = @CPPFLAGS@ -CXX = @CXX@ -CXXCPP = @CXXCPP@ -CXXDEPMODE = @CXXDEPMODE@ -CXXFLAGS = @CXXFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DEPDIR = @DEPDIR@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LTLIBOBJS = @LTLIBOBJS@ -MAKEINFO = @MAKEINFO@ -OBJEXT = @OBJEXT@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -RANLIB = @RANLIB@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -STRIP = @STRIP@ -VERSION = @VERSION@ -ac_ct_CXX = @ac_ct_CXX@ -ac_ct_RANLIB = @ac_ct_RANLIB@ -ac_ct_STRIP = @ac_ct_STRIP@ -am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@ -am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@ -am__include = @am__include@ -am__leading_dot = @am__leading_dot@ -am__quote = @am__quote@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build_alias = @build_alias@ -datadir = @datadir@ -exec_prefix = @exec_prefix@ -host_alias = @host_alias@ -includedir = 
@includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -oldincludedir = @oldincludedir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -target_alias = @target_alias@ - -#GraphExample_SOURCES = GraphExample.cpp -#VecMatExample_SOURCES = VecMatExample.cpp -MemoryManagerExample_SOURCES = MemoryManager.cpp - -#GraphExample_CPPFLAGS = -I$(srcdir)/../.. -#VecMatExample_CPPFLAGS = -I$(srcdir)/../.. -I$(srcdir)/../../../ThirdParty/met -I$(srcdir)/../../../ThirdParty/met/common -MemoryManagerExample_CPPFLAGS = -I$(srcdir)/../.. -MemoryManagerExample_LDADD = -lboost_thread-gcc-mt -all: all-am - -.SUFFIXES: -.SUFFIXES: .cpp .o .obj -$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu Demos/LibUtilities/Makefile'; \ - cd $(top_srcdir) && \ - $(AUTOMAKE) --gnu Demos/LibUtilities/Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' 
in \ - *config.status*) \ - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh - -$(top_srcdir)/configure: $(am__configure_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -$(ACLOCAL_M4): $(am__aclocal_m4_deps) - cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh -install-binPROGRAMS: $(bin_PROGRAMS) - @$(NORMAL_INSTALL) - test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ - if test -f $$p \ - ; then \ - f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ - else :; fi; \ - done - -uninstall-binPROGRAMS: - @$(NORMAL_UNINSTALL) - @list='$(bin_PROGRAMS)'; for p in $$list; do \ - f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ - rm -f "$(DESTDIR)$(bindir)/$$f"; \ - done - -clean-binPROGRAMS: - -test -z "$(bin_PROGRAMS)" || rm -f $(bin_PROGRAMS) -MemoryManagerExample$(EXEEXT): $(MemoryManagerExample_OBJECTS) $(MemoryManagerExample_DEPENDENCIES) - @rm -f MemoryManagerExample$(EXEEXT) - $(CXXLINK) $(MemoryManagerExample_LDFLAGS) $(MemoryManagerExample_OBJECTS) $(MemoryManagerExample_LDADD) $(LIBS) - -mostlyclean-compile: - -rm -f *.$(OBJEXT) - -distclean-compile: - -rm -f *.tab.c - -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/MemoryManagerExample-MemoryManager.Po@am__quote@ - -.cpp.o: -@am__fastdepCXX_TRUE@ if $(CXXCOMPILE) -MT $@ 
-MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ $<; \ -@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< - -.cpp.obj: -@am__fastdepCXX_TRUE@ if $(CXXCOMPILE) -MT $@ -MD -MP -MF "$(DEPDIR)/$*.Tpo" -c -o $@ `$(CYGPATH_W) '$<'`; \ -@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/$*.Tpo" "$(DEPDIR)/$*.Po"; else rm -f "$(DEPDIR)/$*.Tpo"; exit 1; fi -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` - -MemoryManagerExample-MemoryManager.o: MemoryManager.cpp -@am__fastdepCXX_TRUE@ if $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(MemoryManagerExample_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MemoryManagerExample-MemoryManager.o -MD -MP -MF "$(DEPDIR)/MemoryManagerExample-MemoryManager.Tpo" -c -o MemoryManagerExample-MemoryManager.o `test -f 'MemoryManager.cpp' || echo '$(srcdir)/'`MemoryManager.cpp; \ -@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/MemoryManagerExample-MemoryManager.Tpo" "$(DEPDIR)/MemoryManagerExample-MemoryManager.Po"; else rm -f "$(DEPDIR)/MemoryManagerExample-MemoryManager.Tpo"; exit 1; fi -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MemoryManager.cpp' object='MemoryManagerExample-MemoryManager.o' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(MemoryManagerExample_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MemoryManagerExample-MemoryManager.o `test -f 'MemoryManager.cpp' || echo '$(srcdir)/'`MemoryManager.cpp - 
-MemoryManagerExample-MemoryManager.obj: MemoryManager.cpp -@am__fastdepCXX_TRUE@ if $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(MemoryManagerExample_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT MemoryManagerExample-MemoryManager.obj -MD -MP -MF "$(DEPDIR)/MemoryManagerExample-MemoryManager.Tpo" -c -o MemoryManagerExample-MemoryManager.obj `if test -f 'MemoryManager.cpp'; then $(CYGPATH_W) 'MemoryManager.cpp'; else $(CYGPATH_W) '$(srcdir)/MemoryManager.cpp'; fi`; \ -@am__fastdepCXX_TRUE@ then mv -f "$(DEPDIR)/MemoryManagerExample-MemoryManager.Tpo" "$(DEPDIR)/MemoryManagerExample-MemoryManager.Po"; else rm -f "$(DEPDIR)/MemoryManagerExample-MemoryManager.Tpo"; exit 1; fi -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='MemoryManager.cpp' object='MemoryManagerExample-MemoryManager.obj' libtool=no @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(MemoryManagerExample_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o MemoryManagerExample-MemoryManager.obj `if test -f 'MemoryManager.cpp'; then $(CYGPATH_W) 'MemoryManager.cpp'; else $(CYGPATH_W) '$(srcdir)/MemoryManager.cpp'; fi` -uninstall-info-am: - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ - test -n 
"$$unique" || unique=$$empty_fix; \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$tags $$unique; \ - fi -ctags: CTAGS -CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - tags=; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ - test -z "$(CTAGS_ARGS)$$tags$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$tags $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && cd $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) $$here - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -distdir: $(DISTFILES) - @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ - topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ - list='$(DISTFILES)'; for file in $$list; do \ - case $$file in \ - $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ - $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ - esac; \ - if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ - dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ - if test "$$dir" != "$$file" && test "$$dir" != "."; then \ - dir="/$$dir"; \ - $(mkdir_p) "$(distdir)$$dir"; \ - else \ - dir=''; \ - fi; \ - if test -d $$d/$$file; then \ - if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ - cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ - fi; \ - cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ - else \ - test -f $(distdir)/$$file \ - || cp -p $$d/$$file $(distdir)/$$file \ - || exit 1; \ - fi; \ - done -check-am: all-am -check: check-am -all-am: Makefile $(PROGRAMS) -installdirs: - for dir in "$(DESTDIR)$(bindir)"; do \ - test -z "$$dir" || $(mkdir_p) "$$dir"; \ - done -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: 
uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - `test -z '$(STRIP)' || \ - echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." -clean: clean-am - -clean-am: clean-binPROGRAMS clean-generic mostlyclean-am - -distclean: distclean-am - -rm -rf ./$(DEPDIR) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-tags - -dvi: dvi-am - -dvi-am: - -html: html-am - -info: info-am - -info-am: - -install-data-am: - -install-exec-am: install-binPROGRAMS - -install-info: install-info-am - -install-man: - -installcheck-am: - -maintainer-clean: maintainer-clean-am - -rm -rf ./$(DEPDIR) - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am - -mostlyclean-am: mostlyclean-compile mostlyclean-generic - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: uninstall-binPROGRAMS uninstall-info-am - -.PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ - clean-generic ctags distclean distclean-compile \ - distclean-generic distclean-tags distdir dvi dvi-am html \ - html-am info info-am install install-am install-binPROGRAMS \ - install-data install-data-am install-exec install-exec-am \ - install-info install-info-am install-man install-strip \ - installcheck installcheck-am installdirs maintainer-clean \ - maintainer-clean-generic mostlyclean mostlyclean-compile \ - mostlyclean-generic pdf pdf-am ps ps-am tags uninstall \ - uninstall-am 
uninstall-binPROGRAMS uninstall-info-am - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/library/Demos/LibUtilities/MemoryManager.cpp b/library/Demos/LibUtilities/MemoryManager.cpp deleted file mode 100644 index 7bbfa35ab737a48199b16417e41e2892cd94df8b..0000000000000000000000000000000000000000 --- a/library/Demos/LibUtilities/MemoryManager.cpp +++ /dev/null @@ -1,108 +0,0 @@ -/* -#include -#include - -#include -#include - -#include - -#include - -using namespace Nektar; -*/ - - -#include - -using namespace std; - -class Disabled -{ - public: - Disabled() - { - cout << "Creating a disabled." << endl; - } - - ~Disabled() - { - cout << "Destroying a disabled." << endl; - } -}; - -class Enabled -{ - public: - Enabled() - { - cout << "Creating a Enabled." << endl; - } - - ~Enabled() - { - cout << "Destroying a Enabled." << endl; - } -}; - - - -int main() -{ -// // Object allocation. -// Disabled* t1 = MemoryManager::Allocate(); -// MemoryManager::Deallocate(t1); -// assert(t1 == NULL); -// -// const Disabled* t2 = MemoryManager::Allocate(); -// MemoryManager::Deallocate(t2); -// assert(t2 == NULL); -// -// Enabled* t3 = MemoryManager::Allocate(); -// MemoryManager::Deallocate(t3); -// assert(t3 == NULL); -// -// boost::shared_ptr t4 = MemoryManager::AllocateSharedPtr(); -// boost::shared_ptr t5 = MemoryManager::AllocateSharedPtr(); -// -// // This doesn't compile, it really doesn't make sense to allow it to. -// //double* d = MemoryManager::Allocate(); -// -// cout << "\nDouble Array." << endl; -// double* d = MemoryManager::AllocateArray<10, double>(); -// MemoryManager::DeallocateArray<10>(d); -// -// cout << "\nDisabled Array" << endl; -// Disabled* a1 = MemoryManager::AllocateArray<2, Disabled>(); -// MemoryManager::DeallocateArray<2>(a1); -// -// cout << "\nEnabled Array." 
<< endl; -// Enabled* a2 = MemoryManager::AllocateArray<3, Enabled>(); -// MemoryManager::DeallocateArray<3>(a2); -// -// boost::shared_array a3 = MemoryManager::AllocateSharedArray<3, Disabled>(); -// boost::shared_array a4 = MemoryManager::AllocateSharedArray<4, Enabled>(); -// boost::shared_array a5 = MemoryManager::AllocateSharedArray<5, double>(); -// -// boost::shared_array a6 = MemoryManager::AllocateSharedArray(12); -// -// cout << "\nDestroy all shared pointers." << endl; - - -// boost::shared_array a = MemoryManager::AllocateArray(); -// boost::shared_array d = MemoryManager::AllocateArray(); - // Allocate a raw -// boost::thread_group g; -// for(unsigned int i = 0; i < 2; ++i) -// { -// //g.create_thread(testThread); -// g.create_thread(testNakedThread); -// } -// -// g.join_all(); - - - - return 0; -} - diff --git a/library/Demos/LocalRegions/CMakeLists.txt b/library/Demos/LocalRegions/CMakeLists.txt index cdaf49fbe214f6ac71fd833ec0e64bc67e4aef46..15af00bbe1c3b90922f01822ab943bc8fbb1c367 100644 --- a/library/Demos/LocalRegions/CMakeLists.txt +++ b/library/Demos/LocalRegions/CMakeLists.txt @@ -1,28 +1,15 @@ -SET(LinkLibraries LocalRegions) - -SET(LocProject1DSource LocProject1D.cpp) -ADD_NEKTAR_EXECUTABLE(LocProject1D demos LocProject1DSource) -TARGET_LINK_LIBRARIES(LocProject1D ${LinkLibraries}) - -SET(LocProject2DSource LocProject2D.cpp) -ADD_NEKTAR_EXECUTABLE(LocProject2D demos LocProject2DSource) -TARGET_LINK_LIBRARIES(LocProject2D ${LinkLibraries}) - -SET(LocProject3DSource LocProject3D.cpp) -ADD_NEKTAR_EXECUTABLE(LocProject3D demos LocProject3DSource) -TARGET_LINK_LIBRARIES(LocProject3D ${LinkLibraries}) - -SET(LocProject_Diff1DSource LocProject_Diff1D.cpp) -ADD_NEKTAR_EXECUTABLE(LocProject_Diff1D demos LocProject_Diff1DSource) -TARGET_LINK_LIBRARIES(LocProject_Diff1D ${LinkLibraries}) - -SET(LocProject_Diff2DSource LocProject_Diff2D.cpp) -ADD_NEKTAR_EXECUTABLE(LocProject_Diff2D demos LocProject_Diff2DSource) 
-TARGET_LINK_LIBRARIES(LocProject_Diff2D ${LinkLibraries}) - -SET(LocProject_Diff3DSource LocProject_Diff3D.cpp) -ADD_NEKTAR_EXECUTABLE(LocProject_Diff3D demos LocProject_Diff3DSource) -TARGET_LINK_LIBRARIES(LocProject_Diff3D ${LinkLibraries}) +ADD_NEKTAR_EXECUTABLE(LocProject1D + COMPONENT demos DEPENDS LocalRegions SOURCES LocProject1D.cpp) +ADD_NEKTAR_EXECUTABLE(LocProject2D + COMPONENT demos DEPENDS LocalRegions SOURCES LocProject2D.cpp) +ADD_NEKTAR_EXECUTABLE(LocProject3D + COMPONENT demos DEPENDS LocalRegions SOURCES LocProject3D.cpp) +ADD_NEKTAR_EXECUTABLE(LocProject_Diff1D + COMPONENT demos DEPENDS LocalRegions SOURCES LocProject_Diff1D.cpp) +ADD_NEKTAR_EXECUTABLE(LocProject_Diff2D + COMPONENT demos DEPENDS LocalRegions SOURCES LocProject_Diff2D.cpp) +ADD_NEKTAR_EXECUTABLE(LocProject_Diff3D + COMPONENT demos DEPENDS LocalRegions SOURCES LocProject_Diff3D.cpp) # Generate list of available subdirectories FILE(GLOB dir_list "*") diff --git a/library/Demos/LocalRegions/HexMesh.xml b/library/Demos/LocalRegions/HexMesh.xml deleted file mode 100644 index c0857979c698eabf390851298fc865165cd8af06..0000000000000000000000000000000000000000 --- a/library/Demos/LocalRegions/HexMesh.xml +++ /dev/null @@ -1,70 +0,0 @@ - - - - - - - - - - A = 1.0 - B = 2.0 - C = 3.0 - - - - -1.0 -1.0 -1.0 - 1.0 -1.0 -1.0 - 1.0 1.0 -1.0 - -1.0 1.0 -1.0 - -1.0 -1.0 1.0 - 1.0 -1.0 1.0 - 1.0 1.0 1.0 - -1.0 1.0 1.0 - - - - - 0 1 - 1 2 - 2 3 - 3 0 - 0 4 - 1 5 - 2 6 - 3 7 - 4 5 - 5 6 - 6 7 - 7 4 - - - - - 0 1 2 3 - 0 5 8 4 - 1 6 9 5 - 2 7 10 6 - 3 4 11 7 - 8 9 10 11 - - - - - - - - - 0 1 2 3 4 5 - - - - - - H[0] - - - C[0] - - - diff --git a/library/Demos/LocalRegions/LocProject1D.cpp b/library/Demos/LocalRegions/LocProject1D.cpp index ef237a49aeeca1c4cd1fddd99d817916697032b9..e7bd768624a8c42cb4f0de11f46a385e2c243ac9 100644 --- a/library/Demos/LocalRegions/LocProject1D.cpp +++ b/library/Demos/LocalRegions/LocProject1D.cpp @@ -31,15 +31,15 @@ int main(int argc, char *argv[]) "dictates the basis 
as:\n"); fprintf(stderr,"\t Ortho_A = 1\n"); fprintf(stderr,"\t Modified_A = 4\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t Lagrange = 8\n"); - fprintf(stderr,"\t Gauss Lagrange = 9\n"); - fprintf(stderr,"\t Legendre = 10\n"); - fprintf(stderr,"\t Chebyshev = 11\n"); - fprintf(stderr,"\t Monomial = 12\n"); - fprintf(stderr,"\t FourierSingleMode = 13\n"); + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Gauss Lagrange = 11\n"); + fprintf(stderr,"\t Legendre = 12\n"); + fprintf(stderr,"\t Chebyshev = 13\n"); + fprintf(stderr,"\t Monomial = 14\n"); + fprintf(stderr,"\t FourierSingleMode = 15\n"); - fprintf(stderr,"Note type = 1,2,4,5 are for higher dimensional basis\n"); + fprintf(stderr,"Note type = 1,2,4,5,7,8 are for higher dimensional basis\n"); exit(1); } diff --git a/library/Demos/LocalRegions/LocProject2D.cpp b/library/Demos/LocalRegions/LocProject2D.cpp index caf8bf29889ac20bfe2660ec5880cd27c745cbfc..e4bf248bd4519529b272025c420a9043d2970e65 100644 --- a/library/Demos/LocalRegions/LocProject2D.cpp +++ b/library/Demos/LocalRegions/LocProject2D.cpp @@ -53,15 +53,16 @@ int main(int argc, char *argv[]) fprintf(stderr,"\t Ortho_B = 2\n"); fprintf(stderr,"\t Modified_A = 4\n"); fprintf(stderr,"\t Modified_B = 5\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t Lagrange = 8\n"); - fprintf(stderr,"\t Gauss Lagrange = 9\n"); - fprintf(stderr,"\t Legendre = 10\n"); - fprintf(stderr,"\t Chebyshev = 11\n"); - fprintf(stderr,"\t Nodal tri (Electro) = 13\n"); - fprintf(stderr,"\t Nodal tri (Fekete) = 14\n"); + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Gauss Lagrange = 11\n"); + fprintf(stderr,"\t Legendre = 12\n"); + fprintf(stderr,"\t Chebyshev = 13\n"); + fprintf(stderr,"\t Monomial = 14\n"); + fprintf(stderr,"\t Nodal tri (Electro) = 15\n"); + fprintf(stderr,"\t Nodal tri (Fekete) = 16\n"); - fprintf(stderr,"Note type = 3,6 are for 
three-dimensional basis\n"); + fprintf(stderr,"Note type = 3,6,7,8 are for three-dimensional basis\n"); fprintf(stderr,"The last series of values are the coordinates\n"); exit(1); @@ -78,17 +79,17 @@ int main(int argc, char *argv[]) int btype1_val = atoi(argv[2]); int btype2_val = atoi(argv[3]); - if(( btype1_val <= 11)&&( btype2_val <= 11)) + if(( btype1_val <= 14)&&( btype2_val <= 14)) { btype1 = (LibUtilities::BasisType) btype1_val; btype2 = (LibUtilities::BasisType) btype2_val; } - else if(( btype1_val >=13)&&(btype2_val <= 14)) + else if(( btype1_val >=15)&&(btype2_val <= 16)) { btype1 = LibUtilities::eOrtho_A; btype2 = LibUtilities::eOrtho_B; - if(btype1_val == 13) + if(btype1_val == 15) { NodalType = LibUtilities::eNodalTriElec; } @@ -204,7 +205,7 @@ int main(int argc, char *argv[]) const LibUtilities::BasisKey Bkey1(btype1,order1,Pkey1); const LibUtilities::BasisKey Bkey2(btype2,order2,Pkey2); - if(btype1_val >= 11) + if(btype1_val >= 15) { E = new LocalRegions::NodalTriExp(Bkey1,Bkey2,NodalType,geom); } diff --git a/library/Demos/LocalRegions/LocProject3D.cpp b/library/Demos/LocalRegions/LocProject3D.cpp index 81607d1be34c1f0c0c7424755663652d4e939853..11456c9fb4701dfc0e8f6bc6fb443d54ad117788 100644 --- a/library/Demos/LocalRegions/LocProject3D.cpp +++ b/library/Demos/LocalRegions/LocProject3D.cpp @@ -88,10 +88,12 @@ int main(int argc, char *argv[]){ fprintf(stderr,"\t Modified_A = 4\n"); fprintf(stderr,"\t Modified_B = 5\n"); fprintf(stderr,"\t Modified_C = 6\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t Lagrange = 8\n"); - fprintf(stderr,"\t Legendre = 9\n"); - fprintf(stderr,"\t Chebyshev = 10\n"); + fprintf(stderr,"\t OrthoPyr_C = 7\n"); + fprintf(stderr,"\t ModifiedPyr_C = 8\n"); + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Legendre = 11\n"); + fprintf(stderr,"\t Chebyshev = 12\n"); exit(1); } @@ -142,25 +144,28 @@ int main(int argc, char *argv[]){ break; case 
LibUtilities::ePyramid: if((btype1 == eOrtho_B) || (btype1 == eOrtho_C) - || (btype1 == eModified_B) || (btype1 == eModified_C)) + || (btype1 == eModified_B) || (btype1 == eModified_C) + || (btype1 == eModifiedPyr_C)) { NEKERROR(ErrorUtil::efatal, - "Basis 1 cannot be of type Ortho_B, Ortho_C, Modified_B " - "or Modified_C"); + "Basis 1 cannot be of type Ortho_B, Ortho_C, Modified_B, " + "Modified_C or ModifiedPyr_C"); } if((btype2 == eOrtho_B) || (btype2 == eOrtho_C) - || (btype2 == eModified_B) || (btype2 == eModified_C)) + || (btype2 == eModified_B) || (btype2 == eModified_C) + || (btype2 == eModifiedPyr_C)) { NEKERROR(ErrorUtil::efatal, - "Basis 2 cannot be of type Ortho_B, Ortho_C, Modified_B " - "or Modified_C"); + "Basis 2 cannot be of type Ortho_B, Ortho_C, Modified_B, " + "Modified_C or ModifiedPyr_C"); } if((btype3 == eOrtho_A) || (btype3 == eOrtho_B) - || (btype3 == eModified_A) || (btype3 == eModified_B)) + || (btype3 == eModified_A) || (btype3 == eModified_B) + || (btype3 == eModified_C)) { NEKERROR(ErrorUtil::efatal, - "Basis 3 cannot be of type Ortho_A, Ortho_B, Modified_A " - "or Modified_B"); + "Basis 3 cannot be of type Ortho_A, Ortho_B, Modified_A, " + "Modified_B or ModifiedPyr_C"); } break; case LibUtilities::ePrism: diff --git a/library/Demos/LocalRegions/LocProject_Diff1D.cpp b/library/Demos/LocalRegions/LocProject_Diff1D.cpp index 0347fedf3df14c37889b5f56df84048b4f5c987f..17f2ad82204c9b8e0c69d83ced696f990f79be1a 100644 --- a/library/Demos/LocalRegions/LocProject_Diff1D.cpp +++ b/library/Demos/LocalRegions/LocProject_Diff1D.cpp @@ -34,15 +34,15 @@ int main(int argc, char *argv[]) "dictates the basis as:\n"); fprintf(stderr,"\t Ortho_A = 1\n"); fprintf(stderr,"\t Modified_A = 4\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t Lagrange = 8\n"); - fprintf(stderr,"\t Gauss Lagrange = 9\n"); - fprintf(stderr,"\t Legendre = 10\n"); - fprintf(stderr,"\t Chebyshev = 11\n"); - fprintf(stderr,"\t Monomial = 12\n"); - fprintf(stderr,"\t 
FourierSingleMode = 13\n"); - - fprintf(stderr,"Note type = 1,2,4,5 are for higher dimensional basis\n"); + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Gauss Lagrange = 11\n"); + fprintf(stderr,"\t Legendre = 12\n"); + fprintf(stderr,"\t Chebyshev = 13\n"); + fprintf(stderr,"\t Monomial = 14\n"); + fprintf(stderr,"\t FourierSingleMode = 15\n"); + + fprintf(stderr,"Note type = 1,2,4,5,7,8 are for higher dimensional basis\n"); exit(1); } diff --git a/library/Demos/LocalRegions/LocProject_Diff2D.cpp b/library/Demos/LocalRegions/LocProject_Diff2D.cpp index ea06fedff4c17621c1efd33454909585ead738ff..2bde183f8c39ea51a55508731a3be323fa289ad2 100644 --- a/library/Demos/LocalRegions/LocProject_Diff2D.cpp +++ b/library/Demos/LocalRegions/LocProject_Diff2D.cpp @@ -63,16 +63,17 @@ int main(int argc, char *argv[]) fprintf(stderr,"\t Ortho_B = 2\n"); fprintf(stderr,"\t Modified_A = 4\n"); fprintf(stderr,"\t Modified_B = 5\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t Lagrange = 8\n"); - fprintf(stderr,"\t Gauss Lagrange = 9\n"); - fprintf(stderr,"\t Legendre = 10\n"); - fprintf(stderr,"\t Chebyshev = 11\n"); - fprintf(stderr,"\t Nodal tri (Electro) = 13\n"); - fprintf(stderr,"\t Nodal tri (Fekete) = 14\n"); + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Gauss Lagrange = 11\n"); + fprintf(stderr,"\t Legendre = 12\n"); + fprintf(stderr,"\t Chebyshev = 13\n"); + fprintf(stderr,"\t Monomial = 13\n"); + fprintf(stderr,"\t Nodal tri (Electro) = 15\n"); + fprintf(stderr,"\t Nodal tri (Fekete) = 16\n"); - fprintf(stderr,"Note type = 3,6 are for three-dimensional basis\n"); + fprintf(stderr,"Note type = 3,6,7,8 are for three-dimensional basis\n"); fprintf(stderr,"The last series of values are the coordinates\n"); exit(1); @@ -90,17 +91,17 @@ int main(int argc, char *argv[]) int btype1_val = atoi(argv[2]); int btype2_val = atoi(argv[3]); - if(( btype1_val <= 11)&&( 
btype2_val <= 11)) + if(( btype1_val <= 14)&&( btype2_val <= 14)) { btype1 = (LibUtilities::BasisType) btype1_val; btype2 = (LibUtilities::BasisType) btype2_val; } - else if(( btype1_val >=13)&&(btype2_val <= 14)) + else if(( btype1_val >=15)&&(btype2_val <= 16)) { btype1 = LibUtilities::eOrtho_A; btype2 = LibUtilities::eOrtho_B; - if(btype1_val == 13) + if(btype1_val == 15) { NodalType = LibUtilities::eNodalTriElec; } @@ -108,10 +109,8 @@ int main(int argc, char *argv[]) { NodalType = LibUtilities::eNodalTriFekete; } - } - // Check to see that correct Expansions are used switch(regionshape) { @@ -224,7 +223,7 @@ int main(int argc, char *argv[]) const LibUtilities::BasisKey Bkey1(btype1,order1,Pkey1); const LibUtilities::BasisKey Bkey2(btype2,order2,Pkey2); - if(btype1_val >= 11) + if(btype1_val >= 15) { E = new LocalRegions::NodalTriExp(Bkey1,Bkey2,NodalType,geom); } diff --git a/library/Demos/LocalRegions/LocProject_Diff3D.cpp b/library/Demos/LocalRegions/LocProject_Diff3D.cpp index 7948d7d8c2e93ee53688ef860b6214c73866511b..2e290d54c3fd84bff49dd3fc67a1feb1ef4cbe59 100644 --- a/library/Demos/LocalRegions/LocProject_Diff3D.cpp +++ b/library/Demos/LocalRegions/LocProject_Diff3D.cpp @@ -104,10 +104,12 @@ int main(int argc, char *argv[]){ fprintf(stderr,"\t Modified_A = 4\n"); fprintf(stderr,"\t Modified_B = 5\n"); fprintf(stderr,"\t Modified_C = 6\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t Lagrange = 8\n"); - fprintf(stderr,"\t Legendre = 9\n"); - fprintf(stderr,"\t Chebyshev = 10\n"); + fprintf(stderr,"\t OrthoPyr_C = 7\n"); + fprintf(stderr,"\t ModifiedPyr_C = 8\n"); + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Legendre = 11\n"); + fprintf(stderr,"\t Chebyshev = 12\n"); exit(1); } @@ -136,21 +138,24 @@ int main(int argc, char *argv[]){ { case LibUtilities::eTetrahedron: if((btype1 == eOrtho_B) || (btype1 == eOrtho_C) - || (btype1 == eModified_B) || (btype1 == eModified_C)) + || (btype1 == 
eModified_B) || (btype1 == eModified_C) + || (btype1 == eModifiedPyr_C)) { NEKERROR(ErrorUtil::efatal, "Basis 1 cannot be of type Ortho_B, " "Ortho_C, Modified_B or Modified_C"); } if((btype2 == eOrtho_A) || (btype2 == eOrtho_C) - || (btype2 == eModified_A) || (btype2 == eModified_C)) + || (btype2 == eModified_A) || (btype2 == eModified_C) + || (btype1 == eModifiedPyr_C)) { NEKERROR(ErrorUtil::efatal, "Basis 2 cannot be of type Ortho_A, " "Ortho_C, Modified_A or Modified_C"); } if((btype3 == eOrtho_A) || (btype3 == eOrtho_B) - || (btype3 == eModified_A) || (btype3 == eModified_B)) + || (btype3 == eModified_A) || (btype3 == eModified_B) + || (btype1 == eModified_C)) { NEKERROR(ErrorUtil::efatal, "Basis 3 cannot be of type Ortho_A, " @@ -159,21 +164,24 @@ int main(int argc, char *argv[]){ break; case LibUtilities::ePyramid: if((btype1 == eOrtho_B) || (btype1 == eOrtho_C) - || (btype1 == eModified_B) || (btype1 == eModified_C)) + || (btype1 == eModified_B) || (btype1 == eModified_C) + || (btype1 == eModifiedPyr_C)) { NEKERROR(ErrorUtil::efatal, "Basis 1 cannot be of type Ortho_B, " "Ortho_C, Modified_B or Modified_C"); } if((btype2 == eOrtho_B) || (btype2 == eOrtho_C) - || (btype2 == eModified_B) || (btype2 == eModified_C)) + || (btype2 == eModified_B) || (btype2 == eModified_C) + || (btype1 == eModifiedPyr_C)) { NEKERROR(ErrorUtil::efatal, "Basis 2 cannot be of type Ortho_B, " "Ortho_C, Modified_B or Modified_C"); } if((btype3 == eOrtho_A) || (btype3 == eOrtho_B) - || (btype3 == eModified_A) || (btype3 == eModified_B)) + || (btype3 == eModified_A) || (btype3 == eModified_B) + || (btype1 == eModified_C)) { NEKERROR(ErrorUtil::efatal, "Basis 3 cannot be of type Ortho_A, " diff --git a/library/Demos/LocalRegions/PrismMesh.xml b/library/Demos/LocalRegions/PrismMesh.xml deleted file mode 100644 index ae54de9e7f68924d8008da465e73f77892fed65b..0000000000000000000000000000000000000000 --- a/library/Demos/LocalRegions/PrismMesh.xml +++ /dev/null @@ -1,65 +0,0 @@ - - - - - - - 
- - - A = 1.0 - B = 2.0 - C = 3.0 - - - - -1.0 -1.0 -1.0 - 1.0 -1.0 -1.0 - 1.0 1.0 -1.0 - -1.0 1.0 -1.0 - -1.0 -1.0 1.0 - 1.0 -1.0 1.0 - - - - - 0 1 - 1 2 - 2 3 - 3 0 - 0 4 - 1 4 - 2 5 - 3 5 - 4 5 - - - - - - 0 1 2 3 - 0 5 4 - 1 6 8 5 - 2 7 6 - 3 7 8 4 - - - - - - - - - 0 1 2 3 4 - - - - - - R[0] - - - C[0] - - - diff --git a/library/Demos/LocalRegions/PyrMesh.xml b/library/Demos/LocalRegions/PyrMesh.xml deleted file mode 100644 index 6f414153ed5a030362f22bf4fd3a2f2312645a9b..0000000000000000000000000000000000000000 --- a/library/Demos/LocalRegions/PyrMesh.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - A = 1.0 - B = 2.0 - C = 3.0 - - - - -1.0 -1.0 -1.0 - 1.0 -1.0 -1.0 - 1.0 1.0 -1.0 - -1.0 1.0 -1.0 - -1.0 -1.0 1.0 - - - - - - 0 1 - 1 2 - 2 3 - 3 0 - 0 4 - 1 4 - 2 4 - 3 4 - - - - - - 0 1 2 3 - 0 5 4 - 1 6 5 - 2 7 6 - 3 7 4 - - - - - - - - -

0 1 2 3 4

- - - - - - P[0] - - - C[0] - -
- diff --git a/library/Demos/LocalRegions/Tests/LocProject2D_Def_Quad_Lagrange_Basis_P6_Q7.tst b/library/Demos/LocalRegions/Tests/LocProject2D_Def_Quad_Lagrange_Basis_P6_Q7.tst index aa336d92a27cdab4c3f69dfaa9ddc6a9ede2ec83..23daae93fb421cc9a145de9da0d744736e60ed0f 100644 --- a/library/Demos/LocalRegions/Tests/LocProject2D_Def_Quad_Lagrange_Basis_P6_Q7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject2D_Def_Quad_Lagrange_Basis_P6_Q7.tst @@ -2,7 +2,7 @@ Project2D Deformed Quad Lagrange basis P=6 Q=7 LocProject2D - 4 8 8 6 6 7 7 0.0 0.0 1.0 0.0 1.5 1.5 0.0 1.0 + 4 10 10 6 6 7 7 0.0 0.0 1.0 0.0 1.5 1.5 0.0 1.0 0.0970116 diff --git a/library/Demos/LocalRegions/Tests/LocProject2D_Quad_Lagrange_Basis_P6_Q7.tst b/library/Demos/LocalRegions/Tests/LocProject2D_Quad_Lagrange_Basis_P6_Q7.tst index 59cc4a882486a485bd6de2740cc6ba045a70df5a..1f35b51263b429febcbee56217ff6ea586798c54 100644 --- a/library/Demos/LocalRegions/Tests/LocProject2D_Quad_Lagrange_Basis_P6_Q7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject2D_Quad_Lagrange_Basis_P6_Q7.tst @@ -2,7 +2,7 @@ Project2D Quad Lagrange basis P=6 Q=7 LocProject2D - 4 8 8 6 6 7 7 0.0 0.0 1.0 0.0 1.0 1.0 0.0 1.0 + 4 10 10 6 6 7 7 0.0 0.0 1.0 0.0 1.0 1.0 0.0 1.0 1.81771e-15 diff --git a/library/Demos/LocalRegions/Tests/LocProject2D_Tri_Nodal_Basis_P6_Q7.tst b/library/Demos/LocalRegions/Tests/LocProject2D_Tri_Nodal_Basis_P6_Q7.tst index aaabb3c688fd66fcfcd731702813296e5f7eba71..8ae595c1c33788542d4f6cf5ce86c6de83eef6de 100644 --- a/library/Demos/LocalRegions/Tests/LocProject2D_Tri_Nodal_Basis_P6_Q7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject2D_Tri_Nodal_Basis_P6_Q7.tst @@ -2,7 +2,7 @@ Project2D Triangle Nodal basis P=6 Q=7 LocProject2D - 3 13 13 6 6 7 7 0.0 0.0 1.0 1.0 0.5 1.0 + 3 15 15 6 6 7 7 0.0 0.0 1.0 1.0 0.5 1.0 1.84567e-15 diff --git a/library/Demos/LocalRegions/Tests/LocProject3D_Def_Hex_Lagrange_Basis_P6_Q7.tst b/library/Demos/LocalRegions/Tests/LocProject3D_Def_Hex_Lagrange_Basis_P6_Q7.tst index 
7625ca350c80286389953595e00696d0c14c1128..3e698cebd2c1568a5283abffd491a34aa02c9ffb 100644 --- a/library/Demos/LocalRegions/Tests/LocProject3D_Def_Hex_Lagrange_Basis_P6_Q7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject3D_Def_Hex_Lagrange_Basis_P6_Q7.tst @@ -2,7 +2,7 @@ Project3D Deformed Hex Lagrange basis P=6 Q=7 LocProject3D - 8 8 8 8 6 6 6 7 7 7 0 0 0 1 0 0 1 1.5 0 0 1 0 0 0 1 1.5 0 1 1 1 1 0 1 1.5 + 8 10 10 10 6 6 6 7 7 7 0 0 0 1 0 0 1 1.5 0 0 1 0 0 0 1 1.5 0 1 1 1 1 0 1 1.5 0.00820104 diff --git a/library/Demos/LocalRegions/Tests/LocProject3D_Hex_Lagrange_Basis_P6_Q7.tst b/library/Demos/LocalRegions/Tests/LocProject3D_Hex_Lagrange_Basis_P6_Q7.tst index ceb3bea75c59742d164c348b6a5953774e945224..565a8b5520bcb4b9220ed015b9f052181872a978 100644 --- a/library/Demos/LocalRegions/Tests/LocProject3D_Hex_Lagrange_Basis_P6_Q7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject3D_Hex_Lagrange_Basis_P6_Q7.tst @@ -2,13 +2,13 @@ Project3D Hex Lagrange basis P=6 Q=7 LocProject3D - 8 8 8 8 6 6 6 7 7 7 0 0 0 1 0 0 1 1 0 0 1 0 0 0 1 1 0 1 1 1 1 0 1 1 + 8 10 10 10 6 6 6 7 7 7 0 0 0 1 0 0 1 1 0 0 1 0 0 0 1 1 0 1 1 1 1 0 1 1 - 1.7955e-14 + 2.04616e-14 - 8.81073e-13 + 8.52651e-13 diff --git a/library/Demos/LocalRegions/Tests/LocProject3D_Pyr_Mod_Basis_P6_Q7.tst b/library/Demos/LocalRegions/Tests/LocProject3D_Pyr_Mod_Basis_P6_Q7.tst index 040db54843ae6246ff0f9c39e4da06e637f85179..9298084a7be02a0b18d0814f63d1ad6686dd406b 100644 --- a/library/Demos/LocalRegions/Tests/LocProject3D_Pyr_Mod_Basis_P6_Q7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject3D_Pyr_Mod_Basis_P6_Q7.tst @@ -2,13 +2,13 @@ Project3D Pyramid Modified basis P=6 Q=7 LocProject3D - 6 4 4 6 6 6 6 7 7 6 0 0 0 1 0 0 1 1 0 0 1 0 0.5 0.5 0.866 + 6 4 4 8 6 6 6 7 7 6 0 0 0 1 0 0 1 1 0 0 1 0 0.5 0.5 0.866 - 1.80317e-12 + 1.15445e-12 - 1.20881e-11 + 8.91376e-12 diff --git a/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Lin_Deformed_Quad_Lagrange_Basis_P6_Q7.tst 
b/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Lin_Deformed_Quad_Lagrange_Basis_P6_Q7.tst index ec1835c35342a1e096eed06a6c78f0c65e125004..0a388ec49172bde12d3446a4a798f78985c229bf 100644 --- a/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Lin_Deformed_Quad_Lagrange_Basis_P6_Q7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Lin_Deformed_Quad_Lagrange_Basis_P6_Q7.tst @@ -2,7 +2,7 @@ LocProject_Diff2D Lin. Deformed Quad Lagrange Basis P=6, Q=7 LocProject_Diff2D - 4 8 8 6 6 7 7 0.0 0.0 1.0 0.0 1.5 1.5 0.0 1.0 + 4 10 10 6 6 7 7 0.0 0.0 1.0 0.0 1.5 1.5 0.0 1.0 0.288711 diff --git a/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Reg_Quad_Lagrange_Basis_P6_Q=7.tst b/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Reg_Quad_Lagrange_Basis_P6_Q=7.tst index 119a70346b0ae28842c473ffc395b37afffb4cfd..de7568a069a88d4bf0850ee2958534bdeb30ae27 100644 --- a/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Reg_Quad_Lagrange_Basis_P6_Q=7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Reg_Quad_Lagrange_Basis_P6_Q=7.tst @@ -2,10 +2,10 @@ LocProject_Diff2D Reg. 
Quad Lagrange Basis P=6, Q=7 LocProject_Diff2D - 4 8 8 6 6 7 7 0.0 0.0 1.0 0.0 1.0 1.0 0.0 1.0 + 4 10 10 6 6 7 7 0.0 0.0 1.0 0.0 1.0 1.0 0.0 1.0 - 6.14629e-14 + 6.80298e-14 4.83169e-13 diff --git a/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Tri_Nodal_Basis_P6_Q7.tst b/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Tri_Nodal_Basis_P6_Q7.tst index 408ac25c71f83f0b82a10f67222d25a27bdb31d6..0c13f888ddbfe74ffaa0cff6dcf032bdc64e8583 100644 --- a/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Tri_Nodal_Basis_P6_Q7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject_Diff2D_Tri_Nodal_Basis_P6_Q7.tst @@ -2,7 +2,7 @@ LocProject_Diff2D Tri Nodal Basis P=6, Q=7 LocProject_Diff2D - 3 13 13 6 6 7 7 0.0 0.0 1.0 1.0 0.5 1.0 + 3 15 15 6 6 7 7 0.0 0.0 1.0 1.0 0.5 1.0 2.99203e-14 diff --git a/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Lin_Deformed_Hex_Lagrange_Basis_P6_Q7.tst b/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Lin_Deformed_Hex_Lagrange_Basis_P6_Q7.tst index 264683b6f75fdf7104a4abda8685ae44c2900260..c400e8404a5a230749f6459c5cbc79d6fbb1f7d9 100644 --- a/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Lin_Deformed_Hex_Lagrange_Basis_P6_Q7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Lin_Deformed_Hex_Lagrange_Basis_P6_Q7.tst @@ -2,7 +2,7 @@ LocProject_Diff3D Lin. 
Deformed Hex Lagrange Basis, P=6, Q=7 LocProject_Diff3D - 8 8 8 8 6 6 6 7 7 7 0 0 0 1 0 0 1 1.5 0 0 1 0 0 0 1 1.5 0 1 1 1 1 0 1 1.5 + 8 10 10 10 6 6 6 7 7 7 0 0 0 1 0 0 1 1.5 0 0 1 0 0 0 1 1.5 0 1 1 1 1 0 1 1.5 0.0353098 diff --git a/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Reg_Hex_Lagrange_Basis_P6_Q7.tst b/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Reg_Hex_Lagrange_Basis_P6_Q7.tst index 81d42e2a79fa4f5c627c7fee46a35023b663d7e2..2a951277a87c31c1e7ae35bd9a4843f7cea0b936 100644 --- a/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Reg_Hex_Lagrange_Basis_P6_Q7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Reg_Hex_Lagrange_Basis_P6_Q7.tst @@ -2,7 +2,7 @@ LocProject_Diff3D Reg. Hex Lagrange Basis, P=6, Q=7 LocProject_Diff3D - 8 8 8 8 6 6 6 7 7 7 0 0 0 1 0 0 1 1 0 0 1 0 0 0 1 1 0 1 1 1 1 0 1 1 + 8 10 10 10 6 6 6 7 7 7 0 0 0 1 0 0 1 1 0 0 1 0 0 0 1 1 0 1 1 1 1 0 1 1 5.89495e-13 diff --git a/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Reg_Pyr_Mod_Basis_P6_Q7.tst b/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Reg_Pyr_Mod_Basis_P6_Q7.tst index 5ff49b8884dec428ec628418b1074e2967a2b513..0e4218d0ac4167c11083b3b4bf21a4379636b089 100644 --- a/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Reg_Pyr_Mod_Basis_P6_Q7.tst +++ b/library/Demos/LocalRegions/Tests/LocProject_Diff3D_Reg_Pyr_Mod_Basis_P6_Q7.tst @@ -2,13 +2,13 @@ LocProject_Diff3D Reg. 
Prism Modified Basis, P=6, Q=7 LocProject_Diff3D - 6 4 4 6 6 6 6 7 7 6 0 0 0 1 0 0 1 1 0 0 1 0 0.5 0 1 0.5 0.5 0.866 + 6 4 4 8 6 6 6 7 7 6 0 0 0 1 0 0 1 1 0 0 1 0 0.5 0 1 0.5 0.5 0.866 - 5.9107e-12 + 4.64852e-12 - 3.81348e-11 + 2.413e-11 diff --git a/library/Demos/LocalRegions/TetMesh.xml b/library/Demos/LocalRegions/TetMesh.xml deleted file mode 100644 index a5559631c713136b9513c99ea9d5276c480de189..0000000000000000000000000000000000000000 --- a/library/Demos/LocalRegions/TetMesh.xml +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - - A = 1.0 - B = 2.0 - C = 3.0 - - - - -1.0 -1.0 -1.0 - 1.0 -1.0 -1.0 - -1.0 1.0 -1.0 - -1.0 -1.0 1.0 - - - - - - 0 1 - 1 2 - 2 0 - 0 3 - 1 3 - 2 3 - - - - - - 0 1 2 - 0 4 3 - 1 5 4 - 2 5 3 - - - - - - - - - 0 1 2 3 - - - - - - A[0] - - - C[0] - - - diff --git a/library/Demos/LocalRegions/XmlFiles/HexMesh.xml b/library/Demos/LocalRegions/XmlFiles/HexMesh.xml deleted file mode 100644 index c0857979c698eabf390851298fc865165cd8af06..0000000000000000000000000000000000000000 --- a/library/Demos/LocalRegions/XmlFiles/HexMesh.xml +++ /dev/null @@ -1,70 +0,0 @@ - - - - - - - - - - A = 1.0 - B = 2.0 - C = 3.0 - - - - -1.0 -1.0 -1.0 - 1.0 -1.0 -1.0 - 1.0 1.0 -1.0 - -1.0 1.0 -1.0 - -1.0 -1.0 1.0 - 1.0 -1.0 1.0 - 1.0 1.0 1.0 - -1.0 1.0 1.0 - - - - - 0 1 - 1 2 - 2 3 - 3 0 - 0 4 - 1 5 - 2 6 - 3 7 - 4 5 - 5 6 - 6 7 - 7 4 - - - - - 0 1 2 3 - 0 5 8 4 - 1 6 9 5 - 2 7 10 6 - 3 4 11 7 - 8 9 10 11 - - - - - - - - - 0 1 2 3 4 5 - - - - - - H[0] - - - C[0] - - - diff --git a/library/Demos/LocalRegions/XmlFiles/PrismMesh.xml b/library/Demos/LocalRegions/XmlFiles/PrismMesh.xml deleted file mode 100644 index 5553afb5bb59385a5ed4da9e14b24b90e463d3d7..0000000000000000000000000000000000000000 --- a/library/Demos/LocalRegions/XmlFiles/PrismMesh.xml +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - - - - A = 1.0 - B = 2.0 - C = 3.0 - - - - -1.0 -1.0 -1.0 - 1.0 -1.0 -1.0 - 1.0 1.0 -1.0 - -1.0 1.0 -1.0 - -1.0 -1.0 1.0 - 1.0 -1.0 1.0 - - - - - - 0 1 - 1 2 - 2 3 - 3 0 - 0 4 - 
1 4 - 2 5 - 3 5 - 4 5 - - - - - - 0 1 2 3 - 0 5 4 - 1 6 8 5 - 2 7 6 - 3 7 8 4 - - - - - - - - - 0 1 2 3 4 - - - - - - R[0] - - - C[0] - - - diff --git a/library/Demos/LocalRegions/XmlFiles/PyrMesh.xml b/library/Demos/LocalRegions/XmlFiles/PyrMesh.xml deleted file mode 100644 index 6f414153ed5a030362f22bf4fd3a2f2312645a9b..0000000000000000000000000000000000000000 --- a/library/Demos/LocalRegions/XmlFiles/PyrMesh.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - - - - - A = 1.0 - B = 2.0 - C = 3.0 - - - - -1.0 -1.0 -1.0 - 1.0 -1.0 -1.0 - 1.0 1.0 -1.0 - -1.0 1.0 -1.0 - -1.0 -1.0 1.0 - - - - - - 0 1 - 1 2 - 2 3 - 3 0 - 0 4 - 1 4 - 2 4 - 3 4 - - - - - - 0 1 2 3 - 0 5 4 - 1 6 5 - 2 7 6 - 3 7 4 - - - - - - - - -

0 1 2 3 4

- -
- - - - P[0] - - - C[0] - -
-
diff --git a/library/Demos/LocalRegions/XmlFiles/TetMesh.xml b/library/Demos/LocalRegions/XmlFiles/TetMesh.xml deleted file mode 100644 index a5559631c713136b9513c99ea9d5276c480de189..0000000000000000000000000000000000000000 --- a/library/Demos/LocalRegions/XmlFiles/TetMesh.xml +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - - A = 1.0 - B = 2.0 - C = 3.0 - - - - -1.0 -1.0 -1.0 - 1.0 -1.0 -1.0 - -1.0 1.0 -1.0 - -1.0 -1.0 1.0 - - - - - - 0 1 - 1 2 - 2 0 - 0 3 - 1 3 - 2 3 - - - - - - 0 1 2 - 0 4 3 - 1 5 4 - 2 5 3 - - - - - - - - - 0 1 2 3 - - - - - - A[0] - - - C[0] - - - diff --git a/library/Demos/Misc/multi_array.cpp b/library/Demos/Misc/multi_array.cpp deleted file mode 100644 index d9f539827a14fd51b1458c46f79e9720268b976d..0000000000000000000000000000000000000000 --- a/library/Demos/Misc/multi_array.cpp +++ /dev/null @@ -1,101 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include - -typedef boost::multi_array ThreeDArray; - -using boost::shared_ptr; - -void PassLocalCopy(shared_ptr &ar) -{ - static shared_ptr localArray = Nektar::MemoryManager::AllocateSharedPtr(boost::extents[4][5][6]); - - (*localArray)[3][4][5] = 10.0; - - ar = localArray; -} - -shared_ptr ReturnLocalCopy(void) -{ - static shared_ptr localArray = Nektar::MemoryManager::AllocateSharedPtr(boost::extents[4][5][6]); - - (*localArray)[3][4][5] = 10.0; - - return localArray; -} - -int main(void) -{ - // The three different types of access to multiarrays. - typedef boost::multi_array OneDArrayT; - typedef boost::multi_array_ref OneDArrayRefT; - typedef boost::const_multi_array_ref OneDConstArrayRefT; - - - // Allocate 50 element array with indices ranging from 0 to 49 (default). - OneDArrayT my1dArray(boost::extents[50]); - my1dArray[20] = 5; - - // Share the data from the above array with another array of the smaller extent. - // object.data() retrieves the multi_array's underlying data. Wrap the last - // 30 elements in the reference. 
- OneDArrayRefT my1dArrayRef(my1dArray.data()+20, boost::extents[30]); - - my1dArrayRef[0] = 7; - - // Create a view of the matrix from the range in the original matrix of 20 to 39. - // The view will still use 0 to 19 as indices. However, you can't use *.data() to - // retrieve the data of the view. - - // Allows a range to be specified. This is used to provide a view of my1dArray, - // below. A view is a way to look at a part of the array, or to remap the array - // into different shape. - typedef OneDArrayT::index_range range; - - OneDArrayT::array_view<1>::type my1Dview = my1dArray[boost::indices[range(20,40)]]; - - // This refers to the same element used above when it was assigned to 5 then changed - // to 7 in the reference. - std::cout << my1Dview[0] << std::endl; - - // Shape just tells me the extent of the array in each dimension. If we did not - // use a base of 0 then we would have to take that into account. We can use - // iterators here that simplifies this. - const OneDArrayT::size_type *sizes = my1Dview.shape(); - OneDArrayT::index index; - for (index = 0; index::type my2DSubarray = my3dArray[1]; - - // To pass such an array we need to wrap it in a shared_ptr, then all - // the usual syntax applies including constifying. - boost::shared_ptr ar; - PassLocalCopy(ar); - - std::cout << (*ar)[3][4][5] << std::endl; - - // Return a shared_ptr to the one allocated internally. Pass back - // as the return value. 
- boost::shared_ptr ar2; - ar2 = ReturnLocalCopy(); - - std::cout << (*ar)[3][4][5] << std::endl; - - return 0; -} diff --git a/library/Demos/MultiRegions/CMakeLists.txt b/library/Demos/MultiRegions/CMakeLists.txt index 184a6f435410b1e6060828c4bc90ce364376ac8d..2b979fec3f952ac2f7887ab3c9a2f12b377f9897 100644 --- a/library/Demos/MultiRegions/CMakeLists.txt +++ b/library/Demos/MultiRegions/CMakeLists.txt @@ -1,76 +1,36 @@ -SET(LinkLibraries MultiRegions) - -SET(HelmholtzCont1DSource Helmholtz1D.cpp) -ADD_NEKTAR_EXECUTABLE(Helmholtz1D demos HelmholtzCont1DSource) -TARGET_LINK_LIBRARIES(Helmholtz1D ${LinkLibraries}) - -SET(HelmholtzCont2DSource Helmholtz2D.cpp) -ADD_NEKTAR_EXECUTABLE(Helmholtz2D demos HelmholtzCont2DSource) -TARGET_LINK_LIBRARIES(Helmholtz2D ${LinkLibraries}) - -#SET(Helmholtz2DHomo1DSource Helmholtz2DHomo1D.cpp) -#ADD_NEKTAR_EXECUTABLE(Helmholtz2DHomo1D Helmholtz2DHomo1DSource) -#TARGET_LINK_LIBRARIES(Helmholtz2DHomo1D ${LinkLibraries}) - -SET(HelmholtzCont3DSource Helmholtz3D.cpp) -ADD_NEKTAR_EXECUTABLE(Helmholtz3D demos HelmholtzCont3DSource) -TARGET_LINK_LIBRARIES(Helmholtz3D ${LinkLibraries}) - -SET(Helmholtz3DHomo1DSource Helmholtz3DHomo1D.cpp) -ADD_NEKTAR_EXECUTABLE(Helmholtz3DHomo1D demos Helmholtz3DHomo1DSource) -TARGET_LINK_LIBRARIES(Helmholtz3DHomo1D ${LinkLibraries}) - -SET(Helmholtz3DHomo2DSource Helmholtz3DHomo2D.cpp) -ADD_NEKTAR_EXECUTABLE(Helmholtz3DHomo2D demos Helmholtz3DHomo2DSource) -TARGET_LINK_LIBRARIES(Helmholtz3DHomo2D ${LinkLibraries}) - -SET(HDGHelmholtz1DSource HDGHelmholtz1D.cpp) -ADD_NEKTAR_EXECUTABLE(HDGHelmholtz1D demos HDGHelmholtz1DSource) -TARGET_LINK_LIBRARIES(HDGHelmholtz1D ${LinkLibraries}) - -SET(HDGHelmholtz2DSource HDGHelmholtz2D.cpp) -ADD_NEKTAR_EXECUTABLE(HDGHelmholtz2D demos HDGHelmholtz2DSource) -TARGET_LINK_LIBRARIES(HDGHelmholtz2D ${LinkLibraries}) - -SET(PostProcHDG2DSource PostProcHDG2D.cpp) -ADD_NEKTAR_EXECUTABLE(PostProcHDG2D demos PostProcHDG2DSource) -TARGET_LINK_LIBRARIES(PostProcHDG2D 
${LinkLibraries}) - -SET(HDGHelmholtz3DSource HDGHelmholtz3D.cpp) -ADD_NEKTAR_EXECUTABLE(HDGHelmholtz3D demos HDGHelmholtz3DSource) -TARGET_LINK_LIBRARIES(HDGHelmholtz3D ${LinkLibraries}) - -SET(PostProcHDG3DSource PostProcHDG3D.cpp) -ADD_NEKTAR_EXECUTABLE(PostProcHDG3D demos PostProcHDG3DSource) -TARGET_LINK_LIBRARIES(PostProcHDG3D ${LinkLibraries}) - -SET(HDGHelmholtz3DHomo1DSource HDGHelmholtz3DHomo1D.cpp) -ADD_NEKTAR_EXECUTABLE(HDGHelmholtz3DHomo1D demos HDGHelmholtz3DHomo1DSource) -TARGET_LINK_LIBRARIES(HDGHelmholtz3DHomo1D ${LinkLibraries}) - -SET(Deriv3DHomo1D_SingleModeSource Deriv3DHomo1D_SingleMode.cpp) -ADD_NEKTAR_EXECUTABLE(Deriv3DHomo1D_SingleMode demos Deriv3DHomo1D_SingleModeSource) -TARGET_LINK_LIBRARIES(Deriv3DHomo1D_SingleMode ${LinkLibraries}) - -SET(Deriv3DHomo1DSource Deriv3DHomo1D.cpp) -ADD_NEKTAR_EXECUTABLE(Deriv3DHomo1D demos Deriv3DHomo1DSource) -TARGET_LINK_LIBRARIES(Deriv3DHomo1D ${LinkLibraries}) - -SET(Deriv3DHomo2DSource Deriv3DHomo2D.cpp) -ADD_NEKTAR_EXECUTABLE(Deriv3DHomo2D demos Deriv3DHomo2DSource) -TARGET_LINK_LIBRARIES(Deriv3DHomo2D ${LinkLibraries}) - -SET(SteadyAdvectionDiffusionReactionCont2DSource SteadyAdvectionDiffusionReaction2D.cpp) -ADD_NEKTAR_EXECUTABLE(SteadyAdvectionDiffusionReaction2D demos SteadyAdvectionDiffusionReactionCont2DSource) -TARGET_LINK_LIBRARIES(SteadyAdvectionDiffusionReaction2D ${LinkLibraries}) - -# Generate list of available subdirectories -FILE(GLOB dir_list "*") -FOREACH(dir ${dir_list}) - IF(IS_DIRECTORY ${dir} AND EXISTS ${dir}/CMakeLists.txt) - ADD_SUBDIRECTORY(${dir}) - ENDIF(IS_DIRECTORY ${dir} AND EXISTS ${dir}/CMakeLists.txt) -ENDFOREACH(dir ${dir_list}) +ADD_NEKTAR_EXECUTABLE(Helmholtz1D + COMPONENT demos DEPENDS MultiRegions SOURCES Helmholtz1D.cpp) +ADD_NEKTAR_EXECUTABLE(Helmholtz2D + COMPONENT demos DEPENDS MultiRegions SOURCES Helmholtz2D.cpp) +ADD_NEKTAR_EXECUTABLE(Helmholtz3D + COMPONENT demos DEPENDS MultiRegions SOURCES Helmholtz3D.cpp) +ADD_NEKTAR_EXECUTABLE(Helmholtz3DHomo1D + 
COMPONENT demos DEPENDS MultiRegions SOURCES Helmholtz3DHomo1D.cpp) +ADD_NEKTAR_EXECUTABLE(Helmholtz3DHomo2D + COMPONENT demos DEPENDS MultiRegions SOURCES Helmholtz3DHomo2D.cpp) +ADD_NEKTAR_EXECUTABLE(HDGHelmholtz1D + COMPONENT demos DEPENDS MultiRegions SOURCES HDGHelmholtz1D.cpp) +ADD_NEKTAR_EXECUTABLE(HDGHelmholtz2D + COMPONENT demos DEPENDS MultiRegions SOURCES HDGHelmholtz2D.cpp) +ADD_NEKTAR_EXECUTABLE(HDGHelmholtz3D + COMPONENT demos DEPENDS MultiRegions SOURCES HDGHelmholtz3D.cpp) +ADD_NEKTAR_EXECUTABLE(HDGHelmholtz3DHomo1D + COMPONENT demos DEPENDS MultiRegions SOURCES HDGHelmholtz3DHomo1D.cpp) +ADD_NEKTAR_EXECUTABLE(PostProcHDG2D + COMPONENT demos DEPENDS MultiRegions SOURCES PostProcHDG2D.cpp) +ADD_NEKTAR_EXECUTABLE(PostProcHDG3D + COMPONENT demos DEPENDS MultiRegions SOURCES PostProcHDG3D.cpp) +ADD_NEKTAR_EXECUTABLE(Deriv3DHomo1D_SingleMode + COMPONENT demos DEPENDS MultiRegions SOURCES Deriv3DHomo1D_SingleMode.cpp) +ADD_NEKTAR_EXECUTABLE(Deriv3DHomo1D + COMPONENT demos DEPENDS MultiRegions SOURCES Deriv3DHomo1D.cpp) +ADD_NEKTAR_EXECUTABLE(Deriv3DHomo2D + COMPONENT demos DEPENDS MultiRegions SOURCES Deriv3DHomo2D.cpp) +ADD_NEKTAR_EXECUTABLE(SteadyAdvectionDiffusionReaction2D + COMPONENT demos DEPENDS MultiRegions SOURCES SteadyAdvectionDiffusionReaction2D.cpp) + +# Add ExtraDemos subdirectory. 
+ADD_SUBDIRECTORY(ExtraDemos) ADD_NEKTAR_TEST(Helmholtz1D_CG_P8) ADD_NEKTAR_TEST(Helmholtz1D_CG_P8_periodic) @@ -95,7 +55,7 @@ ADD_NEKTAR_TEST(Helmholtz2D_HDG_P7_Modes_AllBCs) ADD_NEKTAR_TEST(Helmholtz2D_CG_varP_Modes) -ADD_NEKTAR_TEST_LENGTHY(Helmholtz3D_CG_Hex) +ADD_NEKTAR_TEST(Helmholtz3D_CG_Hex LENGTHY) ADD_NEKTAR_TEST(Helmholtz3D_CG_Hex_Heterogeneous) ADD_NEKTAR_TEST(Helmholtz3D_CG_Hex_AllBCs) ADD_NEKTAR_TEST(Helmholtz3D_CG_Hex_AllBCs_iter_ml) @@ -107,9 +67,9 @@ ADD_NEKTAR_TEST(Helmholtz3D_CG_Prism_Deformed) ADD_NEKTAR_TEST(Helmholtz3D_CG_Pyr) ADD_NEKTAR_TEST(Helmholtz3D_CG_Pyr_Deformed) ADD_NEKTAR_TEST(Helmholtz3D_CG_Homo1D) -ADD_NEKTAR_TEST_LENGTHY(Helmholtz3D_HDG_Homo1D) +ADD_NEKTAR_TEST(Helmholtz3D_HDG_Homo1D LENGTHY) ADD_NEKTAR_TEST(Helmholtz3D_HDG_Prism) -ADD_NEKTAR_TEST_LENGTHY(Helmholtz3D_HDG_Hex_AllBCs) +ADD_NEKTAR_TEST(Helmholtz3D_HDG_Hex_AllBCs LENGTHY) ADD_NEKTAR_TEST(Helmholtz3D_HDG_Tet) ADD_NEKTAR_TEST(LinearAdvDiffReact2D_P7_Modes) @@ -133,7 +93,7 @@ IF (NEKTAR_USE_MPI) ADD_NEKTAR_TEST(Helmholtz2D_CG_P7_Modes_xxt_ml) ADD_NEKTAR_TEST(Helmholtz3D_CG_Hex_AllBCs_iter_ml_par3) ADD_NEKTAR_TEST(Helmholtz3D_CG_Prism_iter_ml_par3) - ADD_NEKTAR_TEST_LENGTHY(Helmholtz3D_CG_Hex_AllBCs_xxt_sc_par3) + ADD_NEKTAR_TEST(Helmholtz3D_CG_Hex_AllBCs_xxt_sc_par3 LENGTHY) ADD_NEKTAR_TEST(Helmholtz2D_CG_P14_xxt_per) ADD_NEKTAR_TEST(Helmholtz2D_CG_varP_Modes_par) @@ -141,7 +101,7 @@ IF (NEKTAR_USE_MPI) # To be resolved in a separate branch. 
# ADD_NEKTAR_TEST(Helmholtz3D_CG_Tet_iter_global_par3) - ADD_NEKTAR_TEST_LENGTHY(Helmholtz2D_HDG_P7_Modes_AllBCs_par2) + ADD_NEKTAR_TEST(Helmholtz2D_HDG_P7_Modes_AllBCs_par2 LENGTHY) ADD_NEKTAR_TEST(Helmholtz3D_HDG_Prism_par2) ADD_NEKTAR_TEST(Helmholtz3D_HDG_Hex_AllBCs_par2) diff --git a/library/Demos/MultiRegions/ExtraDemos/Advection.cpp b/library/Demos/MultiRegions/ExtraDemos/Advection.cpp deleted file mode 100644 index 1852c1260041c40c6032cc595260cff6d9b2b783..0000000000000000000000000000000000000000 --- a/library/Demos/MultiRegions/ExtraDemos/Advection.cpp +++ /dev/null @@ -1,283 +0,0 @@ -#include -#include -#include - -#include - -using namespace std; -using namespace Nektar; - - -void rhsFunction(MultiRegions::DisContField2DSharedPtr hpExp, - Array v1, - Array v2, - Array v1Trace, - Array v2Trace, - Array u0, - Array &rhs, - const NekDouble time); - -int main(int argc, char *argv[]) -{ - - if(argc != 2) - { - fprintf(stderr,"Usage: AdvectionDis meshfile \n"); - exit(1); - } - - cout << "=====================================================" <::AllocateSharedPtr(mesh,boundaryConds,"u"); - //----------------------------------------- - - //----------------------------------------- - // Read and evaluate the initial conditions - int nTotQuadPoints = u->GetTotPoints(); - - Array x1(nTotQuadPoints,0.0); - Array x2(nTotQuadPoints,0.0); - - u->GetCoords(x1,x2); - - Array u0(nTotQuadPoints); - Array u1(nTotQuadPoints); - Array v1(nTotQuadPoints,0.0); - Array v2(nTotQuadPoints,0.0); - - SpatialDomains::ConstForcingFunctionShPtr u0SolutionEquation - = boundaryConds.GetExactSolution(boundaryConds.GetVariable(0)); - - u0SolutionEquation->Evaluate(x1,x2, u0); - for(int i = 0; i < nTotQuadPoints; ++i) - { - v1[i] = 1.0;//2.0*M_PI*x2[i]; - v2[i] = 0.0;//-2.0*M_PI*x1[i]; - } - - u->SetPhys(u0); - //----------------------------------------------- - - //----------------------------------------------- - // Initiate the velocities on the trace - int nTotTracePoints = 
u->GetTrace()->GetNpoints(); - - Array v1Trace(nTotTracePoints,0.0); - Array v2Trace(nTotTracePoints,0.0); - - u->SetPhys(v1); - u->ExtractTracePhys(v1Trace); - - u->SetPhys(v2); - u->ExtractTracePhys(v2Trace); - - u->SetPhys(u0); - //----------------------------------------------- - - //----------------------------------------------- - // Write initial conditions to file - stringstream outfileName; - outfileName << "Advection_Quad_P9_initial.dat"; - ofstream outfile((outfileName.str()).data()); - u->WriteToFile(outfile,eTecplot); - //----------------------------------------------- - - //----------------------------------------------- - // RK vectors - Array f1(nTotQuadPoints,0.0); - Array f2(nTotQuadPoints,0.0); - //----------------------------------------------- - - //----------------------------------------------- - // Time stepping parameters - NekDouble dt = boundaryConds.GetParameter("Dt"); - int timeStep = boundaryConds.GetParameter("Steps"); - int chkStep = boundaryConds.GetParameter("Check"); - //----------------------------------------------- - - //----------------------------------------------- - // start the time loop - int chk = 0; - for (int i = 1; i < timeStep + 1; ++i) - { - - // Store the starting value - u0 = u->GetPhys(); - - //RK step one - rhsFunction(u,v1,v2,v1Trace,v2Trace,u0,f1,i*dt); - Vmath::Svtvp(nTotQuadPoints,dt,f1,1,u0,1,u1,1); - u->SetPhys(u1); - - - //RK step two - rhsFunction(u,v1,v2,v1Trace,v2Trace,u1,f2,i*dt+dt); - for(int j = 0; j < nTotQuadPoints; ++j) - { - u1[j] = u0[j] + dt *(0.5*f1[j]+0.5*f2[j]); - } - u->SetPhys(u1); - - - //----------------------------------------------- - // Write chk files - if (i%chkStep == 0) - { - cout << "Time: " << i*dt << endl; - stringstream outfileName; - outfileName << "Advection_Quad_P9_"<< chk <<".dat"; - ofstream outfile((outfileName.str()).data()); - u->WriteToFile(outfile,eTecplot); - ++chk; - } - //----------------------------------------------- - - 
//----------------------------------------------- - // Compute the L2 error - if (i == timeStep) - { - Array exactSolution(nTotQuadPoints); - SpatialDomains::ConstForcingFunctionShPtr exactSolutionEquation - = boundaryConds.GetExactSolution(boundaryConds.GetVariable(0)); - - exactSolutionEquation->Evaluate(x1,x2, exactSolution); - MultiRegions::DisContField2DSharedPtr exactSolutionExp = - MemoryManager::AllocateSharedPtr(*u); - exactSolutionExp->SetPhys(exactSolution); - - NekDouble error = u->L2(exactSolutionExp->GetPhys()); - - // Display the output - cout << "Ndof = " << u->GetNcoeffs() << " => Error = " << error << endl; - } - //----------------------------------------------- - - } - } -} - - -void rhsFunction(MultiRegions::DisContField2DSharedPtr hpExp, - Array v1, - Array v2, - Array v1Trace, - Array v2Trace, - Array u0, - Array &rhs, const NekDouble time) -{ - - int nTotQuadPoints = hpExp->GetTotPoints(); - int nTotCoeffs = hpExp->GetNcoeffs(); - int nTotTracePoints = hpExp->GetTrace()->GetNpoints(); - - //--------------------------------------- - // get the trace values - - Array Fwd(nTotTracePoints,0.0); - Array Bwd(nTotTracePoints,0.0); - Array upwind1(nTotTracePoints,0.0); - Array upwind2(nTotTracePoints,0.0); - - Array > normals; - - hpExp->GetFwdBwdTracePhys(Fwd,Bwd); - //--------------------------------------- - - - //--------------------------------------- - // Compute the flux vector - - Array fluxVector0(nTotQuadPoints,0.0); - Array fluxVector1(nTotQuadPoints,0.0); - - // Fill the flux vector using the old time step - for (int i = 0; i < nTotQuadPoints; ++i) - { - fluxVector0[i] = v1[i]*u0[i]; - fluxVector1[i] = v2[i]*u0[i]; - } - //--------------------------------------- - - - Array iprod_rhs(nTotCoeffs,0.0); - Array tmp(nTotCoeffs,0.0); - Array iprod_rhs_test(nTotCoeffs,0.0); - //-------------------------------------- - - - //--------------------------------------- - // Compute the (Grad \phi \cdot F) - - 
hpExp->IProductWRTDerivBase(0,fluxVector0,iprod_rhs); - hpExp->IProductWRTDerivBase(1,fluxVector1,tmp); - Vmath::Vadd(nTotCoeffs,tmp,1,iprod_rhs,1,iprod_rhs,1); - //--------------------------------------- - - - //--------------------------------------- - // Evaluate upwind numerical flux - // note: upwind1 contains a_1 * upwind - // upwind2 contains a_2 * upwind - -#if 1 - Array > Vel(2); - Vel[0] = v1Trace; - Vel[1] = v2Trace; - - hpExp->GetTrace()->Upwind(Vel,Fwd,Bwd,upwind1); - - Vmath::Vmul(nTotTracePoints,upwind1,1,v2Trace,1,upwind2,1); - Vmath::Vmul(nTotTracePoints,upwind1,1,v1Trace,1,upwind1,1); -#else - for (int i = 0; i < nTotTracePoints; ++i) - { - if (v1Trace[i]*normals[0][i]+v2Trace[i]*normals[1][i] > 0.0) - { - upwind1[i] = v1Trace[i]*Fwd[i]; - upwind2[i] = v2Trace[i]*Fwd[i]; - } - else - { - upwind1[i] = v1Trace[i]*Bwd[i]; - upwind2[i] = v2Trace[i]*Bwd[i]; - } - } -#endif - - Vmath::Neg(nTotTracePoints,upwind1,1); - Vmath::Neg(nTotTracePoints,upwind2,1); - - // this should then be changed - hpExp->AddTraceIntegral(upwind1, upwind2, iprod_rhs); - - hpExp->MultiplyByElmtInvMass(iprod_rhs,hpExp->UpdateCoeffs()); - - - //--------------------------------------- - // Go to physical space - hpExp->BwdTrans(hpExp->GetCoeffs(),rhs); - //--------------------------------------- -} diff --git a/library/Demos/MultiRegions/ExtraDemos/CMakeLists.txt b/library/Demos/MultiRegions/ExtraDemos/CMakeLists.txt index 9fa3bf95a80ce1caf01ea237620e1ebdc63a4680..d4fd6abb88e00f223e77a297d9ce9938722e1e8a 100644 --- a/library/Demos/MultiRegions/ExtraDemos/CMakeLists.txt +++ b/library/Demos/MultiRegions/ExtraDemos/CMakeLists.txt @@ -1,48 +1,23 @@ -SET(LinkLibraries MultiRegions) - -SET(ProjectLoc1DSource ProjectLoc1D.cpp) -ADD_NEKTAR_EXECUTABLE(ProjectLoc1D extra-demos ProjectLoc1DSource) -TARGET_LINK_LIBRARIES(ProjectLoc1D ${LinkLibraries}) - -SET(ProjectCont1DSource ProjectCont1D.cpp) -ADD_NEKTAR_EXECUTABLE(ProjectCont1D extra-demos ProjectCont1DSource) 
-TARGET_LINK_LIBRARIES(ProjectCont1D ${LinkLibraries}) - -SET(ProjectLoc3DSource ProjectLoc3D.cpp) -ADD_NEKTAR_EXECUTABLE(ProjectLoc3D extra-demos ProjectLoc3DSource) -TARGET_LINK_LIBRARIES(ProjectLoc3D ${LinkLibraries}) - -SET(ProjectCont3DSource ProjectCont3D.cpp) -ADD_NEKTAR_EXECUTABLE(ProjectCont3D extra-demos ProjectCont3DSource) -TARGET_LINK_LIBRARIES(ProjectCont3D ${LinkLibraries}) - -SET(ProjectContField3DSource ProjectContField3D.cpp) -ADD_NEKTAR_EXECUTABLE(ProjectContField3D extra-demos ProjectContField3DSource) -TARGET_LINK_LIBRARIES(ProjectContField3D ${LinkLibraries}) - -SET(ProjectLoc2DSource ProjectLoc2D.cpp) -ADD_NEKTAR_EXECUTABLE(ProjectLoc2D extra-demos ProjectLoc2DSource) -TARGET_LINK_LIBRARIES(ProjectLoc2D ${LinkLibraries}) - -SET(ProjectCont2DSource ProjectCont2D.cpp) -ADD_NEKTAR_EXECUTABLE(ProjectCont2D extra-demos ProjectCont2DSource) -TARGET_LINK_LIBRARIES(ProjectCont2D ${LinkLibraries}) - -SET(SteadyLinearAdvectionReactionCont2DSource SteadyLinearAdvectionReaction2D.cpp) -ADD_NEKTAR_EXECUTABLE(SteadyLinearAdvectionReaction2D extra-demos SteadyLinearAdvectionReactionCont2DSource) -TARGET_LINK_LIBRARIES(SteadyLinearAdvectionReaction2D ${LinkLibraries}) - -#SET(EigValsLinearAdvectionCont2DSource EigValsLinearAdvection2D.cpp) -#ADD_NEKTAR_EXECUTABLE(EigValsLinearAdvection2D EigValsLinearAdvectionCont2DSource) -#TARGET_LINK_LIBRARIES(EigValsLinearAdvection2D ${LinkLibraries}) -#SET_LAPACK_LINK_LIBRARIES(EigValsLinearAdvection2D) - -SET(PostProcessingSource PostProcessing.cpp) -ADD_NEKTAR_EXECUTABLE(PostProcessing extra-demos PostProcessingSource) -TARGET_LINK_LIBRARIES(PostProcessing ${LinkLibraries}) +ADD_NEKTAR_EXECUTABLE(ProjectLoc1D + COMPONENT demos-extra DEPENDS MultiRegions SOURCES ProjectLoc1D.cpp) +ADD_NEKTAR_EXECUTABLE(ProjectLoc2D + COMPONENT demos-extra DEPENDS MultiRegions SOURCES ProjectLoc2D.cpp) +ADD_NEKTAR_EXECUTABLE(ProjectLoc3D + COMPONENT demos-extra DEPENDS MultiRegions SOURCES ProjectLoc3D.cpp) 
+ADD_NEKTAR_EXECUTABLE(ProjectCont1D + COMPONENT demos-extra DEPENDS MultiRegions SOURCES ProjectCont1D.cpp) +ADD_NEKTAR_EXECUTABLE(ProjectCont2D + COMPONENT demos-extra DEPENDS MultiRegions SOURCES ProjectCont2D.cpp) +ADD_NEKTAR_EXECUTABLE(ProjectCont3D + COMPONENT demos-extra DEPENDS MultiRegions SOURCES ProjectCont3D.cpp) +ADD_NEKTAR_EXECUTABLE(ProjectContField3D + COMPONENT demos-extra DEPENDS MultiRegions SOURCES ProjectContField3D.cpp) +ADD_NEKTAR_EXECUTABLE(SteadyLinearAdvectionReaction2D + COMPONENT demos-extra DEPENDS MultiRegions SOURCES SteadyLinearAdvectionReaction2D.cpp) +ADD_NEKTAR_EXECUTABLE(PostProcessing + COMPONENT demos-extra DEPENDS MultiRegions SOURCES PostProcessing.cpp) ADD_NEKTAR_TEST(ProjectionCont2D_P7) - IF (NEKTAR_USE_MPI) ADD_NEKTAR_TEST(ProjectionCont2D_P7_par) ENDIF (NEKTAR_USE_MPI) diff --git a/library/Demos/MultiRegions/ExtraDemos/EigValsLinearAdvection2D.cpp b/library/Demos/MultiRegions/ExtraDemos/EigValsLinearAdvection2D.cpp deleted file mode 100644 index 0f345064d839ede1e852133dd0b8ec8446eeebd3..0000000000000000000000000000000000000000 --- a/library/Demos/MultiRegions/ExtraDemos/EigValsLinearAdvection2D.cpp +++ /dev/null @@ -1,112 +0,0 @@ -#include -#include - -#include -#include -#include -#include - -using namespace std; -using namespace Nektar; - -int main(int argc, char *argv[]) -{ - LibUtilities::SessionReaderSharedPtr vSession - = LibUtilities::SessionReader::CreateInstance(argc, argv); - - string meshfile(vSession->GetFilename()); - - MultiRegions::ContField2DSharedPtr Exp; - int i, nq, coordim; - Array fce; - Array xc0,xc1,xc2; - NekDouble ax,ay; - - if(argc != 3) - { - fprintf(stderr,"Usage: EigValsLinearAdvection2D meshfile boundaryfile\n"); - exit(1); - } - - //---------------------------------------------- - // Read in mesh from input file - SpatialDomains::MeshGraph2D graph2D; - - graph2D.ReadGeometry(meshfile); - graph2D.ReadExpansions(meshfile); - //---------------------------------------------- - - 
//---------------------------------------------- - // read the problem parameters from input file - string bcfile(argv[argc-1]); - SpatialDomains::BoundaryConditions bcs(&graph2D); - bcs.Read(bcfile); - //---------------------------------------------- - - //---------------------------------------------- - // Get Advection Velocity - ax = bcs.GetParameter("Advection_x"); - ay = bcs.GetParameter("Advection_y"); - //---------------------------------------------- - - //---------------------------------------------- - // Print summary of solution details - const SpatialDomains::ExpansionMap &expansions = graph2D.GetExpansions(); - LibUtilities::BasisKey bkey0 - = expansions.begin()->second->m_basisKeyVector[0]; - LibUtilities::BasisKey bkey1 - = expansions.begin()->second->m_basisKeyVector[1]; - cout << "Calc. LinearAdvection E-vals:" << endl; - cout << " Advection_x : " << ax << endl; - cout << " Advection_y : " << ay << endl; - cout << " Expansion : (" << SpatialDomains::kExpansionTypeStr[bkey0.GetBasisType()] <<","<< SpatialDomains::kExpansionTypeStr[bkey1.GetBasisType()] << ")" << endl; - cout << " No. modes : " << bkey0.GetNumModes() << endl; - cout << endl; - //---------------------------------------------- - - //---------------------------------------------- - // Define Expansion - Exp = MemoryManager:: - AllocateSharedPtr(vSession,graph2D,bcs); - //---------------------------------------------- - - //---------------------------------------------- - // Set up coordinates of mesh for Forcing function evaluation - coordim = Exp->GetCoordim(0); - nq = Exp->GetTotPoints(); - - xc0 = Array(nq,0.0); - xc1 = Array(nq,0.0); - xc2 = Array(nq,0.0); - - switch(coordim) - { - case 1: - Exp->GetCoords(xc0); - break; - case 2: - Exp->GetCoords(xc0,xc1); - break; - case 3: - Exp->GetCoords(xc0,xc1,xc2); - break; - } - //---------------------------------------------- - - //---------------------------------------------- - // Evaluate Eigenvalues/vector of Linear Advection Op. 
- int nmodes = Exp->GetNcoeffs(); - Array Real(nmodes), Imag(nmodes), Evecs(nmodes*nmodes); - - Exp->LinearAdvectionEigs(ax,ay,Real,Imag,Evecs); - //---------------------------------------------- - - cout << "Eignvalues: [Real, Imag]" << endl; - for(i = 0; i < Real.num_elements(); ++i) - { - cout << Real[i] <<", "<< Imag[i] << endl; - } - - return 0; -} - diff --git a/library/Demos/MultiRegions/ExtraDemos/HDGHelmholtz3D.cpp b/library/Demos/MultiRegions/ExtraDemos/HDGHelmholtz3D.cpp deleted file mode 100644 index 084015f0df3208beb89984696fa274d7111f76ac..0000000000000000000000000000000000000000 --- a/library/Demos/MultiRegions/ExtraDemos/HDGHelmholtz3D.cpp +++ /dev/null @@ -1,174 +0,0 @@ -#include -#include - -#include - -#define TIMING - -#ifdef TIMING -#include -#define Timing(s) \ - fprintf(stdout,"%s Took %g seconds\n",s,(clock()-st)/cps); \ - st = clock(); -#else -#define Timing(s) \ - /* Nothing */ -#endif - -using namespace std; -using namespace Nektar; - -int main(int argc, char *argv[]) -{ - MultiRegions::DisContField3DSharedPtr Exp, Fce; - MultiRegions::ExpListSharedPtr DerExp1, DerExp2, DerExp3; - int i, nq, coordim; - Array fce; - Array xc0,xc1,xc2; - NekDouble lambda; - double st, cps = (double)CLOCKS_PER_SEC; - - if(argc != 3) - { - fprintf(stderr,"Usage: Helmholtz3D meshfile boundaryfile\n"); - exit(1); - } - - //---------------------------------------------- - // Read in mesh from input file - string meshfile(argv[1]); - SpatialDomains::MeshGraph3D graph3D; - graph3D.ReadGeometry(meshfile); - graph3D.ReadExpansions(meshfile); - //---------------------------------------------- - - //---------------------------------------------- - // read the problem parameters from input file - string bcfile(argv[2]); - SpatialDomains::BoundaryConditions bcs(&graph3D); - bcs.Read(bcfile); - //---------------------------------------------- - - //---------------------------------------------- - // Print summary of solution details - lambda = bcs.GetParameter("Lambda"); 
- const SpatialDomains::ExpansionVector &expansions = graph3D.GetExpansions(); - cout << "Solving 3D Helmholtz:" << endl; - cout << " Lambda : " << lambda << endl; -#if 0 - for(i = 0; i < expansions.size(); ++i) - { - LibUtilities::BasisKey bkey = graph2D.GetBasisKey(expansions[i],0); - cout << " Element " << i << " : " << endl; - cout << " Expansion : " << LibUtilities::BasisTypeMap[bkey.GetBasisType()] << endl; - cout << " No. modes : " << bkey.GetNumModes() << endl; - } - cout << endl; -#endif - //---------------------------------------------- - - //---------------------------------------------- - // Define Expansion - Exp = MemoryManager:: - AllocateSharedPtr(graph3D,bcs); - //---------------------------------------------- - Timing("Read files and define exp .."); - - //---------------------------------------------- - // Set up coordinates of mesh for Forcing function evaluation - coordim = Exp->GetCoordim(0); - nq = Exp->GetPointsTot(); - - xc0 = Array(nq,0.0); - xc1 = Array(nq,0.0); - xc2 = Array(nq,0.0); - - switch(coordim) - { - case 1: - Exp->GetCoords(xc0); - break; - case 2: - Exp->GetCoords(xc0,xc1); - break; - case 3: - Exp->GetCoords(xc0,xc1,xc2); - break; - } - //---------------------------------------------- - - //---------------------------------------------- - // Define forcing function for first variable defined in file - fce = Array(nq); - SpatialDomains::ConstForcingFunctionShPtr ffunc - = bcs.GetForcingFunction(bcs.GetVariable(0)); - - ffunc->Evaluate(xc0,xc1,xc2, fce); - //---------------------------------------------- - - - //---------------------------------------------- - // Setup expansion containing the forcing function - Fce = MemoryManager::AllocateSharedPtr(*Exp); - Fce->SetPhys(fce); - //---------------------------------------------- - Timing("Define forcing .."); - - //---------------------------------------------- - // Helmholtz solution taking physical forcing - Exp->HelmSolve(*Fce, lambda); - 
//---------------------------------------------- - - Timing("Helmholtz Solve .."); - -#if 0 - for(i = 0; i < 100; ++i) - { - Exp->HelmSolve(*Fce, lambda); - } - - Timing("100 Helmholtz Solves:... "); -#endif - - //---------------------------------------------- - // Backward Transform Solution to get solved values at - Exp->BwdTrans(*Exp); - //---------------------------------------------- - Timing("Backard Transform .."); - - //---------------------------------------------- - // Write solution - ofstream outfile("UDGHelmholtzFile3D.dat"); - Exp->WriteToFile(outfile); - //---------------------------------------------- - - //---------------------------------------------- - // See if there is an exact solution, if so - // evaluate and plot errors - SpatialDomains::ConstExactSolutionShPtr ex_sol = - bcs.GetExactSolution(bcs.GetVariable(0)); - - if(ex_sol) - { - //---------------------------------------------- - // evaluate exact solution - ex_sol->Evaluate(xc0,xc1,xc2, fce); - - //---------------------------------------------- - - //-------------------------------------------- - // Calculate L_inf error - Fce->SetPhys(fce); - Fce->SetPhysState(true); - - - cout << "L infinity error: " << Exp->Linf(*Fce) << endl; - cout << "L 2 error: " << Exp->L2 (*Fce) << endl; - //-------------------------------------------- - } - - Timing("Output .."); - //---------------------------------------------- - return 0; -} - diff --git a/library/Demos/MultiRegions/ExtraDemos/Helmholtz3D.cpp b/library/Demos/MultiRegions/ExtraDemos/Helmholtz3D.cpp deleted file mode 100644 index 4e24b70d7ed6a8e34ee329cb6b47e37559389bde..0000000000000000000000000000000000000000 --- a/library/Demos/MultiRegions/ExtraDemos/Helmholtz3D.cpp +++ /dev/null @@ -1,132 +0,0 @@ -#include -#include - -#include - -using namespace std; -using namespace Nektar; - -int main(int argc, char *argv[]) -{ - MultiRegions::ContField3DSharedPtr Exp, Fce; - MultiRegions::ExpListSharedPtr DerExp1, DerExp2, DerExp3; - int i, nq, 
coordim; - Array fce; - Array xc0,xc1,xc2; - NekDouble lambda; - - if(argc != 3) - { - fprintf(stderr,"Usage: Helmholtz3D meshfile boundaryfile\n"); - exit(1); - } - - //---------------------------------------------- - // Read in mesh from input file - string meshfile(argv[1]); - SpatialDomains::MeshGraph3D graph3D; - graph3D.ReadGeometry(meshfile); - graph3D.ReadExpansions(meshfile); - //---------------------------------------------- - - //---------------------------------------------- - // read the problem parameters from input file - string bcfile(argv[2]); - SpatialDomains::BoundaryConditions bcs(&graph3D); - bcs.Read(bcfile); - //---------------------------------------------- - - //---------------------------------------------- - // Print summary of solution details - lambda = bcs.GetParameter("Lambda"); - cout << "Solving 3D Helmholtz:" << endl; - cout << " Lambda : " << lambda << endl; - cout << endl; - //---------------------------------------------- - - //---------------------------------------------- - // Define Expansion - Exp = MemoryManager::AllocateSharedPtr(graph3D,bcs); - //---------------------------------------------- - - - //---------------------------------------------- - // Set up coordinates of mesh for Forcing function evaluation - coordim = Exp->GetCoordim(0); - nq = Exp->GetTotPoints(); - - xc0 = Array(nq,0.0); - xc1 = Array(nq,0.0); - xc2 = Array(nq,0.0); - - switch(coordim) - { - case 1: - Exp->GetCoords(xc0); - break; - case 2: - Exp->GetCoords(xc0,xc1); - break; - case 3: - Exp->GetCoords(xc0,xc1,xc2); - break; - } - //---------------------------------------------- - - //---------------------------------------------- - // Define forcing function for first variable defined in file - fce = Array(nq); - SpatialDomains::ConstForcingFunctionShPtr ffunc = bcs.GetForcingFunction(bcs.GetVariable(0)); - - ffunc->Evaluate(xc0,xc1,xc2,fce); - //---------------------------------------------- - - //---------------------------------------------- - // 
Setup expansion containing the forcing function - Fce = MemoryManager::AllocateSharedPtr(*Exp); - Fce->SetPhys(fce); - //---------------------------------------------- - - //---------------------------------------------- - // Helmholtz solution taking physical forcing - Exp->HelmSolve(Fce->GetPhys(), Exp->UpdateCoeffs(), lambda); - //---------------------------------------------- - - //---------------------------------------------- - // Backward Transform Solution to get solved values at - Exp->BwdTrans(Exp->GetCoeffs(), Exp->UpdatePhys()); - //---------------------------------------------- - - //---------------------------------------------- - // Write solution - ofstream outfile("HelmholtzFile3D.pos"); - Exp->WriteToFile(outfile,eGmsh); - //---------------------------------------------- - - //---------------------------------------------- - // See if there is an exact solution, if so - // evaluate and plot errors - SpatialDomains::ConstExactSolutionShPtr ex_sol = bcs.GetExactSolution(bcs.GetVariable(0)); - - if(ex_sol) - { - //---------------------------------------------- - // evaluate exact solution - ex_sol->Evaluate(xc0,xc1,xc2,fce); - - //---------------------------------------------- - - //-------------------------------------------- - // Calculate L_inf error - Fce->SetPhys(fce); - Fce->SetPhysState(true); - - - cout << "L infinity error: " << Exp->Linf(Fce->GetPhys()) << endl; - cout << "L 2 error: " << Exp->L2 (Fce->GetPhys()) << endl; - //-------------------------------------------- - } - //---------------------------------------------- - return 0; -} - diff --git a/library/Demos/MultiRegions/ExtraDemos/Laplace2D.cpp b/library/Demos/MultiRegions/ExtraDemos/Laplace2D.cpp deleted file mode 100644 index f606ff435cb06f08ca75d229f73cdcf51bf97290..0000000000000000000000000000000000000000 --- a/library/Demos/MultiRegions/ExtraDemos/Laplace2D.cpp +++ /dev/null @@ -1,175 +0,0 @@ -#include -#include - -#include - -using namespace std; -using namespace Nektar; - 
-int main(int argc, char *argv[]) -{ - MultiRegions::ContField2DSharedPtr Exp,Fce; - int i, j, nq, coordim; - Array fce; - Array xc0,xc1,xc2; - NekDouble lambda; - - if(argc != 3) - { - fprintf(stderr,"Usage: Laplace2D meshfile boundaryfile\n"); - exit(1); - } - - //---------------------------------------------- - // Read in mesh from input file - string meshfile(argv[1]); - SpatialDomains::MeshGraph2D graph2D; - graph2D.ReadGeometry(meshfile); - graph2D.ReadExpansions(meshfile); - //---------------------------------------------- - - //---------------------------------------------- - // read the problem parameters from input file - string bcfile(argv[2]); - SpatialDomains::BoundaryConditions bcs(&graph2D); - bcs.Read(bcfile); - //---------------------------------------------- - - //---------------------------------------------- - // Print summary of solution details - const SpatialDomains::ExpansionVector &expansions = graph2D.GetExpansions(); - LibUtilities::BasisKey bkey = graph2D.GetBasisKey(expansions[0],0); - cout << "Solving 2D Laplace:" << endl; - cout << " Expansion : " << SpatialDomains::kExpansionTypeStr[expansions[0]->m_ExpansionType] << endl; - cout << " No. 
modes : " << (int) expansions[0]->m_NumModesEqn.Evaluate() << endl; - cout << endl; - //---------------------------------------------- - - //---------------------------------------------- - // Define Expansion - Exp = MemoryManager:: - AllocateSharedPtr(graph2D,bcs); - //---------------------------------------------- - - - //---------------------------------------------- - // Set up coordinates of mesh for Forcing function evaluation - coordim = Exp->GetCoordim(0); - nq = Exp->GetTotPoints(); - - xc0 = Array(nq,0.0); - xc1 = Array(nq,0.0); - xc2 = Array(nq,0.0); - - switch(coordim) - { - case 1: - Exp->GetCoords(xc0); - break; - case 2: - Exp->GetCoords(xc0,xc1); - break; - case 3: - Exp->GetCoords(xc0,xc1,xc2); - break; - } - //---------------------------------------------- - - //---------------------------------------------- - // Define forcing function for first variable defined in file - fce = Array(nq); - SpatialDomains::ConstForcingFunctionShPtr ffunc - = bcs.GetForcingFunction(bcs.GetVariable(0)); - - ffunc->Evaluate(xc0,xc1,xc2,fce); - //---------------------------------------------- - - //---------------------------------------------- - // Setup expansion containing the forcing function - Fce = MemoryManager::AllocateSharedPtr(*Exp); - Fce->SetPhys(fce); - //---------------------------------------------- - - //---------------------------------------------- - // Set up the variable laplacian coefficients - bool useVariableCoeffs = false; - if(bcs.CheckForParameter("UseVariableCoefficients")) - { - if((int) bcs.GetParameter("UseVariableCoefficients") != 0) - { - useVariableCoeffs = true; - } - } - - Array > lapcoeff(3); - if(useVariableCoeffs) - { - std::string lapcoeffstr[3] = {"LaplacianCoefficient_00", - "LaplacianCoefficient_01", - "LaplacianCoefficient_11"}; - for(i = 0 ; i < 3; i++) - { - lapcoeff[i] = Array(nq); - - SpatialDomains::ConstUserDefinedEqnShPtr cfunc = bcs.GetUserDefinedEqn(lapcoeffstr[i]); - - cfunc->Evaluate(xc0,xc1,xc2,lapcoeff[i]); - } 
- } - //---------------------------------------------- - - //---------------------------------------------- - // Laplacian solution taking physical forcing - if(useVariableCoeffs) - { - Exp->LaplaceSolve(*Fce,lapcoeff); - } - else - { - Exp->LaplaceSolve(*Fce); - } - //---------------------------------------------- - - //---------------------------------------------- - // Backward Transform Solution to get solved values at - Exp->BwdTrans(*Exp); - //---------------------------------------------- - - //---------------------------------------------- - // Write solution - ofstream outfile("LaplaceFile2D.pos"); - Exp->WriteToFile(outfile,eGmsh); - outfile.close(); - - ofstream outfile2("LaplaceFile2D.dat"); - Exp->WriteToFile(outfile2,eTecplot); - outfile2.close(); - //---------------------------------------------- - - //---------------------------------------------- - // See if there is an exact solution, if so - // evaluate and plot errors - SpatialDomains::ConstExactSolutionShPtr ex_sol = - bcs.GetExactSolution(bcs.GetVariable(0)); - - if(ex_sol) - { - //---------------------------------------------- - // evaluate exact solution - ex_sol->Evaluate(xc0,xc1,xc2,fce); - //---------------------------------------------- - - //-------------------------------------------- - // Calculate L_inf error - Fce->SetPhys(fce); - Fce->SetPhysState(true); - - - cout << "L infinity error: " << Exp->Linf(*Fce) << endl; - cout << "L 2 error: " << Exp->L2 (*Fce) << endl; - //-------------------------------------------- - } - //---------------------------------------------- - return 0; -} - diff --git a/library/Demos/MultiRegions/ExtraDemos/TimingCGHelmSolve2D.cpp b/library/Demos/MultiRegions/ExtraDemos/TimingCGHelmSolve2D.cpp deleted file mode 100644 index 454640428767fa7dce7299ef8445207298b6816f..0000000000000000000000000000000000000000 --- a/library/Demos/MultiRegions/ExtraDemos/TimingCGHelmSolve2D.cpp +++ /dev/null @@ -1,426 +0,0 @@ -#include -#include - -#include -#include -#include 
-#include -#include - -using namespace std; -using namespace Nektar; - -int main(int argc, char *argv[]) -{ - LibUtilities::SessionReaderSharedPtr vSession; - LibUtilities::CommSharedPtr vComm; - MultiRegions::ContField2DSharedPtr Exp,Fce,Sol; - int i, nq, coordim; - Array fce,sol; - Array xc0,xc1,xc2; - NekDouble lambda; - MultiRegions::GlobalSysSolnType SolnType = MultiRegions::eDirectMultiLevelStaticCond; - string vCommModule("Serial"); - - if(argc != 5) - { - fprintf(stderr,"Usage: TimingCGHelmSolve2D Type NumElements NumModes OptimisationLevel\n"); - exit(1); - } - - int Type = atoi(argv[1]); - int NumElements = atoi(argv[2]); - int NumModes = atoi(argv[3]); - int optLevel = atoi(argv[4]); - // optLevel = 0 --> elemental and matrix free - // optLevel = 2 --> block matrix operations - // optLevel = 3 --> global matrix operation - // optLevel = 4 --> optimal implementation strategies (optimal evaluation for every different evaluation) - - //--------------------- - // Do a fix in case optLevel 3 is chosen. If the number of elements - // and the polynomial order is chosen too high, change OptLevel to 2. 
- // Otherwise, it may cause memory overflow - int optLevelVal = optLevel; - if( optLevel==3 ) - { - if( (Type<3) && (NumElements>200) && (NumModes>7) ) - { - return 0; - } - if( (Type==3) && (NumElements>14) && (NumModes>10) ) - { - return 0; - } - } - //---------------------------------------------- - - //---------------------------------------------- - // Retrieve the necessary input files - stringstream MeshFileSS; - stringstream BCfileSS; - stringstream ExpansionsFileSS; - - int noBis = 1; - - switch(Type) - { - case 1: - { - MeshFileSS << "/Users/ssherw/HDG/Meshes/RegularQuadMeshes/"; - MeshFileSS << "UnitSquare_RegularQuadMesh_" << NumElements << "Elements.xml"; - BCfileSS << "/Users/ssherw/HDG/Meshes/Conditions/UnitSquare_DirichletBoundaryConditions.xml"; - } - break; - case 2: - { - MeshFileSS << "/Users/ssherw/HDG/Meshes/DeformedQuadMeshes/"; - MeshFileSS << "UnitSquare_DeformedQuadMesh_" << NumElements << "Elements.xml"; - BCfileSS << "/Users/ssherw/HDG/Meshes/Conditions/UnitSquare_DirichletBoundaryConditions.xml"; - } - break; - case 3: - { - MeshFileSS << "/Users/ssherw/HDG/Meshes/RegularTriMeshes/"; - MeshFileSS << "UnitSquare_RegularTriMesh_h_1_" << NumElements << ".xml"; - BCfileSS << "/Users/ssherw/HDG/Meshes/Conditions/UnitSquare_DirichletBoundaryConditions.xml"; - } - break; - case 4: - { - MeshFileSS << "/Users/ssherw/HDG/Meshes/Kirby/kirby_10K.xml"; - - BCfileSS << "/Users/ssherw/HDG/Meshes/Kirby/kirby_10K.xml"; - - noBis = 0; - } - break; - default: - { - cerr << "Type should be equal to one of the following values: "<< endl; - cerr << " 1: Regular Quads" << endl; - cerr << " 2: Deformed Quads" << endl; - cerr << " 3: Regular Tris" << endl; - cerr << " 4: Kirby mesh" << endl; - exit(1); - } - } - - - ExpansionsFileSS << "/Users/ssherw/HDG/Meshes/Expansions/NektarExpansionsNummodes"; - ExpansionsFileSS << NumModes << ".xml"; - - string meshfile = MeshFileSS.str(); - string expansionfile = ExpansionsFileSS.str(); - string bcfile = 
BCfileSS.str(); - - vSession = MemoryManager::AllocateSharedPtr(meshfile); - - - string globoptfile; - - switch(optLevel) - { - case 0: - { - stringstream GlobOptFileSS; - GlobOptFileSS << "/Users/ssherw/HDG/Meshes/Optimisation/NoGlobalMat.xml"; - globoptfile = GlobOptFileSS.str(); - } - break; - case 2: - { - stringstream GlobOptFileSS; - GlobOptFileSS << "/Users/ssherw/HDG/Meshes/Optimisation/DoBlockMat.xml"; - globoptfile = GlobOptFileSS.str(); - } - break; - case 3: - { - stringstream GlobOptFileSS; - GlobOptFileSS << "/Users/ssherw/HDG/Meshes/Optimisation/DoGlobalMat.xml"; - globoptfile = GlobOptFileSS.str(); - } - break; - case 4: - { - - - switch(Type) - { - case 1: - { - stringstream GlobOptFileSS; - GlobOptFileSS << "/Users/ssherw/HDG/Meshes/Optimisation/"; - GlobOptFileSS << "UnitSquare_RegularQuadMesh_" << NumElements << "Elements_" << NumModes << "Modes_GlobOpt.xml"; - globoptfile = GlobOptFileSS.str(); - } - break; - case 3: - { - stringstream GlobOptFileSS; - GlobOptFileSS << "/Users/ssherw/HDG/Meshes/Optimisation/"; - GlobOptFileSS << "UnitSquare_RegularTriMesh_h_1_" << NumElements << "_" << NumModes << "Modes_GlobOpt.xml"; - globoptfile = GlobOptFileSS.str(); - } - break; - default: - { - cerr << "No optimal strategy defined for this case "<< endl; - exit(1); - } - } - - } - break; - default: - { - ASSERTL0(false,"Unrecognised optimisation level"); - } - } - //---------------------------------------------- - - - //---------------------------------------------- - // Read in mesh from input file - SpatialDomains::MeshGraph2D graph2D; - graph2D.ReadGeometry(meshfile); - graph2D.ReadExpansions(expansionfile); - //---------------------------------------------- - - //---------------------------------------------- - // read the problem parameters from input file - SpatialDomains::BoundaryConditions bcs(&graph2D); - bcs.Read(bcfile); - //---------------------------------------------- - - //---------------------------------------------- - // Print summary of 
solution details - lambda = bcs.GetParameter("Lambda"); - //---------------------------------------------- - - //---------------------------------------------- - // Define Expansion - Exp = MemoryManager:: - AllocateSharedPtr(vSession,graph2D,bcs); - //---------------------------------------------- - // NumElements = Exp->GetExpSize(); - - //---------------------------------------------- - // load global optimisation parameters - Exp->ReadGlobalOptimizationParameters(globoptfile); - //---------------------------------------------- - - //---------------------------------------------- - // Set up coordinates of mesh for Forcing function evaluation - coordim = Exp->GetCoordim(0); - nq = Exp->GetTotPoints(); - - xc0 = Array(nq,0.0); - xc1 = Array(nq,0.0); - xc2 = Array(nq,0.0); - - switch(coordim) - { - case 1: - Exp->GetCoords(xc0); - break; - case 2: - Exp->GetCoords(xc0,xc1); - break; - case 3: - Exp->GetCoords(xc0,xc1,xc2); - break; - } - //---------------------------------------------- - - //---------------------------------------------- - // Define forcing function for first variable defined in file - fce = Array(nq); - SpatialDomains::ConstForcingFunctionShPtr ffunc - = bcs.GetForcingFunction(bcs.GetVariable(0)); - - ffunc->Evaluate(xc0,xc1,xc2,fce); - - //---------------------------------------------- - // Setup expansion containing the forcing function - Fce = MemoryManager::AllocateSharedPtr(*Exp); - Fce->SetPhys(fce); - //---------------------------------------------- - - //---------------------------------------------- - // Helmholtz solution taking physical forcing - Exp->HelmSolve(Fce->GetPhys(), Exp->UpdateContCoeffs(),lambda,true); - //---------------------------------------------- - - //---------------------------------------------- - // Backward Transform Solution to get solved values at - Exp->BwdTrans(Exp->GetContCoeffs(), Exp->UpdatePhys(),true); - //---------------------------------------------- - - //---------------------------------------------- 
- // See if there is an exact solution, if so - // evaluate and plot errors - SpatialDomains::ConstExactSolutionShPtr ex_sol = - bcs.GetExactSolution(bcs.GetVariable(0)); - - //---------------------------------------------- - // evaluate exact solution - sol = Array(nq); - ex_sol->Evaluate(xc0,xc1,xc2,sol); - //---------------------------------------------- - - //-------------------------------------------- - // Calculate L_inf error - Sol = MemoryManager::AllocateSharedPtr(*Exp); - Sol->SetPhys(sol); - Sol->SetPhysState(true); - - NekDouble L2Error = Exp->L2 (Sol->GetPhys()); - NekDouble H1Error = Exp->H1 (Sol->GetPhys()); - NekDouble LinfError = Exp->Linf(Sol->GetPhys()); - //-------------------------------------------- - // alternative error calculation - NekDouble L2ErrorBis; - NekDouble H1ErrorBis; - NekDouble LinfErrorBis; - if(noBis) - { - const LibUtilities::PointsKey PkeyT1(30,LibUtilities::eGaussLobattoLegendre); - const LibUtilities::PointsKey PkeyT2(30,LibUtilities::eGaussRadauMAlpha1Beta0); - const LibUtilities::PointsKey PkeyQ1(30,LibUtilities::eGaussLobattoLegendre); - const LibUtilities::PointsKey PkeyQ2(30,LibUtilities::eGaussLobattoLegendre); - LibUtilities::BasisKeyVector BkeyT; - LibUtilities::BasisKeyVector BkeyQ; - BkeyT.push_back(LibUtilities::BasisKey(LibUtilities::eModified_A, - NumModes, PkeyT1)); - BkeyT.push_back(LibUtilities::BasisKey(LibUtilities::eModified_B, - NumModes, PkeyT2)); - BkeyQ.push_back(LibUtilities::BasisKey(LibUtilities::eModified_A, - NumModes, PkeyQ1)); - BkeyQ.push_back(LibUtilities::BasisKey(LibUtilities::eModified_A, - NumModes, PkeyQ2)); - - graph2D.SetBasisKey(SpatialDomains::eTriangle, BkeyT); - graph2D.SetBasisKey(SpatialDomains::eQuadrilateral, BkeyQ); - - MultiRegions::ExpList2DSharedPtr ErrorExp = - MemoryManager::AllocateSharedPtr(vComm, - graph2D); - - - int ErrorCoordim = ErrorExp->GetCoordim(0); - int ErrorNq = ErrorExp->GetTotPoints(); - - Array ErrorXc0(ErrorNq,0.0); - Array ErrorXc1(ErrorNq,0.0); - 
Array ErrorXc2(ErrorNq,0.0); - - switch(ErrorCoordim) - { - case 1: - ErrorExp->GetCoords(ErrorXc0); - break; - case 2: - ErrorExp->GetCoords(ErrorXc0,ErrorXc1); - break; - case 3: - ErrorExp->GetCoords(ErrorXc0,ErrorXc1,ErrorXc2); - break; - } - - // evaluate exact solution - Array ErrorSol(ErrorNq); - ex_sol->Evaluate(ErrorXc0,ErrorXc1,ErrorXc2,ErrorSol); - - // calcualte spectral/hp approximation on the quad points of this new - // expansion basis - Exp->GlobalToLocal(Exp->GetContCoeffs(),ErrorExp->UpdateCoeffs()); - ErrorExp->BwdTrans_IterPerExp(ErrorExp->GetCoeffs(),ErrorExp->UpdatePhys()); - - L2ErrorBis = ErrorExp->L2 (ErrorSol); - H1ErrorBis = ErrorExp->H1 (ErrorSol); - LinfErrorBis = ErrorExp->Linf(ErrorSol); - } - else - { - L2ErrorBis = L2Error; - H1ErrorBis = H1Error; - LinfErrorBis = LinfError; - } - //---------------------------------------------- - - //---------------------------------------------- - // Do the timings - timeval timer1, timer2; - NekDouble time1, time2; - NekDouble exeTime; - - // We first do a single run in order to estimate the number of calls - // we are going to make - gettimeofday(&timer1, NULL); - Exp->HelmSolve(Fce->GetPhys(), Exp->UpdateContCoeffs(),lambda,true); - Exp->BwdTrans (Exp->GetContCoeffs(),Exp->UpdatePhys(),true); - gettimeofday(&timer2, NULL); - time1 = timer1.tv_sec*1000000.0+(timer1.tv_usec); - time2 = timer2.tv_sec*1000000.0+(timer2.tv_usec); - exeTime = (time2-time1); - - int NumCalls = (int) ceil(1.0e6/exeTime); - if(NumCalls < 1) - { - NumCalls = 1; - } - - gettimeofday(&timer1, NULL); - for(i = 0; i < NumCalls; ++i) - { - Exp->HelmSolve(Fce->GetPhys(), Exp->UpdateContCoeffs(),lambda,true); - Exp->BwdTrans (Exp->GetContCoeffs(),Exp->UpdatePhys(),true); - } - gettimeofday(&timer2, NULL); - - time1 = timer1.tv_sec*1000000.0+(timer1.tv_usec); - time2 = timer2.tv_sec*1000000.0+(timer2.tv_usec); - exeTime = (time2-time1); - - int nLocCoeffs = Exp->GetNcoeffs(); - int nGlobCoeffs = Exp->GetContNcoeffs(); - int 
nLocBndCoeffs = Exp->GetLocalToGlobalMap()->GetNumLocalBndCoeffs(); - int nGlobBndCoeffs = Exp->GetLocalToGlobalMap()->GetNumGlobalBndCoeffs(); - int nLocDirCoeffs = Exp->GetLocalToGlobalMap()->GetNumLocalDirBndCoeffs(); - int nGlobDirCoeffs = Exp->GetLocalToGlobalMap()->GetNumGlobalDirBndCoeffs(); - MultiRegions::GlobalMatrixKey key(StdRegions::eHelmholtz,lambda,Exp->GetLocalToGlobalMap()); - int nnz = Exp->GetGlobalMatrixNnz(key); - - ofstream outfile("TimingCGHelmSolve2D_NektarppResult.dat"); - outfile.precision(0); - outfile << setw(10) << Type << " "; - outfile << setw(10) << NumElements << " "; - outfile << setw(10) << NumModes << " "; - outfile << setw(10) << NumCalls << " "; - outfile << setw(10) << fixed << noshowpoint << exeTime << " "; - outfile << setw(10) << fixed << noshowpoint << ((NekDouble) (exeTime/((NekDouble)NumCalls))) << " "; - outfile.precision(7); - outfile << setw(15) << scientific << noshowpoint << L2Error << " "; - outfile << setw(15) << scientific << noshowpoint << L2ErrorBis << " "; - outfile << setw(15) << scientific << noshowpoint << LinfError << " "; - outfile << setw(15) << scientific << noshowpoint << LinfErrorBis << " "; - outfile << setw(15) << scientific << noshowpoint << H1Error << " "; - outfile << setw(15) << scientific << noshowpoint << H1ErrorBis << " "; - outfile << setw(10) << nLocCoeffs << " "; - outfile << setw(10) << nGlobCoeffs << " "; - outfile << setw(10) << nLocBndCoeffs << " "; - outfile << setw(10) << nGlobBndCoeffs << " "; - outfile << setw(10) << nLocDirCoeffs << " "; - outfile << setw(10) << nGlobDirCoeffs << " "; - outfile << setw(10) << nnz << " "; - outfile << setw(10) << optLevel << " "; - outfile << endl; - - outfile.close(); - //---------------------------------------------- - - return 0; -} - diff --git a/library/Demos/MultiRegions/ExtraDemos/TimingHDGHelmSolve2D.cpp b/library/Demos/MultiRegions/ExtraDemos/TimingHDGHelmSolve2D.cpp deleted file mode 100644 index 
5334937f59effd7abf40bc3e88ce34e1fd15e3f1..0000000000000000000000000000000000000000 --- a/library/Demos/MultiRegions/ExtraDemos/TimingHDGHelmSolve2D.cpp +++ /dev/null @@ -1,459 +0,0 @@ -#include -#include - -#include -#include -#include -#include -#include - -using namespace std; -using namespace Nektar; - -int main(int argc, char *argv[]) -{ - LibUtilities::SessionReaderSharedPtr vSession; - LibUtilities::CommSharedPtr vComm; - MultiRegions::DisContField2DSharedPtr Exp,Fce,Sol; - int i, nq, coordim; - Array fce,sol; - Array xc0,xc1,xc2; - NekDouble lambda; - MultiRegions::GlobalSysSolnType SolnType = MultiRegions::eDirectMultiLevelStaticCond; - string vCommModule("Serial"); - - if(argc != 5) - { - fprintf(stderr,"Usage: TimingCGHelmSolve2D Type NumElements NumModes OptimisationLevel\n"); - exit(1); - } - - int Type = atoi(argv[1]); - int NumElements = atoi(argv[2]); - int NumModes = atoi(argv[3]); - int optLevel = atoi(argv[4]); - // optLevel = 0 --> elemental and matrix free - // optLevel = 2 --> global and block matrix operations (i.e. needs assembly) - // optLevel = 3 --> global matrix operation (i.e. 
no assembly) - // optLevel = 4 --> optimal implementation strategies (optimal evaluation for every different evaluation) - - //---------------------------------------------- - // Retrieve the necessary input files - stringstream MeshFileSS; - stringstream BCfileSS; - stringstream ExpansionsFileSS; - - int noBis = 1; - - - - switch(Type) - { - case 1: - { - MeshFileSS << "/Users/ssherw/HDG/Meshes/RegularQuadMeshes/"; - MeshFileSS << "UnitSquare_RegularQuadMesh_" << NumElements << "Elements.xml"; - BCfileSS << "/Users/ssherw/HDG/Meshes/Conditions/UnitSquare_DirichletBoundaryConditions.xml"; - - } - break; - case 2: - { - MeshFileSS << "/Users/ssherw/HDG/Meshes/DeformedQuadMeshes/"; - MeshFileSS << "UnitSquare_DeformedQuadMesh_" << NumElements << "Elements.xml"; - BCfileSS << "/Users/ssherw/HDG/Meshes/Conditions/UnitSquare_DirichletBoundaryConditions.xml"; - - } - break; - case 3: - { - MeshFileSS << "/Users/ssherw/HDG/Meshes/RegularTriMeshes/"; - MeshFileSS << "UnitSquare_RegularTriMesh_h_1_" << NumElements << ".xml"; - BCfileSS << "/Users/ssherw/HDG/Meshes/Conditions/UnitSquare_DirichletBoundaryConditions.xml"; - - } - break; - case 4: - { - MeshFileSS << "/Users/ssherw/HDG/Meshes/Kirby/kirby_10K.xml"; - - BCfileSS << "/Users/ssherw/HDG/Meshes/Kirby/kirby_10K.xml"; - - noBis = 0; - } - break; - default: - { - cerr << "Type should be equal to one of the following values: "<< endl; - cerr << " 1: Regular Quads" << endl; - cerr << " 2: Deformed Quads" << endl; - cerr << " 3: Regular Tris" << endl; - exit(1); - } - } - - ExpansionsFileSS << "/Users/ssherw/HDG/Meshes/Expansions/NektarExpansionsNummodes"; - ExpansionsFileSS << NumModes << ".xml"; - - string meshfile = MeshFileSS.str(); - string expansionfile = ExpansionsFileSS.str(); - string bcfile = BCfileSS.str(); - - vSession = MemoryManager::AllocateSharedPtr(bcfile); - - string globoptfile; - - switch(optLevel) - { - case 0: - { - stringstream GlobOptFileSS; - GlobOptFileSS << 
"/Users/ssherw/HDG/Meshes/Optimisation/NoGlobalMat.xml"; - globoptfile = GlobOptFileSS.str(); - } - break; - case 2: - { - stringstream GlobOptFileSS; - GlobOptFileSS << "/Users/ssherw/HDG/Meshes/Optimisation/DoBlockMat.xml"; - globoptfile = GlobOptFileSS.str(); - } - break; - case 3: - { - stringstream GlobOptFileSS; - GlobOptFileSS << "/Users/ssherw/HDG/Meshes/Optimisation/DoGlobalMat.xml"; - globoptfile = GlobOptFileSS.str(); - } - break; - case 4: - { - stringstream GlobOptFileSS; - GlobOptFileSS << "/Users/ssherw/HDG/Meshes/Optimisation/"; - GlobOptFileSS << "UnitSquare_RegularQuadMesh_" << NumElements << "Elements_" << NumModes << "Modes_GlobOpt.xml"; - globoptfile = GlobOptFileSS.str(); - } - break; - default: - { - ASSERTL0(false,"Unrecognised optimisation level"); - } - } - //---------------------------------------------- - - - //---------------------------------------------- - // Read in mesh from input file - SpatialDomains::MeshGraph2D graph2D; - graph2D.ReadGeometry(meshfile); - graph2D.ReadExpansions(expansionfile); - //---------------------------------------------- - - //---------------------------------------------- - // read the problem parameters from input file - SpatialDomains::BoundaryConditions bcs(&graph2D); - bcs.Read(bcfile); - //---------------------------------------------- - - //---------------------------------------------- - // Print summary of solution details - lambda = bcs.GetParameter("Lambda"); - //---------------------------------------------- - - //---------------------------------------------- - // Define Expansion - Exp = MemoryManager:: - AllocateSharedPtr(vSession,graph2D,bcs); - //---------------------------------------------- - - //---------------------------------------------- - // load global optimisation parameters - Exp->ReadGlobalOptimizationParameters(globoptfile); - //---------------------------------------------- - - //---------------------------------------------- - // Set up coordinates of mesh for Forcing 
function evaluation - coordim = Exp->GetCoordim(0); - nq = Exp->GetTotPoints(); - - xc0 = Array(nq,0.0); - xc1 = Array(nq,0.0); - xc2 = Array(nq,0.0); - - switch(coordim) - { - case 1: - Exp->GetCoords(xc0); - break; - case 2: - Exp->GetCoords(xc0,xc1); - break; - case 3: - Exp->GetCoords(xc0,xc1,xc2); - break; - } - //---------------------------------------------- - - //---------------------------------------------- - // Define forcing function for first variable defined in file - fce = Array(nq); - SpatialDomains::ConstForcingFunctionShPtr ffunc - = bcs.GetForcingFunction(bcs.GetVariable(0)); - ffunc->Evaluate(xc0,xc1,xc2,fce); - //---------------------------------------------- - - //---------------------------------------------- - // Setup expansion containing the forcing function - Fce = MemoryManager::AllocateSharedPtr(*Exp); - Fce->SetPhys(fce); - //---------------------------------------------- - - //---------------------------------------------- - // Helmholtz solution taking physical forcing - Exp->HelmSolve(Fce->GetPhys(), Exp->UpdateCoeffs(), lambda); - //---------------------------------------------- - - //---------------------------------------------- - // Backward Transform Solution to get solved values at - Exp->BwdTrans(Exp->GetCoeffs(), Exp->UpdatePhys()); - //---------------------------------------------- - - //---------------------------------------------- - // See if there is an exact solution, if so - // evaluate and plot errors - SpatialDomains::ConstExactSolutionShPtr ex_sol = - bcs.GetExactSolution(bcs.GetVariable(0)); - - //---------------------------------------------- - // evaluate exact solution - sol = Array(nq); - ex_sol->Evaluate(xc0,xc1,xc2,sol); - //---------------------------------------------- - - //-------------------------------------------- - // Calculate L_inf error - Sol = MemoryManager::AllocateSharedPtr(*Exp); - Sol->SetPhys(sol); - Sol->SetPhysState(true); - - NekDouble L2Error = Exp->L2 (Sol->GetPhys()); - NekDouble 
H1Error = Exp->H1 (Sol->GetPhys()); - NekDouble LinfError = Exp->Linf(Sol->GetPhys()); - //-------------------------------------------- - // alternative error calculation - NekDouble L2ErrorBis; - NekDouble L2ErrorPostProc; - NekDouble H1ErrorBis; - NekDouble LinfErrorBis; - NekDouble LinfErrorPostProc; - NekDouble Q0L2ErrorBis; - NekDouble Q1L2ErrorBis; - - if(noBis) - { - const LibUtilities::PointsKey PkeyT1(30,LibUtilities::eGaussLobattoLegendre); - const LibUtilities::PointsKey PkeyT2(30,LibUtilities::eGaussRadauMAlpha1Beta0); - const LibUtilities::PointsKey PkeyQ1(30,LibUtilities::eGaussLobattoLegendre); - const LibUtilities::PointsKey PkeyQ2(30,LibUtilities::eGaussLobattoLegendre); - - LibUtilities::BasisKeyVector BkeyT; - LibUtilities::BasisKeyVector BkeyQ; -#ifdef NMODESP1_Err - BkeyT.push_back(LibUtilities::BasisKey(LibUtilities::eModified_A, - NumModes+1, PkeyT1)); - BkeyT.push_back(LibUtilities::BasisKey(LibUtilities::eModified_B, - NumModes+1, PkeyT2)); - BkeyQ.push_back(LibUtilities::BasisKey(LibUtilities::eModified_A, - NumModes+1, PkeyQ1)); - BkeyQ.push_back(LibUtilities::BasisKey(LibUtilities::eModified_A, - NumModes+1, PkeyQ2)); -#else - BkeyT.push_back(LibUtilities::BasisKey(LibUtilities::eModified_A, - NumModes, PkeyT1)); - BkeyT.push_back(LibUtilities::BasisKey(LibUtilities::eModified_B, - NumModes, PkeyT2)); - BkeyQ.push_back(LibUtilities::BasisKey(LibUtilities::eModified_A, - NumModes, PkeyQ1)); - BkeyQ.push_back(LibUtilities::BasisKey(LibUtilities::eModified_A, - NumModes, PkeyQ2)); -#endif - - graph2D.SetBasisKey(SpatialDomains::eTriangle, BkeyT); - graph2D.SetBasisKey(SpatialDomains::eQuadrilateral, BkeyQ); - - MultiRegions::DisContField2DSharedPtr ErrorExp = - MemoryManager::AllocateSharedPtr(vComm,graph2D,bcs); - - int ErrorCoordim = ErrorExp->GetCoordim(0); - int ErrorNq = ErrorExp->GetTotPoints(); - - Array ErrorXc0(ErrorNq,0.0); - Array ErrorXc1(ErrorNq,0.0); - Array ErrorXc2(ErrorNq,0.0); - - switch(ErrorCoordim) - { - case 1: - 
ErrorExp->GetCoords(ErrorXc0); - break; - case 2: - ErrorExp->GetCoords(ErrorXc0,ErrorXc1); - break; - case 3: - ErrorExp->GetCoords(ErrorXc0,ErrorXc1,ErrorXc2); - break; - } - - - // evaluate exact solution - Array ErrorSol(ErrorNq); - ex_sol->Evaluate(ErrorXc0,ErrorXc1,ErrorXc2,ErrorSol); - - // calcualte spectral/hp approximation on the quad points of this new - // expansion basis -#define NMODESP1_Err -#ifdef NMODESP1_Err - std::vector FieldDef - = Exp->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - std::string fieldstr = "u"; - - for(i = 0; i < FieldDef.size(); ++i) - { - FieldDef[i]->m_fields.push_back(fieldstr); - Exp->AppendFieldData(FieldDef[i], FieldData[i]); - ErrorExp->ExtractDataToCoeffs(FieldDef[i],FieldData[i],fieldstr); - } - - // Interpolotation trace - std::vector TraceDef - = Exp->GetTrace()->GetFieldDefinitions(); - std::vector > TraceData(FieldDef.size()); - for(i = 0; i < TraceDef.size(); ++i) - { - TraceDef[i]->m_fields.push_back(fieldstr); - Exp->GetTrace()->AppendFieldData(TraceDef[i], TraceData[i]); - ErrorExp->GetTrace()->ExtractDataToCoeffs(TraceDef[i],TraceData[i],fieldstr); - } - - ErrorExp->BwdTrans_IterPerExp(ErrorExp->GetCoeffs(),ErrorExp->UpdatePhys()); - - L2ErrorBis = ErrorExp->L2 (ErrorSol); - H1ErrorBis = ErrorExp->H1 (ErrorSol); - LinfErrorBis = ErrorExp->Linf(ErrorSol); - - Array Deriv(ErrorNq); - ErrorExp->PhysDeriv(0,ErrorSol,Deriv); - Q0L2ErrorBis = ErrorExp->L2_DGDeriv(0,Deriv); - ErrorExp->PhysDeriv(1,ErrorSol,Deriv); - Q1L2ErrorBis = ErrorExp->L2_DGDeriv(1,Deriv); - - ErrorExp->EvaluateHDGPostProcessing(ErrorExp->UpdateCoeffs()); - ErrorExp->BwdTrans_IterPerExp(ErrorExp->GetCoeffs(),ErrorExp->UpdatePhys()); - L2ErrorPostProc = ErrorExp->L2 (ErrorSol); - LinfErrorPostProc = ErrorExp->Linf(ErrorSol); -#else - ErrorExp->BwdTrans_IterPerExp(Exp->GetCoeffs(),ErrorExp->UpdatePhys()); - - L2ErrorBis = ErrorExp->L2 (ErrorSol); - H1ErrorBis = ErrorExp->H1 (ErrorSol); - LinfErrorBis = 
ErrorExp->Linf(ErrorSol); - - // Copy coefficients and trace space for Q error evaluation - Vmath::Vcopy(ErrorExp->GetNcoeffs(),Exp->GetCoeffs(),1,ErrorExp->UpdateCoeffs(),1); - Vmath::Vcopy(ErrorExp->GetTrace()->GetNcoeffs(),Exp->GetTrace()->GetCoeffs(),1,ErrorExp->GetTrace()->UpdateCoeffs(),1); - - Array Deriv(ErrorNq); - ErrorExp->PhysDeriv(0,ErrorSol,Deriv); - Q0L2ErrorBis = ErrorExp->L2_DGDeriv(0,Deriv); - ErrorExp->PhysDeriv(1,ErrorSol,Deriv); - Q1L2ErrorBis = ErrorExp->L2_DGDeriv(1,Deriv); -#endif - - } - else - { - L2ErrorBis = L2Error; - H1ErrorBis = H1Error; - LinfErrorBis = LinfError; - Q0L2ErrorBis = Q1L2ErrorBis = 0; - } - //---------------------------------------------- - - //---------------------------------------------- - // Do the timings - timeval timer1, timer2; - NekDouble time1, time2; - NekDouble exeTime; - - // We first do a single run in order to estimate the number of calls - // we are going to make - gettimeofday(&timer1, NULL); - Exp->HelmSolve(Fce->GetPhys(), Exp->UpdateCoeffs(), lambda); - Exp->BwdTrans(Exp->GetCoeffs(), Exp->UpdatePhys()); - gettimeofday(&timer2, NULL); - time1 = timer1.tv_sec*1000000.0+(timer1.tv_usec); - time2 = timer2.tv_sec*1000000.0+(timer2.tv_usec); - exeTime = (time2-time1); - - int NumCalls = (int) ceil(1.0e6/exeTime); - if(NumCalls < 1) - { - NumCalls = 1; - } - - gettimeofday(&timer1, NULL); - for(i = 0; i < NumCalls; ++i) - { - Exp->HelmSolve(Fce->GetPhys(), Exp->UpdateCoeffs(), lambda); - Exp->BwdTrans(Exp->GetCoeffs(), Exp->UpdatePhys()); - } - gettimeofday(&timer2, NULL); - - time1 = timer1.tv_sec*1000000.0+(timer1.tv_usec); - time2 = timer2.tv_sec*1000000.0+(timer2.tv_usec); - exeTime = (time2-time1); - - int nLocCoeffs = Exp->GetNcoeffs(); - int nGlobCoeffs = nLocCoeffs; - int nLocBndCoeffs = 0; - int nGlobBndCoeffs = 0; - int nLocDirCoeffs = 0; - int nGlobDirCoeffs = 0; - int nnz = 0; - - ofstream outfile("TimingHDGHelmSolve2D_NektarppResult.dat"); - outfile.precision(0); - outfile << setw(10) << Type 
<< " "; - outfile << setw(10) << NumElements << " "; - outfile << setw(10) << NumModes << " "; - outfile << setw(10) << NumCalls << " "; - outfile << setw(10) << fixed << noshowpoint << exeTime << " "; - outfile << setw(10) << fixed << noshowpoint << ((NekDouble) (exeTime/((NekDouble)NumCalls))) << " "; - outfile.precision(7); - outfile << setw(15) << scientific << noshowpoint << L2Error << " "; - outfile << setw(15) << scientific << noshowpoint << L2ErrorBis << " "; - outfile << setw(15) << scientific << noshowpoint << LinfError << " "; - outfile << setw(15) << scientific << noshowpoint << LinfErrorBis << " "; - outfile << setw(15) << scientific << noshowpoint << H1Error << " "; - outfile << setw(15) << scientific << noshowpoint << H1ErrorBis << " "; - outfile << setw(15) << scientific << noshowpoint << Q0L2ErrorBis << " "; - outfile << setw(15) << scientific << noshowpoint << Q1L2ErrorBis << " "; - outfile << setw(15) << scientific << noshowpoint << L2ErrorPostProc << " "; - outfile << setw(15) << scientific << noshowpoint << LinfErrorPostProc << " "; - outfile << setw(10) << nLocCoeffs << " "; - outfile << setw(10) << nGlobCoeffs << " "; - outfile << setw(10) << nLocBndCoeffs << " "; - outfile << setw(10) << nGlobBndCoeffs << " "; - outfile << setw(10) << nLocDirCoeffs << " "; - outfile << setw(10) << nGlobDirCoeffs << " "; - outfile << setw(10) << nnz << " "; - outfile << setw(10) << optLevel << " "; - outfile << endl; - - outfile.close(); - //---------------------------------------------- - - return 0; -} - diff --git a/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Prism_Deformed.tst b/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Prism_Deformed.tst index fc3b55cf9609ac89a202c5e484f73448f603218b..07fd946d4a1522d87be97070fd863a3ebdd51dd3 100644 --- a/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Prism_Deformed.tst +++ b/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Prism_Deformed.tst @@ -9,10 +9,10 @@ - 3.35253e-06 + 1.13842e-05 - 4.45606e-05 + 
7.59576e-05
diff --git a/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Pyr.tst b/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Pyr.tst index 04cc5f8d442718cba7e9d41f415f6da59acd5489..99abb9820d08fca031d7a3e05be3069f60860097 100644 --- a/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Pyr.tst +++ b/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Pyr.tst @@ -8,10 +8,10 @@ - 5.36868e-07 + 5.00954e-07 - 3.78881e-06 + 4.36308e-06
diff --git a/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Pyr_Deformed.tst b/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Pyr_Deformed.tst index 72d45a52f24435b3dad2a32b438cf85c7dff3031..794878f0c3ca44292fc433546ba05e3f0fccd321 100644 --- a/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Pyr_Deformed.tst +++ b/library/Demos/MultiRegions/Tests/Helmholtz3D_CG_Pyr_Deformed.tst @@ -8,10 +8,10 @@ - 4.61818e-05 + 2.04919e-05 - 0.000329233 + 0.000165154
diff --git a/library/Demos/Polylib/Polylib_test.cpp b/library/Demos/Polylib/Polylib_test.cpp deleted file mode 100644 index ac7691df48aef26ac18c976993ee1b91b8755a2c..0000000000000000000000000000000000000000 --- a/library/Demos/Polylib/Polylib_test.cpp +++ /dev/null @@ -1,504 +0,0 @@ -#include -#include -#include "Polylib.h" - -#ifdef __cplusplus -#include - -using namespace std; -using namespace Polylib; -#endif - -/* -------------------------------------------------------------------- - To compile: - g++ -g -c Polylib_test.cpp -I ../../../include - g++ -g -o polytest Polylib_test.o -L../../ -lPolylib -lm - * --------------------------------------------------------------------*/ - -/* ------------------------------------------------------------------- - This is a routine to test the integration, differentiation and - interpolation routines in the polylib.c. - - First, it performs the integral - - /1 alpha beta alpha,beta - | (1-x) (1+x) P (x) dx = 0 - /-1 n - - for all -0.5 <= alpha <= 5 (increments of 0.5) - -0.5 <= beta <= 5 (increments of 0.5) - - using np points where - NPLOWER <= np <= NPUPPER - 2 <= n <= 2*np - delta - - delta = 1 (gauss), 2(radau), 3(lobatto). - The integral is evaluated and if it is larger that EPS then the - value of alpha,beta,np,n and the integral is printed to the screen. - - After every alpha value the statement - "finished checking all beta values for alpha = #" - is printed - - The routine then evaluates the derivate of - - d n n-1 - -- x = n x - dx - - for all -0.5 <= alpha <= 5 (increments of 0.5) - -0.5 <= beta <= 5 (increments of 0.5) - - using np points where - NPLOWER <= np <= NPUPPER - 2 <= n <= np - 1 - - The error is check in a pointwise sense and if it is larger than - EPS then the value of alpha,beta,np,n and the error is printed to - the screen. 
After every alpha value the statement - "finished checking all beta values for alpha = #" - is printed - - Finally the routine evaluates the interpolation of - - n n - z to x - - where z are the quadrature zeros and x are the equispaced points - - 2*i - x = ----- - 1.0 (0 <= i <= np-1) - i (np-1) - - - for all -0.5 <= alpha <= 5 (increments of 0.5) - -0.5 <= beta <= 5 (increments of 0.5) - - using np points where - NPLOWER <= np <= NPUPPER - 2 <= n <= np - 1 - - The error is check in a pointwise sense and if it is larger than - EPS then the value of alpha,beta,np,n and the error is printed to - the screen. After every alpha value the statement - "finished checking all beta values for alpha = #" - is printed - - The above checks are performed for all the Gauss, Gauss-Radau and - Gauss-Lobatto points. If you want to disable any routine then set - GAUSS_INT, GAUSS_RADAU_INT, GAUSS_LOBATTO_INT = 0 - for the integration rouintes - GAUSS_DIFF,GAUSS_RADAU_DIFF, GAUSS_LOBATTO_DIFF = 0 - for the differentiation routines - GAUSS_INTERP,GAUSS_RADAU_INTERP, GAUSS_LOBATTO_INTERP = 0 - for the interpolation routines. 
- ------------------------------------------------------------------*/ - -#define NPLOWER 5 -#define NPUPPER 15 -#define EPS 1e-12 - -#define GAUSS_INT 1 -#define GAUSS_RADAUM_INT 1 -#define GAUSS_RADAUP_INT 1 -#define GAUSS_LOBATTO_INT 1 -#define GAUSS_DIFF 1 -#define GAUSS_RADAUM_DIFF 1 -#define GAUSS_RADAUP_DIFF 1 -#define GAUSS_LOBATTO_DIFF 1 -#define GAUSS_INTERP 1 -#define GAUSS_RADAUM_INTERP 1 -#define GAUSS_RADAUP_INTERP 1 -#define GAUSS_LOBATTO_INTERP 1 - -/* local routines */ -double ddot (int, double *, int, double *, int); -double *dvector (int, int); - -main(){ - int np,n,i; - double *z,*w,*p,sum=0,alpha,beta,*d,*dt; - - z = dvector(0,NPUPPER-1); - w = dvector(0,NPUPPER-1); - p = dvector(0,NPUPPER-1); - - d = dvector(0,NPUPPER*NPUPPER-1); - dt = dvector(0,NPUPPER*NPUPPER-1); - -#if GAUSS_INT - /* Gauss Integration */ - printf("Begin checking Gauss integration\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwgj(z,w,np,alpha,beta); - for(n = 2; n < 2*np-1; ++n){ - jacobfd(np,z,p,NULL,n,alpha,beta); - sum = ddot(np,w,1,p,1); - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d integal was %lg\n" - ,alpha,beta,np,n,sum); - } - } - - beta += 0.5; - } - printf("finished checking all beta values for alpha = %lf\n",alpha); - alpha += 0.5; - } - printf("Finished checking Gauss Integration\n"); -#endif - -#if GAUSS_RADAUM_INT - /* Gauss Radau Integration */ - printf("Begin checking Gauss Radau Integration\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwgrjm(z,w,np,alpha,beta); - for(n = 2; n < 2*np-2; ++n){ - jacobfd(np,z,p,NULL,n,alpha,beta); - sum = ddot(np,w,1,p,1); - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d integal was %lg\n" - ,alpha,beta,np,n,sum); - } - } - - beta += 0.5; - } - printf("finished checking all beta values for alpha = %lf\n",alpha); - alpha 
+= 0.5; - } - printf("Finished checking Gauss Radau (z=-1) Integration\n"); -#endif - - -#if GAUSS_RADAUP_INT - /* Gauss Radau Integration */ - printf("Begin checking Gauss Radau Integration\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwgrjp(z,w,np,alpha,beta); - for(n = 2; n < 2*np-2; ++n){ - jacobfd(np,z,p,NULL,n,alpha,beta); - sum = ddot(np,w,1,p,1); - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d integal was %lg\n" - ,alpha,beta,np,n,sum); - } - } - - beta += 0.5; - } - printf("finished checking all beta values for alpha = %lf\n",alpha); - alpha += 0.5; - } - printf("Finished checking Gauss Radau (z=1) Integration\n"); -#endif - -#if GAUSS_LOBATTO_INT - /* Gauss Lobatto Integration */ - printf("Begin checking Gauss Lobatto integration\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwglj(z,w,np,alpha,beta); - for(n = 2; n < 2*np-3; ++n){ - jacobfd(np,z,p,NULL,n,alpha,beta); - sum = ddot(np,w,1,p,1); - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d integal was %lg\n" - ,alpha,beta,np,n,sum); - } - } - - beta += 0.5; - } - printf("finished checking all beta values for alpha = %lf\n",alpha); - alpha += 0.5; - } - printf("Finished checking Gauss Lobatto Integration\n"); -#endif - -#if GAUSS_DIFF - printf("Begin checking differentiation through Gauss points\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwgj(z,w,np,alpha,beta); - for(n = 2; n < np-1; ++n){ - Dgj(d,z,np,alpha,beta); - - for(i = 0; i < np; ++i) p[i] = pow(z[i],n); - sum = 0; - for(i = 0; i < np; ++i) - sum += fabs(ddot(np,d+i*np,1,p,1) - n*pow(z[i],n-1)); - sum /= np; - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d difference %lg\n" - ,alpha,beta,np,n,sum); - } - } - beta += 0.5; - } - 
printf("finished checking all beta values for alpha = %lf\n",alpha); - alpha += 0.5; - } - printf("Finished checking Gauss Jacobi differentiation\n"); -#endif - -#if GAUSS_RADAUM_DIFF - printf("Begin checking differentiation through Gauss Radau points\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwgrjm(z,w,np,alpha,beta); - for(n = 2; n < np-1; ++n){ - Dgrjm(d,z,np,alpha,beta); - for(i = 0; i < np; ++i) p[i] = pow(z[i],n); - sum = 0; - for(i = 0; i < np; ++i) - sum += fabs(ddot(np,d+i*np,1,p,1) - n*pow(z[i],n-1)); - sum /= np; - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d difference %lg\n" - ,alpha,beta,np,n,sum); - } - } - beta += 0.5; - } - printf("finished checking all beta values for alpha = %lf\n",alpha); - alpha += 0.5; - } - printf("Finished checking Gauss Radau (z=-1) differentiation\n"); -#endif - -#if GAUSS_RADAUP_DIFF - printf("Begin checking differentiation through Gauss Radau (z=1) points\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwgrjp(z,w,np,alpha,beta); - for(n = 2; n < np-1; ++n){ - Dgrjp(d,z,np,alpha,beta); - for(i = 0; i < np; ++i) p[i] = pow(z[i],n); - sum = 0; - for(i = 0; i < np; ++i) - sum += fabs(ddot(np,d+i*np,1,p,1) - n*pow(z[i],n-1)); - sum /= np; - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d difference %lg\n" - ,alpha,beta,np,n,sum); - } - } - beta += 0.5; - } - printf("finished checking all beta values for alpha = %lf\n",alpha); - alpha += 0.5; - } - printf("Finished checking Gauss Radau (z=1) differentiation\n"); -#endif - -#if GAUSS_LOBATTO_DIFF - printf("Begin checking differentiation through Gauss Lobatto points\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwglj(z,w,np,alpha,beta); - for(n = 2; n < np-1; ++n){ - Dglj(d,z,np,alpha,beta); - 
for(i = 0; i < np; ++i) p[i] = pow(z[i],n); - sum = 0; - for(i = 0; i < np; ++i) - sum += fabs(ddot(np,d+i*np,1,p,1) - n*pow(z[i],n-1)); - sum /= np; - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d difference %lg\n" - ,alpha,beta,np,n,sum); - } - } - beta += 0.5; - } - printf("finished checking all beta values for alpha = %lf\n",alpha); - alpha += 0.5; - } - printf("Finished checking Gauss Lobatto differentiation\n"); -#endif - - /* check interpolation routines */ -#if GAUSS_INTERP - printf("Begin checking interpolation through Gauss points\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwgj(z,w,np,alpha,beta); - for(n = 2; n < np-1; ++n){ - for(i = 0; i < np; ++i) { - w[i] = 2.0*i/(double)(np-1)-1.0; - p[i] = pow(z[i],n); - } - Imgj(d,z,w,np,np,alpha,beta); - sum = 0; - for(i = 0; i < np; ++i) - sum += fabs(ddot(np,d+i*np,1,p,1) - pow(w[i],n)); - sum /= np; - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d difference %lg\n" - ,alpha,beta,np,n,sum); - } - } - beta += 0.5; - } - printf("finished checking all beta values for alpha = %lf\n",alpha); - alpha += 0.5; - } - printf("Finished checking Gauss Jacobi interpolation\n"); -#endif - -#if GAUSS_RADAUM_INTERP - printf("Begin checking Interpolation through Gauss Radau (z=-1) points\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwgrjm(z,w,np,alpha,beta); - for(n = 2; n < np-1; ++n){ - for(i = 0; i < np; ++i) { - w[i] = 2.0*i/(double)(np-1)-1.0; - p[i] = pow(z[i],n); - } - Imgrjm(d,z,w,np,np,alpha,beta); - sum = 0; - for(i = 0; i < np; ++i) - sum += fabs(ddot(np,d+i*np,1,p,1) - pow(w[i],n)); - sum /= np; - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d difference %lg\n" - ,alpha,beta,np,n,sum); - } - } - beta += 0.5; - } - printf("finished checking all beta values for alpha = %lf\n",alpha); - 
alpha += 0.5; - } - printf("Finished checking Gauss Radua Jacobi (z=-1) interpolation\n"); -#endif -#if GAUSS_RADAUP_INTERP - printf("Begin checking Interpolation through Gauss Radau (z=1) points\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwgrjp(z,w,np,alpha,beta); - for(n = 2; n < np-1; ++n){ - for(i = 0; i < np; ++i) { - w[i] = 2.0*i/(double)(np-1)-1.0; - p[i] = pow(z[i],n); - } - Imgrjp(d,z,w,np,np,alpha,beta); - sum = 0; - for(i = 0; i < np; ++i) - sum += fabs(ddot(np,d+i*np,1,p,1) - pow(w[i],n)); - sum /= np; - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d difference %lg\n" - ,alpha,beta,np,n,sum); - } - } - beta += 0.5; - } - printf("finished checking all beta values for alpha = %lf\n",alpha); - alpha += 0.5; - } - printf("Finished checking Gauss Radau (z=1) interpolation\n"); -#endif - -#if GAUSS_LOBATTO_INTERP - printf("Begin checking Interpolation through Gauss Lobatto points\n"); - alpha = -0.5; - while(alpha <= 5.0){ - beta = -0.5; - while(beta <= 5.0){ - - for(np = NPLOWER; np <= NPUPPER; ++np){ - zwglj(z,w,np,alpha,beta); - for(n = 2; n < np-1; ++n){ - for(i = 0; i < np; ++i) { - w[i] = 2.0*i/(double)(np-1)-1.0; - p[i] = pow(z[i],n); - } - Imglj(d,z,w,np,np,alpha,beta); - sum = 0; - for(i = 0; i < np; ++i) - sum += fabs(ddot(np,d+i*np,1,p,1) - pow(w[i],n)); - sum /= np; - if(fabs(sum)>EPS) - printf("alpha = %lf, beta = %lf, np = %d, n = %d difference %lg\n" - ,alpha,beta,np,n,sum); - } - } - beta += 0.5; - } - printf("finished checking all beta values for alpha = %lf\n",alpha); - alpha += 0.5; - } - printf("Finished checking Gauss Lobatto interploation\n"); -#endif - - - free(z); free(w); free(p); free(d); free(dt); -} - -double ddot (int n, double *x, int incx, double *y, int incy) -{ - register double sum = 0.; - - while (n--) { - sum += (*x) * (*y); - x += incx; - y += incy; - } - return sum; -} - - -double *dvector(int nl,int nh) -{ - double 
*v; - - v = (double *)malloc((unsigned) (nh-nl+1)*sizeof(double)); - return v-nl; -} diff --git a/library/Demos/SpatialDomains/BC1.xml b/library/Demos/SpatialDomains/BC1.xml deleted file mode 100644 index a02e2483590974edd76bb85bbca565deec022787..0000000000000000000000000000000000000000 --- a/library/Demos/SpatialDomains/BC1.xml +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - -

Lambda = 10

- -

Tolerance = 0.0000000001

-

Test = 2.0*Tolerance

-

Test2 = Test * Lambda

-
- - - - - - - - - - - u - - v - - - - - C[0] - C[1] - - - - - - - - - - - - - - - - - - - - - - - - - - -

- - - - - - - - - - - - - - - - - - - - - - - diff --git a/library/Demos/SpatialDomains/CMakeLists.txt b/library/Demos/SpatialDomains/CMakeLists.txt deleted file mode 100644 index 1a42de268e4175879de375a56912c7d6666408f5..0000000000000000000000000000000000000000 --- a/library/Demos/SpatialDomains/CMakeLists.txt +++ /dev/null @@ -1,32 +0,0 @@ -SET(LinkLibraries - optimized SpatialDomains debug SpatialDomains-g - optimized StdRegions debug StdRegions-g - optimized LibUtilities debug LibUtilities-g - optimized ${Boost_THREAD_LIBRARY_RELEASE} debug ${Boost_THREAD_LIBRARY_DEBUG} - optimized ${Boost_IOSTREAMS_LIBRARY_RELEASE} debug ${Boost_IOSTREAMS_LIBRARY_DEBUG} - optimized ${Boost_ZLIB_LIBRARY_RELEASE} debug ${Boost_ZLIB_LIBRARY_DEBUG} - optimized ${TINYXML_LIBRARY} debug ${TINYXML_LIBRARY} -) - -#SET(SpatialDomains1DSource Graph1D.cpp) -#ADD_NEKTAR_EXECUTABLE(Graph1D SpatialDomains1DSource) -#TARGET_LINK_LIBRARIES(Graph1D ${LinkLibraries}) -#SET_LAPACK_LINK_LIBRARIES(Graph1D) - -#SET(SpatialDomains2DSource Graph2D.cpp) -#ADD_NEKTAR_EXECUTABLE(Graph2D SpatialDomains2DSource) -#TARGET_LINK_LIBRARIES(Graph2D ${LinkLibraries}) -#SET_LAPACK_LINK_LIBRARIES(Graph2D) - -#SET(SpatialDomains3DSource Graph3D.cpp) -#ADD_NEKTAR_EXECUTABLE(Graph3D SpatialDomains3DSource) -#TARGET_LINK_LIBRARIES(Graph3D ${LinkLibraries}) -#SET_LAPACK_LINK_LIBRARIES(Graph3D) - -# Generate list of available subdirectories -FILE(GLOB dir_list "*") -FOREACH(dir ${dir_list}) - IF(IS_DIRECTORY ${dir} AND EXISTS ${dir}/CMakeLists.txt) - ADD_SUBDIRECTORY(${dir}) - ENDIF(IS_DIRECTORY ${dir} AND EXISTS ${dir}/CMakeLists.txt) -ENDFOREACH(dir ${dir_list}) diff --git a/library/Demos/SpatialDomains/Graph1D.cpp b/library/Demos/SpatialDomains/Graph1D.cpp deleted file mode 100644 index e7fab4876f56e1fea2bffa4d4625e0b6580e77aa..0000000000000000000000000000000000000000 --- a/library/Demos/SpatialDomains/Graph1D.cpp +++ /dev/null @@ -1,54 +0,0 @@ -#include -#include -#include -#include -#include - -#include -#include 
-#include - -#include - -using namespace Nektar; -using namespace SpatialDomains; -using namespace std; - -// compile using Builds/Demos/SpatialDomains -> make DEBUG=1 Graph1D - -int main(int argc, char *argv[]){ - - //if(argc != 2){ - // cerr << "usage: Graph1D file" << endl; - // exit(1); - //} - LibUtilities::SessionReaderSharedPtr vSession; - - //string in(argv[argc-1]); -#ifdef PC - string meshfile = "C:\\Data\\PhD\\Research\\dev\\Nektar++\\library\\Demos\\SpatialDomains\\meshdef1D.xml"; - string bcfile = "c:\\Data\\PhD\\Research\\dev\\Nektar++\\library\\Demos\\SpatialDomains\\BC1.xml"; -#else - string meshfile("../../../library/Demos/SpatialDomains/meshdef1D.xml"); - string bcfile("../../../library/Demos/SpatialDomains/BC1.xml"); -#endif - - MeshGraph1D graph1D; - BoundaryConditions bcs(&graph1D); - - graph1D.ReadGeometry(meshfile); - graph1D.ReadExpansions(meshfile); - bcs.Read(bcfile); - - BoundaryRegionCollection &boundaryRegions = bcs.GetBoundaryRegions(); - BoundaryConditionCollection &boundaryConditions = bcs.GetBoundaryConditions(); - - // Region 1, v component - BoundaryConditionShPtr bcShPtr((*boundaryConditions[1])["v"]); - boost::shared_ptr rbBC(boost::dynamic_pointer_cast(bcShPtr)); - - ConstForcingFunctionShPtr ffunc = bcs.GetForcingFunction("u"); - ConstInitialConditionShPtr ic = bcs.GetInitialCondition("v"); - - return 0; -} diff --git a/library/Demos/SpatialDomains/Graph2D.cpp b/library/Demos/SpatialDomains/Graph2D.cpp deleted file mode 100644 index f78bf0a62f9343206dea3cbda2b3dd166096ffa4..0000000000000000000000000000000000000000 --- a/library/Demos/SpatialDomains/Graph2D.cpp +++ /dev/null @@ -1,88 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#include - - -using namespace Nektar; -using namespace SpatialDomains; -using namespace std; - -// compile using Builds/Demos/SpatialDomains -> make DEBUG=1 Graph1D - -int main(int argc, char *argv[]){ - - //if(argc != 2){ - // cerr << "usage: Graph2D file" << endl; - 
// exit(1); - //} - -#if 0 - string in(argv[argc-1]); - string bcfile(argv[argc-1]); -#else - // If we all have the same relative structure, these should work for everyone. -#if 1 - - string in("../../library/Demos/SpatialDomains/meshdef2D.xml"); - string bcfile("../../library/Demos/SpatialDomains/BC1.xml"); - -#else - string in("C:/Data/PhD/Research/dev/Nektar++/library/Demos/SpatialDomains/meshdef2D.xml"); - string bcfile("c:/Data/PhD/Research/dev/Nektar++/library/Demos/SpatialDomains/BC1.xml"); -#endif - -#endif - - MeshGraph2D graph2D; - BoundaryConditions bcs(&graph2D); - - graph2D.ReadGeometry(in); - graph2D.ReadCurves(in); - graph2D.ReadExpansions(in); - bcs.Read(bcfile); - - try - { - ConstForcingFunctionShPtr ffunc = bcs.GetForcingFunction("u"); - NekDouble val = ffunc->Evaluate(8.0); - - ConstForcingFunctionShPtr ffunc2 = bcs.GetForcingFunction("v"); - val = ffunc->Evaluate(1.5); - - ConstInitialConditionShPtr ic = bcs.GetInitialCondition("v"); - val = ic->Evaluate(1.5); - - NekDouble tolerance = bcs.GetParameter("Tolerance"); - - BoundaryConditionCollection &bConditions = bcs.GetBoundaryConditions(); - - BoundaryConditionShPtr bcShPtr = (*bConditions[0])["v"]; - - if (bcShPtr->GetBoundaryConditionType() == eDirichlet) - { - DirichletBCShPtr dirichletBCShPtr = boost::static_pointer_cast(bcShPtr); - val = dirichletBCShPtr->m_dirichletCondition.Evaluate(1.5); - } - - std::string fcn1 = bcs.GetFunction("F1"); - std::string fcn2 = bcs.GetFunction("F2"); - std::string fcn3 = bcs.GetFunction("F3"); - std::string fcn4 = bcs.GetFunction("F4"); - - LibUtilities::Equation eqn1 = bcs.GetFunctionAsEquation("F3"); - - const SpatialDomains::ExpansionMap &expansions = graph2D.GetExpansions(); - } - catch(std::string err) - { - cout << err << endl; - } - - return 0; -} diff --git a/library/Demos/SpatialDomains/Graph3D.cpp b/library/Demos/SpatialDomains/Graph3D.cpp deleted file mode 100644 index 
80979061227617596b937f875f77efe520572d71..0000000000000000000000000000000000000000 --- a/library/Demos/SpatialDomains/Graph3D.cpp +++ /dev/null @@ -1,87 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#include - - -using namespace Nektar; -using namespace SpatialDomains; -using namespace std; - -// compile using Builds/Demos/SpatialDomains -> make DEBUG=1 Graph1D - -int main(int argc, char *argv[]){ -// -// if(argc != 2){ -// cerr << "usage: Graph3D file" << endl; -// exit(1); -// } - -#if 0 - string in(argv[argc-1]); - string bcfile(argv[argc-1]); -#else - // If we all have the same relative structure, these should work for everyone. -#if 1 - - string in("../../../../library/Demos/SpatialDomains/meshdef3D.xml"); - string bcfile("../../../../library/Demos/SpatialDomains/BC1.xml"); - -#else - string in("C:/Data/PhD/Research/dev/Nektar++/library/Demos/SpatialDomains/meshdef3D.xml"); - string bcfile("c:/Data/PhD/Research/dev/Nektar++/library/Demos/SpatialDomains/BC1.xml"); -#endif - -#endif - - MeshGraph3D graph3D; - BoundaryConditions bcs(&graph3D); - - graph3D.ReadGeometry(in); - graph3D.ReadExpansions(in); - bcs.Read(bcfile); - - try - { - ConstForcingFunctionShPtr ffunc = bcs.GetForcingFunction("u"); - NekDouble val = ffunc->Evaluate(8.0); - - ConstForcingFunctionShPtr ffunc2 = bcs.GetForcingFunction("v"); - val = ffunc->Evaluate(1.5); - - ConstInitialConditionShPtr ic = bcs.GetInitialCondition("v"); - val = ic->Evaluate(1.5); - - NekDouble tolerance = bcs.GetParameter("Tolerance"); - - BoundaryConditionCollection &bConditions = bcs.GetBoundaryConditions(); - - BoundaryConditionShPtr bcShPtr = (*bConditions[0])["v"]; - - if (bcShPtr->GetBoundaryConditionType() == eDirichlet) - { - DirichletBCShPtr dirichletBCShPtr = boost::static_pointer_cast(bcShPtr); - val = dirichletBCShPtr->m_dirichletCondition.Evaluate(1.5); - } - - std::string fcn1 = bcs.GetFunction("F1"); - std::string fcn2 = bcs.GetFunction("F2"); - std::string fcn3 = 
bcs.GetFunction("F3"); - std::string fcn4 = bcs.GetFunction("F4"); - - LibUtilities::Equation eqn1 = bcs.GetFunctionAsEquation("F3"); - - const SpatialDomains::ExpansionMap &expansions = graph3D.GetExpansions(); - } - catch(std::string err) - { - cout << err << endl; - } - - return 0; -} diff --git a/library/Demos/SpatialDomains/Mesh2D.jpg b/library/Demos/SpatialDomains/Mesh2D.jpg deleted file mode 100644 index 1da787ccea75b5b37501e499695ea1fbed8beda3..0000000000000000000000000000000000000000 Binary files a/library/Demos/SpatialDomains/Mesh2D.jpg and /dev/null differ diff --git a/library/Demos/SpatialDomains/domain.xml b/library/Demos/SpatialDomains/domain.xml deleted file mode 100644 index 73661715748eb2521f71fdaeb67e2f7d0d050c77..0000000000000000000000000000000000000000 --- a/library/Demos/SpatialDomains/domain.xml +++ /dev/null @@ -1,12 +0,0 @@ - - - - C[0-2] - - - - C[1] - C[2] - - - diff --git a/library/Demos/SpatialDomains/meshdef1D.xml b/library/Demos/SpatialDomains/meshdef1D.xml deleted file mode 100644 index 30b05932a4cefe5a5bc8d95b1ebd441e5198f1b6..0000000000000000000000000000000000000000 --- a/library/Demos/SpatialDomains/meshdef1D.xml +++ /dev/null @@ -1,94 +0,0 @@ - - - - - - - - - - A = 1.0 - B = 2.0 - C = 3.0 - - - - - 0.0 1.0 0.0 - 1.0 1.0 0.0 - 2.0 1.0 0.0 - 3.0 1.0 0.0 - 4.0 1.0 0.0 - 5.0 1.0 0.0 - - - - - - - 0 1 - 1 2 - 2 3 - 3 4 - 4 5 - - - - - S[0-4] - S[3,4] - V[0] - V[5] - - - C[0,1] - - - - - - - - - - - -

Lambda = 10

-

Tolerance = 0.0000000001

- - - - - u - - - - - C[1] - C[2] - - - - - - - - - - - - - -
- -
- diff --git a/library/Demos/SpatialDomains/meshdef2D.xml b/library/Demos/SpatialDomains/meshdef2D.xml deleted file mode 100644 index c72bf1897714fe311002d1639dd2c80fed94dec0..0000000000000000000000000000000000000000 --- a/library/Demos/SpatialDomains/meshdef2D.xml +++ /dev/null @@ -1,114 +0,0 @@ - - - - - - - - - - A = 1.0 - B = 2.0 - C = 3.0 - - - - 0.0 0.0 0.0 - 1.0 0.0 0.0 - 2.0 0.0 0.0 - 0.0 1.0 0.0 - 1.0 1.0 0.0 - 2.0 1.0 0.0 - 0.0 2.0 0.0 - 1.0 2.0 0.0 - 2.0 2.0 0.0 - - - - - 0 1 - 1 2 - 0 3 - 0 4 - 1 4 - 2 4 - 2 5 - 3 4 - 4 5 - 3 6 - 4 6 - 4 7 - 4 8 - 5 8 - 6 7 - 7 8 - - - - - - - - - - - 4.3 2.98 4.6 - 7.3 9.2 8.4 - - - 4.3 2.98 4.6 - 7.3 9.2 8.4 - - - 4.3 2.98 4.6 - 7.3 9.2 8.4 - 7.3 9.2 8.4 - - - 4.3 2.98 4.6 - 7.3 9.2 8.4 - 4.3 2.98 4.6 - 7.3 9.2 8.4 - - - 4.3 2.98 4.6 - 7.3 9.2 8.4 - 4.3 2.98 4.6 - 7.3 9.2 8.4 - - - - - - - - - - - 3 7 2 - 0 4 3 - 1 5 4 - 6 8 5 - 7 10 9 - 11 14 10 - 11 12 15 - 8 13 12 - - - - - V[1-3] - E[5-7] - T[0-2] T[4] T[6-7] - - - C[0] - - - - - - - - diff --git a/library/Demos/SpatialDomains/meshdef3D.xml b/library/Demos/SpatialDomains/meshdef3D.xml deleted file mode 100644 index 927bcf6582aaf9e52a92d297a3b0d875248e23a8..0000000000000000000000000000000000000000 --- a/library/Demos/SpatialDomains/meshdef3D.xml +++ /dev/null @@ -1,86 +0,0 @@ - - - - - - - - - - A = 1.0 - B = 2.0 - C = 3.0 - - - - 0 0.0 0.0 0.0 - 1.0 0.0 0.0 - 2.0 0.0 0.0 - 0.0 1.0 0.0 - 1.0 1.0 0.0 - 2.0 1.0 0.0 - 0.0 2.0 0.0 - 1.0 2.0 0.0 - 2.0 2.0 0.0 - - - - - 0 1 - 1 2 - 0 3 - 0 4 - 1 4 - 2 4 - 2 5 - 3 4 - 4 5 - 3 6 - 4 6 - 4 7 - 4 8 - 5 8 - 6 7 - 7 8 - - - - - 3 7 2 - 0 4 3 - 1 5 4 - 6 8 5 - 7 10 9 - 11 14 10 - 11 12 15 - 8 13 12 - - - - - - - - - - 3 7 2 5 - 0 4 3 5 - - - - - - V[1-3] - E[5-7] - T[0-2] T[4] T[6-7] - - - C[0] - - - - - - - - diff --git a/library/Demos/StdRegions/CMakeLists.txt b/library/Demos/StdRegions/CMakeLists.txt index 62fd0e10fbcaf48d3d538412b6fa4a7322519445..fb72bd0c04790f8b92d5678726dd43d7252f0a04 100644 --- 
a/library/Demos/StdRegions/CMakeLists.txt +++ b/library/Demos/StdRegions/CMakeLists.txt @@ -1,45 +1,19 @@ -SET(LinkLibraries StdRegions) - -SET(StdProject0DSource StdProject0D.cpp) -ADD_NEKTAR_EXECUTABLE(StdProject0D demos StdProject0DSource) -TARGET_LINK_LIBRARIES(StdProject0D ${LinkLibraries}) - -SET(StdProject1DSource StdProject1D.cpp) -ADD_NEKTAR_EXECUTABLE(StdProject1D demos StdProject1DSource) -TARGET_LINK_LIBRARIES(StdProject1D ${LinkLibraries}) - -SET(StdProject_Diff1DSource StdProject_Diff1D.cpp) -ADD_NEKTAR_EXECUTABLE(StdProject_Diff1D demos StdProject_Diff1DSource) -TARGET_LINK_LIBRARIES(StdProject_Diff1D ${LinkLibraries}) - -SET(StdProject2DSource StdProject2D.cpp) -ADD_NEKTAR_EXECUTABLE(StdProject2D demos StdProject2DSource) -TARGET_LINK_LIBRARIES(StdProject2D ${LinkLibraries}) - -SET(StdProject_Diff2DSource StdProject_Diff2D.cpp) -ADD_NEKTAR_EXECUTABLE(StdProject_Diff2D demos StdProject_Diff2DSource) -TARGET_LINK_LIBRARIES(StdProject_Diff2D ${LinkLibraries}) - -SET(StdEquiToCoeff2DSource StdEquiToCoeff2D.cpp) -ADD_NEKTAR_EXECUTABLE(StdEquiToCoeff2D demos StdEquiToCoeff2DSource) -TARGET_LINK_LIBRARIES(StdEquiToCoeff2D ${LinkLibraries}) - -SET(StdProject3DSource StdProject3D.cpp) -ADD_NEKTAR_EXECUTABLE(StdProject3D demos StdProject3DSource) -TARGET_LINK_LIBRARIES(StdProject3D ${LinkLibraries}) - -SET(StdProject_Diff3DSource StdProject_Diff3D.cpp) -ADD_NEKTAR_EXECUTABLE(StdProject_Diff3D demos StdProject_Diff3DSource) -TARGET_LINK_LIBRARIES(StdProject_Diff3D ${LinkLibraries}) - -# Generate list of available subdirectories -FILE(GLOB dir_list "*") -LIST(REMOVE_ITEM dir_list "${CMAKE_SOURCE_DIR}/.svn") -FOREACH(dir ${dir_list}) - IF(IS_DIRECTORY ${dir} AND EXISTS ${dir}/CMakeLists.txt) - ADD_SUBDIRECTORY(${dir}) - ENDIF(IS_DIRECTORY ${dir} AND EXISTS ${dir}/CMakeLists.txt) -ENDFOREACH(dir ${dir_list}) +ADD_NEKTAR_EXECUTABLE(StdProject0D + COMPONENT demos DEPENDS StdRegions SOURCES StdProject0D.cpp) +ADD_NEKTAR_EXECUTABLE(StdProject1D + COMPONENT demos 
DEPENDS StdRegions SOURCES StdProject1D.cpp) +ADD_NEKTAR_EXECUTABLE(StdProject2D + COMPONENT demos DEPENDS StdRegions SOURCES StdProject2D.cpp) +ADD_NEKTAR_EXECUTABLE(StdProject3D + COMPONENT demos DEPENDS StdRegions SOURCES StdProject3D.cpp) +ADD_NEKTAR_EXECUTABLE(StdProject_Diff1D + COMPONENT demos DEPENDS StdRegions SOURCES StdProject_Diff1D.cpp) +ADD_NEKTAR_EXECUTABLE(StdProject_Diff2D + COMPONENT demos DEPENDS StdRegions SOURCES StdProject_Diff2D.cpp) +ADD_NEKTAR_EXECUTABLE(StdProject_Diff3D + COMPONENT demos DEPENDS StdRegions SOURCES StdProject_Diff3D.cpp) +ADD_NEKTAR_EXECUTABLE(StdEquiToCoeff2D + COMPONENT demos DEPENDS StdRegions SOURCES StdEquiToCoeff2D.cpp) ADD_NEKTAR_TEST(StdProject1D_Seg_Orth_P6_Q7) ADD_NEKTAR_TEST(StdProject1D_Seg_Mod_P6_Q7) @@ -65,6 +39,7 @@ ADD_NEKTAR_TEST(StdProject3D_Tet_Orth_P6_Q7) ADD_NEKTAR_TEST(StdProject3D_Tet_Mod_P6_Q7) ADD_NEKTAR_TEST(StdProject3D_Prism_Orth_P6_Q7) ADD_NEKTAR_TEST(StdProject3D_Prism_Mod_P6_Q7) +ADD_NEKTAR_TEST(StdProject3D_Pyr_Ortho_P6_Q7) ADD_NEKTAR_TEST(StdProject3D_Pyr_Mod_P6_Q7) ADD_NEKTAR_TEST(StdProject3D_Hex_Orth_P6_Q7) ADD_NEKTAR_TEST(StdProject3D_Hex_Mod_P6_Q7) diff --git a/library/Demos/StdRegions/ExtraDemos/AliasingProject2D.cpp b/library/Demos/StdRegions/ExtraDemos/AliasingProject2D.cpp deleted file mode 100644 index 28ce0784233dab1cbd1439e39367b383e9e60986..0000000000000000000000000000000000000000 --- a/library/Demos/StdRegions/ExtraDemos/AliasingProject2D.cpp +++ /dev/null @@ -1,293 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include - -using namespace Nektar; -using namespace StdRegions; -using namespace std; - -double Tri_sol(double x, double y, int order1, int order2); -double Quad_sol(double x, double y, int order1, int order2, - BasisType btype1, BasisType btype2); - -// compile using Builds/Demos/StdRegions -> make DEBUG=1 AliasingProject2D - -// This routine projects a polynomial or trigonmetric functions which -// has energy in all mdoes of the expansions 
and reports and error - -main(int argc, char *argv[]) -{ - int i,j,k,l; - const double *z1,*z2,*w; - int order1,order2, nq1,nq2; - PointsType Qtype1,Qtype2; - BasisType btype1,btype2; - ShapeType regionshape; - StdExpansion2D *E; - double *sol; - - if(argc != 8) - { - fprintf(stderr,"Usage: AliasingProject2D RegionShape Type1 Type2 exporder1" - "exporder2 nq1 nq2 \n"); - - fprintf(stderr,"Where RegionShape is an integer value which " - "dictates the region shape:\n"); - fprintf(stderr,"\t Triangle = 2\n"); - fprintf(stderr,"\t Quadrilateral = 3\n"); - - - fprintf(stderr,"Where type is an integer value which " - "dictates the basis as:\n"); - - fprintf(stderr,"\t Ortho_A = 0\n"); - fprintf(stderr,"\t Ortho_B = 1\n"); - fprintf(stderr,"\t Modified_A = 3\n"); - fprintf(stderr,"\t Modified_B = 4\n"); - fprintf(stderr,"\t Fourier = 6\n"); - fprintf(stderr,"\t Lagrange = 7\n"); - fprintf(stderr,"\t Legendre = 8\n"); - fprintf(stderr,"\t Chebyshev = 9\n"); - - fprintf(stderr,"Note type = 2,5 are for three-dimensional basis\n"); - - exit(1); - } - - regionshape = (ShapeType) atoi(argv[1]); - - // Check to see if 2D region - if((regionshape != eTriangle)&&(regionshape != eQuadrilateral)) - { - ErrorUtil::Error(ErrorUtil::efatal,"AliasingProject2D", - "This shape is not a 2D region"); - } - - btype1 = (BasisType) atoi(argv[2]); - btype2 = (BasisType) atoi(argv[3]); - - // Check to see that correct Expansions are used - switch(regionshape) - { - case eTriangle: - if((btype1 == eOrtho_B)||(btype1 == eModified_B)) - { - ErrorUtil::Error(ErrorUtil::efatal,"Project2D", - "Basis 1 cannot be of type Ortho_B or Modified_B"); - } - - if((btype2 != eOrtho_B)&&(btype2 != eModified_B)) - { - ErrorUtil::Error(ErrorUtil::efatal,"Project2D", - "Basis 2 must be of type Ortho_B or Modified_B"); - } - break; - case eQuadrilateral: - if((btype1 == eOrtho_B)||(btype1 == eOrtho_B)|| - (btype1 == eModified_B)||(btype1 == eModified_C)) - { - ErrorUtil::Error(ErrorUtil::efatal,"Project2D", - "Basis 
1 is for 2 or 3D expansions"); - } - - if((btype2 == eOrtho_B)||(btype2 == eOrtho_B)|| - (btype2 == eModified_B)||(btype2 == eModified_C)) - { - ErrorUtil::Error(ErrorUtil::efatal,"Project2D", - "Basis 2 is for 2 or 3D expansions"); - } - break; - } - - order1 = atoi(argv[4]); - order2 = atoi(argv[5]); - nq1 = atoi(argv[6]); - nq2 = atoi(argv[7]); - - sol = new double [nq1*nq2]; - - if(btype1 != eFourier) - { - Qtype1 = eLobatto; - } - else - { - Qtype1 = eFourierEvenSp; - } - - if(btype2 != eFourier) - { - Qtype2 = eLobatto; - } - else - { - Qtype2 = eFourierEvenSp; - } - - - //----------------------------------------------- - // Define a segment expansion based on basis definition - - switch(regionshape) - { - case eTriangle:{ - const BasisKey b1(btype1,order1,Qtype1,nq1,0,0); - const BasisKey b2(btype2,order2,eRadauM,nq2,1,0); - - E = new StdTriExp (b1,b2); - - E->GetZW(0,z1,w); - E->GetZW(1,z2,w); - - //---------------------------------------------- - // Define solution to be projected - double x,y; - for(i = 0; i < nq1; ++i) - { - for(j = 0; j < nq2; ++j) - { - x = (1+z1[i])*(1-z2[j])/2-1.0; - y = z2[j]; - sol[i+nq1*j] = Tri_sol(x,y,order1,order2); - sol[i+nq1*j] *= sol[i+nq1*j]; - } - } - //---------------------------------------------- - } - break; - case eQuadrilateral: - { - const BasisKey b1(btype1,order1,Qtype1,nq1,0,0); - const BasisKey b2(btype2,order2,Qtype2,nq2,0,0); - E = new StdQuadExp (b1,b2); - - //---------------------------------------------- - // Define solution to be projected - - E->GetZW(0,z1,w); - E->GetZW(1,z2,w); - - for(i = 0; i < nq1; ++i) - { - for(j = 0; j < nq2; ++j) - { - sol[i*nq1 +j] = Quad_sol(z1[i],z2[j],order1,order2,btype1,btype2); - sol[i*nq1 +j] *= sol[i*nq1 +j]; - } - } - //--------------------------------------------- - } - break; - } - - - //--------------------------------------------- - // Project onto Expansion - E->FwdTrans(sol); - //--------------------------------------------- - - // 
-------------------------------------------- - // Dump coefficients to file - ofstream out("AliasingModes.m"); - E->WriteCoeffsToFile(out); - //-------------------------------------------- - - //------------------------------------------- - // Backward Transform Solution to get projected values - E->BwdTrans(E->GetPhys()); - //------------------------------------------- - - // -------------------------------------------- - // Dump physical points to file - ofstream out1("AliasingModes.dat"); - E->WriteToFile(out1); - //-------------------------------------------- - - //-------------------------------------------- - // Calculate L_inf error - cout << "L infinity error: " << E->Linf(sol) << endl; - cout << "L 2 error: " << E->L2 (sol) << endl; - //-------------------------------------------- - - delete[] sol; -} - -double Tri_sol(double x, double y, int order1, int order2) -{ - int l,k; - double sol = 0; - - for(k = 0; k < order1; ++k) - { - for(l = 0; l < order2-k; ++l) - { - sol += pow(x,k)*pow(y,l); - } - } - - return sol; -} - -double Quad_sol(double x, double y, int order1, int order2, BasisType btype1, - BasisType btype2) -{ - - int k,l; - double sol = 0; - - if(btype1 != eFourier) - { - if(btype2 != eFourier) - { - for(k = 0; k < order1; ++k) - { - for(l = 0; l < order2; ++l) - { - sol += pow(x,k)*pow(y,l); - } - } - } - else - { - for(k = 0; k < order1; ++k) - { - for(l = 0; l < order2/2; ++l) - { - sol += pow(x,k)*sin(M_PI*l*y) + pow(x,k)*cos(M_PI*l*y); - } - } - } - } - else - { - if(btype2 != eFourier){ - for(k = 0; k < order1/2; ++k) - { - for(l = 0; l < order2; ++l) - { - sol += sin(M_PI*k*x)*pow(y,l) + cos(M_PI*k*x)*pow(y,l); - } - } - } - else - { - for(k = 0; k < order1/2; ++k) - { - for(l = 0; l < order2/2; ++l) - { - sol += sin(M_PI*k*x)*sin(M_PI*l*y) - + sin(M_PI*k*x)*cos(M_PI*l*y) - + cos(M_PI*k*x)*sin(M_PI*l*y) - + cos(M_PI*k*x)*cos(M_PI*l*y); - } - } - } - } - - return sol; -} diff --git a/library/Demos/StdRegions/ExtraDemos/Basis1D.cpp 
b/library/Demos/StdRegions/ExtraDemos/Basis1D.cpp deleted file mode 100644 index 7d4df8a82bdcdb1386b1f669bcfac98fa3bca49e..0000000000000000000000000000000000000000 --- a/library/Demos/StdRegions/ExtraDemos/Basis1D.cpp +++ /dev/null @@ -1,78 +0,0 @@ -#include -#include -#include - -#include - -using namespace Nektar; -using namespace StdRegions; -using namespace std; - -// compile using Builds/Demos/StdRegions -> make DEBUG=1 Basis1D - -// This routine generates a tecplot output file of a 1D basis - -main(int argc, char *argv[]) -{ - int i,j; - int order, nq; - const double *z,*w; - const double *b; - BasisType btype; - BasisManager B; - - - if(argc != 4) - { - fprintf(stderr,"Usage: Basis1D Type order nq \n"); - - fprintf(stderr,"Where type is an integer value which " - "dictates the basis as:\n"); - fprintf(stderr,"\t Ortho_A = 0\n"); - fprintf(stderr,"\t Ortho_B = 1\n"); - fprintf(stderr,"\t Ortho_C = 2\n"); - fprintf(stderr,"\t Modified_A = 3\n"); - fprintf(stderr,"\t Modified_B = 4\n"); - fprintf(stderr,"\t Modified_C = 5\n"); - fprintf(stderr,"\t Fourier = 6\n"); - fprintf(stderr,"\t Lagrange = 7\n"); - fprintf(stderr,"\t Legendre = 8\n"); - fprintf(stderr,"\t Chebyshev = 9\n"); - - fprintf(stderr,"Note type = 0,1,2,4,5 are for higher dimensional basis\n"); - - exit(1); - } - - btype = (BasisType) atoi(argv[1]); - order = atoi(argv[2]); - nq = atoi(argv[3]); - - //----------------------------------------------- - // Define points at which basis is to be evaluated - B.GetZW(StdRegions::eLobatto,nq,z,w,0,0); - //----------------------------------------------- - - - //---------------------------------------------- - // Generate basis - b = B.GetBasisArray(btype, order, eLobatto, nq, 0.0, 0.0); - //---------------------------------------------- - - - //---------------------------------------------- - // Output 1D basis using only basis information. 
- - fprintf(stdout,"VARIABLES = z phi(z)\n"); - - for(i = 0; i < order; ++i) - { - fprintf(stdout,"ZONE T = \"Mode %d\" I=%d\n",i+1,nq); - for(j = 0; j < nq; ++j) - { - fprintf(stdout, "%9.6lf %9.6lf \n",z[j],b[i*nq+j]); - } - } - //----------------------------------------------- - -} diff --git a/library/Demos/StdRegions/ExtraDemos/HMatrix1D.cpp b/library/Demos/StdRegions/ExtraDemos/HMatrix1D.cpp deleted file mode 100644 index da7a47d49051ed2ac8df1b497eb605013c69bbe7..0000000000000000000000000000000000000000 --- a/library/Demos/StdRegions/ExtraDemos/HMatrix1D.cpp +++ /dev/null @@ -1,86 +0,0 @@ -#include -#include -#include -#include - -using namespace StdRegions; -using namespace std; - -/* - g++ -g -c HMatrix1D.cpp -I ../../../include -// Linux default - g++ -g -o HMatrix1D HMatrix1D.o -L../../ -lStdRegions -lPolylib -lm -lblas -lg2c -// MaxOSX default - g++ -g -o HMatrix1D HMatrix1D.o -L../../ -lStdRegions -lPolylib -lm -framework Accelerate -*/ - -int main(int argc, char *argv[]){ - int i,j; - int order, nq; - const double *z,*w; - double *hmat; - PointType Qtype; - BasisType btype; - StdSegExp *E; - - if(argc != 4){ - fprintf(stderr,"Usage: MMatrix1D Type order nq \n"); - - fprintf(stderr,"Where type is an integer value which " - "dictates the basis as:\n"); - fprintf(stderr,"\t Ortho_A = 0\n"); - fprintf(stderr,"\t Modified_A = 3\n"); - fprintf(stderr,"\t Fourier = 6\n"); - fprintf(stderr,"\t Lagrange = 7\n"); - fprintf(stderr,"\t Legendre = 8\n"); - fprintf(stderr,"\t Chebyshev = 9\n"); - - fprintf(stderr,"Note type = 1,2,4,5 are for higher dimensional basis\n"); - - exit(1); - } - - btype = (BasisType) atoi(argv[1]); - - // Check to see that only 1D Expansions are used - if((btype == Ortho_B) ||(btype == Ortho_B)|| - (btype == Modified_B)||(btype == Modified_C)) - ErrorUtil::Error(fatal,"HMatrix1D", - "This basis is for 2 or 3D expansions"); - - order = atoi(argv[2]); - nq = atoi(argv[3]); - - if(btype != Fourier) - Qtype = Lobatto; - else - Qtype = 
FourierEvenSp; - - - BasisKey BK(btype,order,Qtype,nq,0,0); - - //----------------------------------------------- - // Define a segment expansion based on basis definition - E = new StdSegExp(BK); - //----------------------------------------------- - - // --------------------------------------------- - // Generate mass matrix based upon basis definition and put in mmat - hmat = (E->GetLapMatrix())->get_matrix(); - //-------------------------------------------- - - //---------------------------------------------- - // Dump mass matrix - - fprintf(stdout,"VARIABLES = hmat\n"); - - fprintf(stdout,"ZONE T = \"Helmholtz Matrix order %d\" I=%d J=%d\n",order,order, - order); - for(i = 0; i < order; ++i){ - for(j = 0; j < order; ++j) - fprintf(stdout, "%9.6lf ",hmat[i*order+j]); - fputc('\n',stdout); - } - //----------------------------------------------- - return 0; -} diff --git a/library/Demos/StdRegions/ExtraDemos/MMatrix1D.cpp b/library/Demos/StdRegions/ExtraDemos/MMatrix1D.cpp deleted file mode 100644 index 5db6dfa01ef667502e4cad62effdf53637c88183..0000000000000000000000000000000000000000 --- a/library/Demos/StdRegions/ExtraDemos/MMatrix1D.cpp +++ /dev/null @@ -1,78 +0,0 @@ -#include -#include -#include - -#include - -using namespace Nektar; -using namespace StdRegions; -using namespace std; - -// compile using Builds/Demos/StdRegions -> make DEBUG=1 PROG=MMatrix1D - -main(int argc, char *argv[]){ - int i,j; - int order, nq; - const double *z,*w; - PointsType Qtype; - BasisType btype; - StdSegExp *E; - - if(argc != 4) - { - fprintf(stderr,"Usage: MMatrix1D Type order nq \n"); - - fprintf(stderr,"Where type is an integer value which " - "dictates the basis as:\n"); - fprintf(stderr,"\t Ortho_A = 0\n"); - fprintf(stderr,"\t Modified_A = 3\n"); - fprintf(stderr,"\t Fourier = 6\n"); - fprintf(stderr,"\t Lagrange = 7\n"); - fprintf(stderr,"\t Legendre = 8\n"); - fprintf(stderr,"\t Chebyshev = 9\n"); - - fprintf(stderr,"Note type = 1,2,4,5 are for higher dimensional 
basis\n"); - - exit(1); - } - - btype = (BasisType) atoi(argv[1]); - - // Check to see that only 1D Expansions are used - if((btype == eOrtho_B)||(btype == eOrtho_B)|| - (btype == eModified_B)||(btype == eModified_C)) - { - ErrorUtil::Error(ErrorUtil::efatal,"MMatrix1D", - "This basis is for 2 or 3D expansions"); - } - - order = atoi(argv[2]); - nq = atoi(argv[3]); - - if(btype != eFourier) - { - Qtype = eLobatto; - } - else - { - Qtype = eFourierEvenSp; - } - - - BasisKey BK(btype,order,Qtype,nq,0,0); - - //----------------------------------------------- - // Define a segment expansion based on basis definition - E = new StdSegExp (BK); - //----------------------------------------------- - - // --------------------------------------------- - // Generate mass matrix based upon basis definition and dump to stdout - (E->GetMassMatrix())->DumpMatrix(stdout); - //-------------------------------------------- - - // --------------------------------------------- - // Show Matrix structure - (E->GetMassMatrix())->ShowMatrixStructure(stdout); - //-------------------------------------------- -} diff --git a/library/Demos/StdRegions/ExtraDemos/MMatrix2D.cpp b/library/Demos/StdRegions/ExtraDemos/MMatrix2D.cpp deleted file mode 100644 index c4ef76da76f962482cc7eb0d663e295df628aac5..0000000000000000000000000000000000000000 --- a/library/Demos/StdRegions/ExtraDemos/MMatrix2D.cpp +++ /dev/null @@ -1,154 +0,0 @@ -#include -#include -#include -#include -#include - -#include - -using namespace Nektar; -using namespace StdRegions; -using namespace std; - -// compile using Builds/Demos/StdRegions -> make DEBUG=1 -// PROG=MMatrix2D //Gosse: "PROG=" should be removed - -main(int argc, char *argv[]) -{ - int i,j; - int order1,order2, nq1,nq2; - PointsType Qtype1,Qtype2; - BasisType btype1,btype2; - ShapeType regionshape; - StdExpansion *E; - - if(argc != 8) - { - fprintf(stderr,"Usage: MMatrix2D RegionShape Type1 Type2 order1 " - "order2 nq1 nq2 \n"); - - fprintf(stderr,"Where RegionShape 
is an integer value which " - "dictates the region shape:\n"); - fprintf(stderr,"\t Triangle = 1\n"); //Gosse: should be: - //Triangle =2 - fprintf(stderr,"\t Quadrilateral = 2\n"); //Gosse: should be: - //Quadrilateral = 3 - - - fprintf(stderr,"Where type is an integer value which " - "dictates the basis as:\n"); - - fprintf(stderr,"\t Ortho_A = 0\n"); - fprintf(stderr,"\t Ortho_B = 1\n"); - fprintf(stderr,"\t Modified_A = 3\n"); - fprintf(stderr,"\t Modified_B = 4\n"); - fprintf(stderr,"\t Fourier = 6\n"); - fprintf(stderr,"\t Lagrange = 7\n"); - fprintf(stderr,"\t Legendre = 8\n"); - fprintf(stderr,"\t Chebyshev = 9\n"); - - fprintf(stderr,"Note type = 2,5 are for three-dimensional basis\n"); - - exit(1); - } - - regionshape = (ShapeType) atoi(argv[1]); - - // Check to see if 2D region - if((regionshape != eTriangle)&&(regionshape != eQuadrilateral)) - { - ErrorUtil::Error(ErrorUtil::efatal,"MMatrix2D","This shape is not a 2D region"); - } - - btype1 = (BasisType) atoi(argv[2]); - btype2 = (BasisType) atoi(argv[3]); - - // Check to see that correct Expansions are used - switch(regionshape) - { - case eTriangle: - if((btype1 == eOrtho_B)||(btype1 == eModified_B)) - { - ErrorUtil::Error(ErrorUtil::efatal,"MMatrix2D", - "Basis 1 cannot be of type Ortho_B or Modified_B"); - } - - if((btype2 != eOrtho_B)&&(btype2 != eModified_B)) - { - ErrorUtil::Error(ErrorUtil::efatal,"MMatrix2D", - "Basis 2 must be of type Ortho_B or Modified_B"); - } - break; - case eQuadrilateral: - if((btype1 == eOrtho_B)||(btype1 == eOrtho_B)|| - (btype1 == eModified_B)||(btype1 == eModified_C)) - { - ErrorUtil::Error(ErrorUtil::efatal,"MMatrix2D", - "Basis 1 is for 2 or 3D expansions"); - } - - if((btype2 == eOrtho_B)||(btype2 == eOrtho_B)|| - (btype2 == eModified_B)||(btype2 == eModified_C)) - { - ErrorUtil::Error(ErrorUtil::efatal,"MMatrix2D", - "Basis 2 is for 2 or 3D expansions"); - } - break; - } - - order1 = atoi(argv[4]); - order2 = atoi(argv[5]); - nq1 = atoi(argv[6]); - nq2 = 
atoi(argv[7]); - - if(btype1 != eFourier) - { - Qtype1 = eLobatto; - } - else - { - Qtype1 = eFourierEvenSp; - } - - if(btype2 != eFourier) - { - Qtype2 = eLobatto; - } - else - { - Qtype2 = eFourierEvenSp; - } - - //----------------------------------------------- - // Define a segment expansion based on basis definition - - switch(regionshape) - { - case eTriangle: - { - const BasisKey b1(btype1,order1,Qtype1,nq1,0,0); - const BasisKey b2(btype2,order2,Qtype2,nq2,1,0); - E = new StdTriExp (b1,b2); - break; - } - case eQuadrilateral: - { - const BasisKey b1(btype1,order1,Qtype1,nq1,0,0); - const BasisKey b2(btype2,order2,Qtype2,nq2,0,0); - E = new StdQuadExp (b1,b2); - break; - } - } - - - // --------------------------------------------- - // Generate mass matrix based upon basis definition and dump to stdout - (E->GetMassMatrix())->DumpMatrix(stdout); - //-------------------------------------------- - - - // --------------------------------------------- - // Show Matrix structure - (E->GetMassMatrix())->ShowMatrixStructure(stdout); - //-------------------------------------------- -} diff --git a/library/Demos/StdRegions/ExtraDemos/MMatrix3D.cpp b/library/Demos/StdRegions/ExtraDemos/MMatrix3D.cpp deleted file mode 100644 index 5eb59ab54a8ba456a20c8579f1da86ad529feb92..0000000000000000000000000000000000000000 --- a/library/Demos/StdRegions/ExtraDemos/MMatrix3D.cpp +++ /dev/null @@ -1,150 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#include - -using namespace Nektar; -using namespace StdRegions; -using namespace std; - -// compile using Builds/Demos/StdRegions -> make DEBUG=1 -// PROG=MMatrix3D //Gosse: should be: make DEBUG=1 MMatrix3D - -main(int argc, char *argv[]) -{ - int i,j; - int order1,order2,order3,nq1,nq2,nq3; - PointsType Qtype1,Qtype2,Qtype3; - BasisType btype1,btype2,btype3; - ShapeType regionshape; - StdExpansion *E; - - if(argc != 11) - { - fprintf(stderr,"Usage: MMatrix3D RegionShape Type1 Type2 Type3 order1 " - 
"order2 order3 nq1 nq2 nq3 \n"); - - fprintf(stderr,"Where RegionShape is an integer value which " - "dictates the region shape:\n"); - fprintf(stderr,"\t Tetrahedron = 4\n"); - fprintf(stderr,"\t Pyramid = 5\n"); - fprintf(stderr,"\t Prism = 6\n"); - fprintf(stderr,"\t Hexahedron = 7\n"); - - - fprintf(stderr,"Where type is an integer value which " - "dictates the basis as:\n"); - - fprintf(stderr,"\t Ortho_A = 0\n"); - fprintf(stderr,"\t Ortho_B = 1\n"); - fprintf(stderr,"\t Ortho_C = 2\n"); - fprintf(stderr,"\t Modified_A = 3\n"); - fprintf(stderr,"\t Modified_B = 4\n"); - fprintf(stderr,"\t Modified_C = 5\n"); - fprintf(stderr,"\t Fourier = 6\n"); - fprintf(stderr,"\t Lagrange = 7\n"); - fprintf(stderr,"\t Legendre = 8\n"); - fprintf(stderr,"\t Chebyshev = 9\n"); - - exit(1); - } - - regionshape = (ShapeType) atoi(argv[1]); - - // Check to see if 3D region - if((regionshape != eTetrahedron)&&(regionshape != eHexahedron)&& - (regionshape != ePrism)&&(regionshape != ePyramid)) - { - ErrorUtil::Error(ErrorUtil::efatal,"MMatrix3D","This shape is " - "not a 3D region"); - } - - btype1 = (BasisType) atoi(argv[2]); - btype2 = (BasisType) atoi(argv[3]); - btype3 = (BasisType) atoi(argv[4]); - - // Check to see that correct Expansions are used - - order1 = atoi(argv[5]); - order2 = atoi(argv[6]); - order3 = atoi(argv[7]); - - nq1 = atoi(argv[8]); - nq2 = atoi(argv[9]); - nq3 = atoi(argv[9]); - - if(btype1 != eFourier) - { - Qtype1 = eLobatto; - } - else - { - Qtype1 = eFourierEvenSp; - } - - if(btype2 != eFourier) - { - Qtype2 = eLobatto; - } - else - { - Qtype2 = eFourierEvenSp; - } - - if(btype3 != eFourier) - { - Qtype3 = eLobatto; - } - else - { - Qtype3 = eFourierEvenSp; - } - - - //----------------------------------------------- - // Define a segment expansion based on basis definition - - switch(regionshape) - { - case eTetrahedron: - fprintf(stderr,"Tetrahedron is not yet set up\n"); - exit(1); - break; - - case ePyramid: - fprintf(stderr,"Pyramid is not yet set 
up\n"); - exit(1); - break; - - case ePrism: - fprintf(stderr,"Prism is not yet set up\n"); - exit(1); - break; - - case eHexahedron: - { - const BasisKey b1(btype1,order1,Qtype1,nq1,0,0); - const BasisKey b2(btype2,order2,Qtype2,nq2,0,0); - const BasisKey b3(btype3,order3,Qtype3,nq3,0,0); - E = new StdHexExp (b1,b2,b3); - break; - } - } - - - // --------------------------------------------- - // Generate mass matrix based upon basis definition and dump to stdout - (E->GetMassMatrix())->DumpMatrix(stdout); - //-------------------------------------------- - - - // --------------------------------------------- - // Show Matrix structure - (E->GetMassMatrix())->ShowMatrixStructure(stdout); - //-------------------------------------------- -} diff --git a/library/Demos/StdRegions/ExtraDemos/NBasisTrans2D.cpp b/library/Demos/StdRegions/ExtraDemos/NBasisTrans2D.cpp deleted file mode 100644 index 9c5429242fe181d0973101031669dc3f93eac3b0..0000000000000000000000000000000000000000 --- a/library/Demos/StdRegions/ExtraDemos/NBasisTrans2D.cpp +++ /dev/null @@ -1,111 +0,0 @@ -#include -#include -#include -#include -#include - -#include - -using namespace Nektar; -using namespace StdRegions; -using namespace std; - -// compile using Builds/Demos/StdRegions -> make DEBUG=1 NBasisTrans2D - -// This routine generates a basis transformation from a nodal expansion to -// the orthogonal expansion - -int main(int argc, char *argv[]) -{ - - int i,j,k,l; - const double *z1,*z2,*w; - int order, nq1,nq2; - StdExpansion2D *E; - double *sol,*coeffs; - - if(argc != 3) - { - fprintf(stderr,"Usage: NBasisTrans2D PointDis order \n"); - - fprintf(stderr,"Where PointDist is an integer value which " - "dictates the nodal distrbituion:\n"); - fprintf(stderr,"\t Electrostatic points = 0\n"); - fprintf(stderr,"\t Fekete points = 1\n"); - - exit(1); - } - - order = atoi(argv[argc-1]); - - // generate Nodal triangle expansion - NodalBasisType btype = (NodalBasisType) atoi(argv[argc-2]); - - const BasisKey 
b1(eOrtho_A,order,eLobatto,order+1,0,0); - const BasisKey b2(eOrtho_B,order,eRadauM, order,1,0); - // const BasisKey b1(Modified_A,order,Lobatto,order+1,0,0); - // const BasisKey b2(Modified_B,order,RadauM, order,1,0); - - E = new StdNodalTriExp (b1,b2,btype); - - nq1 = E->GetPointsOrder(0); - nq2 = E->GetPointsOrder(1); - sol = new double [nq1*nq2]; - - coeffs = E->GetCoeffs(); - E->GetZW(0,z1,w); - E->GetZW(1,z2,w); - - //---------------------------------------------- - // Define solution at Nodal points and quadrature points - - double x,y; - for(i = 0; i < nq1; ++i) - { - for(j = 0; j < nq2; ++j) - { - x = (1+z1[i])*(1-z2[j])/2-1.0; - y = z2[j]; - sol[i+nq1*j] = 0.0; - // activate all modes - for(k = 0; k < order; ++k) - { - for(l = 0; l < order-k; ++l) - { - sol[i+nq1*j] += pow(x,k)*pow(y,l); - } - } - } - } - - const double *xp, *yp; - int npts = E->GetNodalPoints(xp,yp); - - for(i = 0; i < npts; ++i) - { - coeffs[i] = 0.0; - // activate all modes - for(k = 0; k < order; ++k) - { - for(l = 0; l < order-k; ++l) - { - coeffs[i] += pow(xp[i],k)*pow(yp[i],l); - } - } - } - - - //------------------------------------------- - // Backward Transform Solution to get projected values - E->BwdTrans(E->GetPhys()); - //------------------------------------------- - - //-------------------------------------------- - // Calculate L_inf error - cout << "L infinity error: " << E->Linf(sol) << endl; - cout << "L 2 error: " << E->L2 (sol) << endl; - //-------------------------------------------- - - delete[] sol; - return 0; -} diff --git a/library/Demos/StdRegions/ExtraDemos/NodalBasis.cpp b/library/Demos/StdRegions/ExtraDemos/NodalBasis.cpp deleted file mode 100644 index 35714f3f56d46fda781f808b43cf19abd87b05f3..0000000000000000000000000000000000000000 --- a/library/Demos/StdRegions/ExtraDemos/NodalBasis.cpp +++ /dev/null @@ -1,80 +0,0 @@ -#include -#include -#include - -#include -#include - -#include -#include - -using namespace Nektar; -using namespace StdRegions; -using 
namespace std; - - -// This routine projects a polynomial or trigonmetric functions which -// has energy in all mdoes of the expansions and report an error. - -int main(int argc, char *argv[]) -{ - int i,j; - int order; - int npts; - - LibUtilities::PointsType Qtype1,Qtype2; - LibUtilities::BasisType btype1,btype2; - LibUtilities::PointsType NodalType; - StdRegions::StdNodalTriExp *E; - Array r,s; - - if(argc != 3) - { - fprintf(stderr,"Usage: NodalBasis Type order\n"); - - fprintf(stderr,"Where type is an integer value which " - "dictates the basis as:\n"); - fprintf(stderr,"\t NodalTriElec = 0\n"); - fprintf(stderr,"\t NodalTriFekete = 1\n"); - exit(1); - } - - btype1 = LibUtilities::eOrtho_A; - btype2 = LibUtilities::eOrtho_B; - Qtype1 = LibUtilities::eGaussLobattoLegendre; - Qtype2 = LibUtilities::eGaussLobattoLegendre; - - switch(atoi(argv[argc-2])) - { - case 0: - NodalType = LibUtilities::eNodalTriElec; - break; - case 1: - NodalType = LibUtilities::eNodalTriFekete; - break; - } - - order = atoi(argv[argc-1]); - ASSERTL0(order > 1,"Order must be larger than 1"); - - const LibUtilities::PointsKey Pkey1(order+1,Qtype1); - const LibUtilities::PointsKey Pkey2(order+1,Qtype2); - const LibUtilities::BasisKey Bkey1(btype1,order,Pkey1); - const LibUtilities::BasisKey Bkey2(btype2,order,Pkey2); - - E = new StdRegions::StdNodalTriExp(Bkey1,Bkey2,NodalType); - - E->GetNodalPoints(r,s); - - //---------------------------------------------- - // Output 1D basis using only basis information. 
- npts = order*(order+1)/2; - fprintf(stdout,"VARIABLES = x y\n"); - fprintf(stdout,"ZONE T = \"Order %d\" I=%d\n",order,npts); - - for(i = 0; i < npts; ++i) - { - fprintf(stdout, "%16.14lf %16.14lf \n",r[i],s[i]); - } - //----------------------------------------------- -} diff --git a/library/Demos/StdRegions/StdProject1D.cpp b/library/Demos/StdRegions/StdProject1D.cpp index 50073b752160b2d75d8f689fd377d98eb6ad697d..5cf37efb235c72d2751b09b5551fedaf91172f0c 100644 --- a/library/Demos/StdRegions/StdProject1D.cpp +++ b/library/Demos/StdRegions/StdProject1D.cpp @@ -26,15 +26,15 @@ int main(int argc, char *argv[]) "dictates the basis as:\n"); fprintf(stderr,"\t Ortho_A = 1\n"); fprintf(stderr,"\t Modified_A = 4\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t Lagrange = 8\n"); - fprintf(stderr,"\t Gauss Lagrange = 9\n"); - fprintf(stderr,"\t Legendre = 10\n"); - fprintf(stderr,"\t Chebyshev = 11\n"); - fprintf(stderr,"\t Monomial = 12\n"); - fprintf(stderr,"\t FourierSingleMode = 13\n"); + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Gauss Lagrange = 11\n"); + fprintf(stderr,"\t Legendre = 12\n"); + fprintf(stderr,"\t Chebyshev = 13\n"); + fprintf(stderr,"\t Monomial = 14\n"); + fprintf(stderr,"\t FourierSingleMode = 15\n"); - fprintf(stderr,"Note type = 1,2,4,5 are for higher dimensional basis\n"); + fprintf(stderr,"Note type = 1,2,4,5,7,8 are for higher dimensional basis\n"); exit(1); } diff --git a/library/Demos/StdRegions/StdProject2D.cpp b/library/Demos/StdRegions/StdProject2D.cpp index fdb413f5e27c60ad93b2a4cd0d1e30d41c605398..c828b8112d0d005fd66bc76d278aadfc60d123b5 100644 --- a/library/Demos/StdRegions/StdProject2D.cpp +++ b/library/Demos/StdRegions/StdProject2D.cpp @@ -51,17 +51,18 @@ int main(int argc, char *argv[]) fprintf(stderr,"\t Ortho_B = 2\n"); fprintf(stderr,"\t Modified_A = 4\n"); fprintf(stderr,"\t Modified_B = 5\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t 
Lagrange = 8\n"); - fprintf(stderr,"\t Gauss Lagrange = 9\n"); - fprintf(stderr,"\t Legendre = 10\n"); - fprintf(stderr,"\t Chebyshev = 11\n"); - fprintf(stderr,"\t FourierSingleMode = 12\n"); - fprintf(stderr,"\t Nodal tri (Electro) = 13\n"); - fprintf(stderr,"\t Nodal tri (Fekete) = 14\n"); + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Gauss Lagrange = 11\n"); + fprintf(stderr,"\t Legendre = 12\n"); + fprintf(stderr,"\t Chebyshev = 13\n"); + fprintf(stderr,"\t Monomial = 14\n"); + fprintf(stderr,"\t FourierSingleMode = 15\n"); + fprintf(stderr,"\t Nodal tri (Electro) = 18\n"); + fprintf(stderr,"\t Nodal tri (Fekete) = 19\n"); - fprintf(stderr,"Note type = 3,6 are for three-dimensional basis\n"); + fprintf(stderr,"Note type = 3,6,7,8 are for three-dimensional basis\n"); exit(1); } @@ -77,17 +78,17 @@ int main(int argc, char *argv[]) int btype1_val = atoi(argv[2]); int btype2_val = atoi(argv[3]); - if(( btype1_val <= 12)&&( btype2_val <= 12)) + if(( btype1_val <= 15)&&( btype2_val <= 15)) { btype1 = (LibUtilities::BasisType) btype1_val; btype2 = (LibUtilities::BasisType) btype2_val; } - else if(( btype1_val >=13)&&(btype2_val <= 14)) + else if(( btype1_val >=18)&&(btype2_val <= 19)) { btype1 = LibUtilities::eOrtho_A; btype2 = LibUtilities::eOrtho_B; - if(btype1_val == 13) + if(btype1_val == 18) { NodalType = LibUtilities::eNodalTriElec; } diff --git a/library/Demos/StdRegions/StdProject3D.cpp b/library/Demos/StdRegions/StdProject3D.cpp index 2cc73797e1bcf58e1b32963a25512bd87172521c..8d560e9cecf277a055e4654887b7686884008a33 100644 --- a/library/Demos/StdRegions/StdProject3D.cpp +++ b/library/Demos/StdRegions/StdProject3D.cpp @@ -21,6 +21,10 @@ using namespace Nektar::StdRegions; NekDouble Tet_sol(NekDouble x, NekDouble y, NekDouble z, int order1, int order2, int order3); +/// Defines a solution which excites all modes in a StdPyr expansion. 
+NekDouble Pyr_sol(NekDouble x, NekDouble y, NekDouble z, + int order1, int order2, int order3); + /// Defines a solution which excites all modes in a StdPrism expansion. NekDouble Prism_sol(NekDouble x, NekDouble y, NekDouble z, int order1, int order2, int order3); @@ -66,16 +70,18 @@ int main(int argc, char *argv[]){ fprintf(stderr,"\t Modified_A = 4\n"); fprintf(stderr,"\t Modified_B = 5\n"); fprintf(stderr,"\t Modified_C = 6\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t Lagrange = 8\n"); - fprintf(stderr,"\t Gauss Lagrange = 9\n"); - fprintf(stderr,"\t Legendre = 10\n"); - fprintf(stderr,"\t Chebyshev = 11\n"); - fprintf(stderr,"\t Nodal tri (Electro) = 12\n"); - fprintf(stderr,"\t Nodal tri (Fekete) = 13\n"); - fprintf(stderr,"\t Nodal tet (Electro) = 14\n"); - fprintf(stderr,"\t Nodal tet (Even) = 15\n"); - fprintf(stderr,"\t Nodal prism (Even) = 16\n"); + fprintf(stderr,"\t OrthoPyr_C = 7\n"); + fprintf(stderr,"\t ModifiedPyr_C = 8\n"); + + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Gauss Lagrange = 11\n"); + fprintf(stderr,"\t Legendre = 12\n"); + fprintf(stderr,"\t Chebyshev = 13\n"); + fprintf(stderr,"\t Monomial = 14\n"); + fprintf(stderr,"\t Nodal tet (Electro) = 15\n"); + fprintf(stderr,"\t Nodal tet (Even) = 16\n"); + fprintf(stderr,"\t Nodal prism (Even) = 17\n"); exit(1); } @@ -95,13 +101,13 @@ int main(int argc, char *argv[]){ int btype2_val = atoi(argv[3]); int btype3_val = atoi(argv[4]); - if (btype1_val <= 11 && btype2_val <= 11) + if (btype1_val <= 14 && btype2_val <= 14) { btype1 = (LibUtilities::BasisType) btype1_val; btype2 = (LibUtilities::BasisType) btype2_val; btype3 = (LibUtilities::BasisType) btype3_val; } - else if(btype1_val >=12 && btype2_val <= 16) + else if(btype1_val >=15 && btype2_val <= 17) { if (regionshape == LibUtilities::eTetrahedron) { @@ -116,19 +122,11 @@ int main(int argc, char *argv[]){ btype3 = LibUtilities::eOrtho_B; } - if(btype1_val == 12) - { - 
NodalType = LibUtilities::eNodalTriElec; - } - else if (btype1_val == 13) - { - NodalType = LibUtilities::eNodalTriFekete; - } - else if (btype1_val == 14) + if (btype1_val == 15) { NodalType = LibUtilities::eNodalTetElec; } - else if (btype1_val == 15) + else if (btype1_val == 16) { NodalType = LibUtilities::eNodalTetEvenlySpaced; } @@ -162,23 +160,23 @@ int main(int argc, char *argv[]){ } break; case LibUtilities::ePyramid: - if((btype1 == eOrtho_B) || (btype1 == eOrtho_C) - || (btype1 == eModified_B) || (btype1 == eModified_C)) + if((btype1 == eOrtho_B) || (btype1 == eOrtho_C) || (btype1 == eOrthoPyr_C) + || (btype1 == eModified_B) || (btype1 == eModified_C) || (btype1 == eModifiedPyr_C)) { NEKERROR(ErrorUtil::efatal, "Basis 1 cannot be of type Ortho_B, " "Ortho_C, Modified_B or Modified_C"); } - if((btype2 == eOrtho_B) || (btype2 == eOrtho_C) - || (btype2 == eModified_B) || (btype2 == eModified_C)) + if((btype2 == eOrtho_B) || (btype2 == eOrtho_C) || (btype1 == eOrthoPyr_C) + || (btype2 == eModified_B) || (btype2 == eModified_C) || (btype1 == eModifiedPyr_C)) { NEKERROR(ErrorUtil::efatal, "Basis 2 cannot be of type Ortho_B, " "Ortho_C, Modified_B or Modified_C"); } - if((btype3 == eOrtho_A) || (btype3 == eOrtho_B) - || (btype3 == eModified_A) || (btype3 == eModified_B)) + if((btype3 == eOrtho_A) || (btype3 == eOrtho_B) || (btype3 == eOrtho_C) + || (btype3 == eModified_A) || (btype3 == eModified_B) || (btype3 == eModified_C)) { NEKERROR(ErrorUtil::efatal, "Basis 3 cannot be of type Ortho_A, " - "Ortho_B, Modified_A or Modified_B"); + "Ortho_B, Ortho_C, Modified_A, Modified_B or Modified_C"); } break; case LibUtilities::ePrism: @@ -332,7 +330,7 @@ int main(int argc, char *argv[]){ // Define solution to be projected for(i = 0; i < nq1*nq2*nq3; ++i) { - sol[i] = Tet_sol(x[i],y[i],z[i],order1,order2,order3); + sol[i] = Pyr_sol(x[i],y[i],z[i],order1,order2,order3); } //---------------------------------------------- } @@ -422,11 +420,14 @@ int main(int argc, char 
*argv[]){ t[1] = -0.25; t[2] = -0.3; - if(regionshape == LibUtilities::eTetrahedron || - regionshape == LibUtilities::ePyramid) + if(regionshape == LibUtilities::eTetrahedron) { sol[0] = Tet_sol(t[0], t[1], t[2], order1, order2, order3); } + else if (regionshape == LibUtilities::ePyramid) + { + sol[0] = Pyr_sol(t[0], t[1], t[2], order1, order2, order3); + } else if (regionshape == LibUtilities::ePrism) { sol[0] = Prism_sol(t[0], t[1], t[2], order1, order2, order3); @@ -466,6 +467,28 @@ NekDouble Tet_sol(NekDouble x, NekDouble y, NekDouble z, return sol; } +NekDouble Pyr_sol(NekDouble x, NekDouble y, NekDouble z, + int order1, int order2, int order3) +{ + int l,k,m; + NekDouble sol = 0.0; + + for(k = 0; k < order1; ++k) + { + for(l = 0; l < order2-k; ++l) + { + for(m = 0; m < order3-k-l-1; ++m) + { + sol += pow(x,k)*pow(y,l)*pow(z,m); + } + } + } + + return sol; +} + + + NekDouble Prism_sol(NekDouble x, NekDouble y, NekDouble z, int order1, int order2, int order3) { diff --git a/library/Demos/StdRegions/StdProject_Diff1D.cpp b/library/Demos/StdRegions/StdProject_Diff1D.cpp index 736aaf6ce66c7fcfafd5a544b226f99633f3f430..4c7d8b1d7df8a265c60248b3d12182019acb0efe 100644 --- a/library/Demos/StdRegions/StdProject_Diff1D.cpp +++ b/library/Demos/StdRegions/StdProject_Diff1D.cpp @@ -26,15 +26,15 @@ int main(int argc, char *argv[]) "dictates the basis as:\n"); fprintf(stderr,"\t Ortho_A = 1\n"); fprintf(stderr,"\t Modified_A = 4\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t Lagrange = 8\n"); - fprintf(stderr,"\t Gauss Lagrange = 9\n"); - fprintf(stderr,"\t Legendre = 10\n"); - fprintf(stderr,"\t Chebyshev = 11\n"); - fprintf(stderr,"\t Monomial = 12\n"); - fprintf(stderr,"\t FourierSingleMode = 13\n"); + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Gauss Lagrange = 11\n"); + fprintf(stderr,"\t Legendre = 12\n"); + fprintf(stderr,"\t Chebyshev = 13\n"); + fprintf(stderr,"\t Monomial = 14\n"); + 
fprintf(stderr,"\t FourierSingleMode = 15\n"); - fprintf(stderr,"Note type = 1,2,4,5 are for higher dimensional basis\n"); + fprintf(stderr,"Note type = 1,2,4,5,7,8 are for higher dimensional basis\n"); exit(1); } diff --git a/library/Demos/StdRegions/StdProject_Diff2D.cpp b/library/Demos/StdRegions/StdProject_Diff2D.cpp index b87677644964eaa722261c4cec88c4e9d2b33df7..da8bbac17b8898fd45f6a235378d94253d0bf5bc 100644 --- a/library/Demos/StdRegions/StdProject_Diff2D.cpp +++ b/library/Demos/StdRegions/StdProject_Diff2D.cpp @@ -56,15 +56,15 @@ int main(int argc, char *argv[]) fprintf(stderr,"\t Ortho_B = 2\n"); fprintf(stderr,"\t Modified_A = 4\n"); fprintf(stderr,"\t Modified_B = 5\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t Lagrange = 8\n"); - fprintf(stderr,"\t Gauss Lagrange = 9\n"); - fprintf(stderr,"\t Legendre = 10\n"); - fprintf(stderr,"\t Chebyshev = 11\n"); - fprintf(stderr,"\t Nodal tri (Electro) = 13\n"); - fprintf(stderr,"\t Nodal tri (Fekete) = 14\n"); + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Gauss Lagrange = 11\n"); + fprintf(stderr,"\t Legendre = 12\n"); + fprintf(stderr,"\t Chebyshev = 13\n"); + fprintf(stderr,"\t Nodal tri (Electro) = 14\n"); + fprintf(stderr,"\t Nodal tri (Fekete) = 15\n"); - fprintf(stderr,"Note type = 3,6 are for three-dimensional basis\n"); + fprintf(stderr,"Note type = 3,6,7,8 are for three-dimensional basis\n"); exit(1); } @@ -80,17 +80,17 @@ int main(int argc, char *argv[]) int btype1_val = atoi(argv[2]); int btype2_val = atoi(argv[3]); - if(( btype1_val <= 11)&&( btype2_val <= 11)) + if(( btype1_val <= 13)&&( btype2_val <= 13)) { btype1 = (LibUtilities::BasisType) btype1_val; btype2 = (LibUtilities::BasisType) btype2_val; } - else if(( btype1_val >=13)&&(btype2_val <= 14)) + else if(( btype1_val >=15)&&(btype2_val <= 16)) { btype1 = LibUtilities::eOrtho_A; btype2 = LibUtilities::eOrtho_B; - if(btype1_val == 13) + if(btype1_val == 15) { NodalType = 
LibUtilities::eNodalTriElec; } diff --git a/library/Demos/StdRegions/StdProject_Diff3D.cpp b/library/Demos/StdRegions/StdProject_Diff3D.cpp index 5f70101e63deee41205b0d863233e705444544ea..6d5d7919a2ed4b56930579f5b6b32fc1f847ba28 100644 --- a/library/Demos/StdRegions/StdProject_Diff3D.cpp +++ b/library/Demos/StdRegions/StdProject_Diff3D.cpp @@ -87,13 +87,17 @@ int main(int argc, char *argv[]){ fprintf(stderr,"\t Modified_A = 4\n"); fprintf(stderr,"\t Modified_B = 5\n"); fprintf(stderr,"\t Modified_C = 6\n"); - fprintf(stderr,"\t Fourier = 7\n"); - fprintf(stderr,"\t Lagrange = 8\n"); - fprintf(stderr,"\t Gauss Lagrange = 9\n"); - fprintf(stderr,"\t Legendre = 10\n"); - fprintf(stderr,"\t Chebyshev = 11\n"); - fprintf(stderr,"\t Nodal tri (Electro) = 12\n"); - fprintf(stderr,"\t Nodal tri (Fekete) = 13\n"); + fprintf(stderr,"\t OrthoPyr_C = 7\n"); + fprintf(stderr,"\t ModifiedPyr_C = 8\n"); + fprintf(stderr,"\t Fourier = 9\n"); + fprintf(stderr,"\t Lagrange = 10\n"); + fprintf(stderr,"\t Gauss Lagrange = 11\n"); + fprintf(stderr,"\t Legendre = 12\n"); + fprintf(stderr,"\t Chebyshev = 13\n"); + fprintf(stderr,"\t Monomial = 14\n"); + fprintf(stderr,"\t Nodal tet (Electro) = 15\n"); + fprintf(stderr,"\t Nodal tet (Even) = 16\n"); + fprintf(stderr,"\t Nodal prism (Even) = 17\n"); exit(1); } @@ -112,17 +116,26 @@ int main(int argc, char *argv[]){ int btype1_val = atoi(argv[2]); int btype2_val = atoi(argv[3]); int btype3_val = atoi(argv[4]); - if(( btype1_val <= 11)&&( btype2_val <= 11)) + if(( btype1_val <= 15)&&( btype2_val <= 15)) { btype1 = (LibUtilities::BasisType) btype1_val; btype2 = (LibUtilities::BasisType) btype2_val; btype3 = (LibUtilities::BasisType) btype3_val; } - else if(( btype1_val >=12)&&(btype2_val <= 13)) + else if(( btype1_val >=15)&&(btype2_val <= 17)) { - btype1 = LibUtilities::eOrtho_A; - btype2 = LibUtilities::eOrtho_B; - btype3 = LibUtilities::eOrtho_C; + if (regionshape == LibUtilities::eTetrahedron) + { + btype1 = LibUtilities::eOrtho_A; + 
btype2 = LibUtilities::eOrtho_B; + btype3 = LibUtilities::eOrtho_C; + } + else if (regionshape == LibUtilities::ePrism) + { + btype1 = LibUtilities::eOrtho_A; + btype2 = LibUtilities::eOrtho_A; + btype3 = LibUtilities::eOrtho_B; + } } // Check to see that correct Expansions are used diff --git a/library/Demos/StdRegions/Tests/StdProject2D_Quad_Fourier_P6_Q8.tst b/library/Demos/StdRegions/Tests/StdProject2D_Quad_Fourier_P6_Q8.tst index 191178fc330728eff8687ae2ff0397de1a590d15..747189f01b49de87ae6e493e119ad62d1c4d15cc 100644 --- a/library/Demos/StdRegions/Tests/StdProject2D_Quad_Fourier_P6_Q8.tst +++ b/library/Demos/StdRegions/Tests/StdProject2D_Quad_Fourier_P6_Q8.tst @@ -2,7 +2,7 @@ StdProject2D Quadrilateral Fourier basis P=6 Q=8 StdProject2D - 4 7 7 6 6 8 8 + 4 9 9 6 6 8 8 1.06892e-15 diff --git a/library/Demos/StdRegions/Tests/StdProject2D_Quad_Fourier_Single_Mode_P2_Q6.tst b/library/Demos/StdRegions/Tests/StdProject2D_Quad_Fourier_Single_Mode_P2_Q6.tst index 446bad552e0a09e7b0099d678d3f591f60d17120..bd3c7eff56fae6b37e35d16eb07871210bc1d54a 100644 --- a/library/Demos/StdRegions/Tests/StdProject2D_Quad_Fourier_Single_Mode_P2_Q6.tst +++ b/library/Demos/StdRegions/Tests/StdProject2D_Quad_Fourier_Single_Mode_P2_Q6.tst @@ -1,8 +1,8 @@ - StdProject2D Quadrilateral Fourier Single Mode basis P=2 Q=6 + StdProject2D Quadrilateral Fourier Single Mode basis P=2 Q=2 StdProject2D - 4 12 12 2 2 6 6 + 4 15 15 2 2 2 2 3.59678e-16 diff --git a/library/Demos/StdRegions/Tests/StdProject2D_Quad_Lagrange_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject2D_Quad_Lagrange_P6_Q7.tst index 1a491f8a769758eb4e39f3c86cb5005dfa337e50..a64a66b03de2042bfd54b747cf56fdf53c87014d 100644 --- a/library/Demos/StdRegions/Tests/StdProject2D_Quad_Lagrange_P6_Q7.tst +++ b/library/Demos/StdRegions/Tests/StdProject2D_Quad_Lagrange_P6_Q7.tst @@ -2,7 +2,7 @@ StdProject2D Quadrilateral Lagrange basis P=6 Q=7 StdProject2D - 4 8 8 6 6 7 7 + 4 10 10 6 6 7 7 4.62859e-15 diff --git 
a/library/Demos/StdRegions/Tests/StdProject2D_Tri_Nodal_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject2D_Tri_Nodal_P6_Q7.tst index 6744d3561fe9e1656b6e50439bc4c94fd85ff16b..1b50f912ddbb1f955d8a8fae723e756585bd5994 100644 --- a/library/Demos/StdRegions/Tests/StdProject2D_Tri_Nodal_P6_Q7.tst +++ b/library/Demos/StdRegions/Tests/StdProject2D_Tri_Nodal_P6_Q7.tst @@ -2,7 +2,7 @@ StdProject2D Triangle Nodal basis P=6 Q=7 StdProject2D - 3 13 12 6 6 7 7 + 3 18 18 6 6 7 7 1.78107e-15 diff --git a/library/Demos/StdRegions/Tests/StdProject3D_Hex_Chebyshev_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject3D_Hex_Chebyshev_P6_Q7.tst index 28aa553682a0ab289c8cc0b85d84a689f089a7d2..26e7f966d1b000e8e4db8df4da10d7941ef5b8b1 100644 --- a/library/Demos/StdRegions/Tests/StdProject3D_Hex_Chebyshev_P6_Q7.tst +++ b/library/Demos/StdRegions/Tests/StdProject3D_Hex_Chebyshev_P6_Q7.tst @@ -2,7 +2,7 @@ StdProject3D Hexahedron Chebyshev basis P=6 Q=7 StdProject3D - 8 10 10 10 6 6 6 7 7 7 + 8 13 13 13 6 6 6 7 7 7 4.11256e-14 diff --git a/library/Demos/StdRegions/Tests/StdProject3D_Hex_Lagrange_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject3D_Hex_Lagrange_P6_Q7.tst index a2d94ce7f7a795a79fa971410f78f5c91d2c0891..94bd7b535b339f01b54aae954ff199d7bbe64e6d 100644 --- a/library/Demos/StdRegions/Tests/StdProject3D_Hex_Lagrange_P6_Q7.tst +++ b/library/Demos/StdRegions/Tests/StdProject3D_Hex_Lagrange_P6_Q7.tst @@ -2,7 +2,7 @@ StdProject3D Hexahedron Lagrange basis P=6 Q=7 StdProject3D - 8 8 8 8 6 6 6 7 7 7 + 8 10 10 10 6 6 6 7 7 7 3.06382e-14 diff --git a/library/Demos/StdRegions/Tests/StdProject3D_Hex_Legendre_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject3D_Hex_Legendre_P6_Q7.tst index ebb17fab3d492219ae9ccc77242a951367bf8ae6..0fccab2b065e145ff23ffb4fc570699225716337 100644 --- a/library/Demos/StdRegions/Tests/StdProject3D_Hex_Legendre_P6_Q7.tst +++ b/library/Demos/StdRegions/Tests/StdProject3D_Hex_Legendre_P6_Q7.tst @@ -2,7 +2,7 @@ StdProject3D Hexahedron Legendre basis P=6 
Q=7 StdProject3D - 8 10 10 10 6 6 6 7 7 7 + 8 12 12 12 6 6 6 7 7 7 2.20528e-14 diff --git a/library/Demos/StdRegions/Tests/StdProject3D_Pyr_Mod_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject3D_Pyr_Mod_P6_Q7.tst index 7a48e8ebac2afa94ad28f70b4cecb3cfcd4c0bd2..8e047fb0449ae31f96e59df7706fe1219a7f749a 100644 --- a/library/Demos/StdRegions/Tests/StdProject3D_Pyr_Mod_P6_Q7.tst +++ b/library/Demos/StdRegions/Tests/StdProject3D_Pyr_Mod_P6_Q7.tst @@ -2,13 +2,13 @@ StdProject3D Pyramid Modified basis P=6 Q=7 StdProject3D - 6 4 4 6 6 6 6 7 7 6 + 6 4 4 8 6 6 6 7 7 6 - 1.35652e-12 + 3.2756e-12 - 3.8245e-12 + 9.72786e-13 diff --git a/library/Demos/StdRegions/Tests/StdProject3D_Pyr_Ortho_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject3D_Pyr_Ortho_P6_Q7.tst new file mode 100644 index 0000000000000000000000000000000000000000..4d22f0bf3c41582fd75d2b1f2b9e829b1cb3d9cf --- /dev/null +++ b/library/Demos/StdRegions/Tests/StdProject3D_Pyr_Ortho_P6_Q7.tst @@ -0,0 +1,14 @@ + + + StdProject3D Pyramid Orthogonal basis P=6 Q=7 + StdProject3D + 6 1 1 7 6 6 6 7 7 6 + + + 5.32907e-15 + + + 5.32907e-15 + + + diff --git a/library/Demos/StdRegions/Tests/StdProject_Diff2D_Quad_Fourier_P6_Q8.tst b/library/Demos/StdRegions/Tests/StdProject_Diff2D_Quad_Fourier_P6_Q8.tst index d5452aa58cf6991ab633f2caf71c122c6e377815..1e67d3f3930c25fc521dc1c1f5aba22106046d71 100644 --- a/library/Demos/StdRegions/Tests/StdProject_Diff2D_Quad_Fourier_P6_Q8.tst +++ b/library/Demos/StdRegions/Tests/StdProject_Diff2D_Quad_Fourier_P6_Q8.tst @@ -2,7 +2,7 @@ StdProject_Diff2D Quadrilateral Fourier basis P=6 Q=8 StdProject_Diff2D - 4 7 7 6 6 8 8 + 4 9 9 6 6 8 8 1.54556e-14 diff --git a/library/Demos/StdRegions/Tests/StdProject_Diff2D_Quad_Lagrange_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject_Diff2D_Quad_Lagrange_P6_Q7.tst index 3c98b2d74056d2bcf7926cbe71d3948d92cdbae8..5bf430ff47a2d20eeacd2884543b4048c3ccf69b 100644 --- a/library/Demos/StdRegions/Tests/StdProject_Diff2D_Quad_Lagrange_P6_Q7.tst +++ 
b/library/Demos/StdRegions/Tests/StdProject_Diff2D_Quad_Lagrange_P6_Q7.tst @@ -2,7 +2,7 @@ StdProject_Diff2D Quadrilateral Lagrange basis P=6 Q=7 StdProject_Diff2D - 4 8 8 6 6 7 7 + 4 10 10 6 6 7 7 4.14376e-14 diff --git a/library/Demos/StdRegions/Tests/StdProject_Diff2D_Tri_Nodal_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject_Diff2D_Tri_Nodal_P6_Q7.tst index 2a3f56e29fd753b472f9c2858d9bfdc254c703ed..34cdab88df2bae3dc8c7e0042ab0b5b5a731afe1 100644 --- a/library/Demos/StdRegions/Tests/StdProject_Diff2D_Tri_Nodal_P6_Q7.tst +++ b/library/Demos/StdRegions/Tests/StdProject_Diff2D_Tri_Nodal_P6_Q7.tst @@ -2,7 +2,7 @@ StdProject_Diff2D Triangle Nodal basis P=6 Q=7 StdProject_Diff2D - 3 13 12 6 6 7 7 + 3 15 14 6 6 7 7 1.06405e-14 diff --git a/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Chebyshev_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Chebyshev_P6_Q7.tst index 41fc5397475cac6547fefb2bb65f160971b175e0..15adf255a0d5fb4bc754d218152a209f3bbac304 100644 --- a/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Chebyshev_P6_Q7.tst +++ b/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Chebyshev_P6_Q7.tst @@ -2,7 +2,7 @@ StdProject_Diff3D Hexahedron Chebyshev basis P=6 Q=7 StdProject_Diff3D - 8 10 10 10 6 6 6 7 7 7 + 8 13 13 13 6 6 6 7 7 7 5.62024e-13 diff --git a/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Lagrange_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Lagrange_P6_Q7.tst index 4dbc5360045462cd1b97e0f595831d1defae1dfe..1d4515f579af3a0dc14980f26902099bc90b43de 100644 --- a/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Lagrange_P6_Q7.tst +++ b/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Lagrange_P6_Q7.tst @@ -2,7 +2,7 @@ StdProject_Diff3D Hexahedron Lagrange basis P=6 Q=7 StdProject_Diff3D - 8 8 8 8 6 6 6 7 7 7 + 8 10 10 10 6 6 6 7 7 7 5.11314e-13 diff --git a/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Legendre_P6_Q7.tst 
b/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Legendre_P6_Q7.tst index 9fce2b24666125460a0f5ace62811023875db562..af7ef4d22142e52b814d7838250953588af48081 100644 --- a/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Legendre_P6_Q7.tst +++ b/library/Demos/StdRegions/Tests/StdProject_Diff3D_Hex_Legendre_P6_Q7.tst @@ -2,7 +2,7 @@ StdProject_Diff3D Hexahedron Legendre basis P=6 Q=7 StdProject_Diff3D - 8 10 10 10 6 6 6 7 7 7 + 8 12 12 12 6 6 6 7 7 7 4.88883e-13 diff --git a/library/Demos/StdRegions/Tests/StdProject_Diff3D_Pyr_Mod_P6_Q7.tst b/library/Demos/StdRegions/Tests/StdProject_Diff3D_Pyr_Mod_P6_Q7.tst index c1de0289c4c0c6b76d2362bccdcd5399e5c0b2a4..38981d8ae936a319e372f64711299dd09212dfbd 100644 --- a/library/Demos/StdRegions/Tests/StdProject_Diff3D_Pyr_Mod_P6_Q7.tst +++ b/library/Demos/StdRegions/Tests/StdProject_Diff3D_Pyr_Mod_P6_Q7.tst @@ -2,7 +2,7 @@ StdProject_Diff3D Pyramid Modified basis P=6 Q=7 StdProject_Diff3D - 6 4 4 6 6 6 6 7 7 6 + 6 4 4 8 6 6 6 7 7 6 4.99628e-12 diff --git a/library/FieldUtils/CMakeLists.txt b/library/FieldUtils/CMakeLists.txt index 2d267f786c8c5dc275e693bac299b66d6bc8dc73..4ec3599e23c791e8b19b773cb285f7ea5e168d77 100644 --- a/library/FieldUtils/CMakeLists.txt +++ b/library/FieldUtils/CMakeLists.txt @@ -7,6 +7,8 @@ SET(FieldUtilsHeaders InputModules/InputXml.h InputModules/InputPts.h InputModules/InputNek5000.h + InputModules/InputSemtex.h + OutputModules/OutputFileBase.h OutputModules/OutputInfo.h OutputModules/OutputTecplot.h OutputModules/OutputVtk.h @@ -19,6 +21,7 @@ SET(FieldUtilsHeaders ProcessModules/ProcessBoundaryExtract.h ProcessModules/ProcessCombineAvg.h ProcessModules/ProcessConcatenateFld.h + ProcessModules/ProcessCreateExp.h ProcessModules/ProcessDeform.h ProcessModules/ProcessDisplacement.h ProcessModules/ProcessEquiSpacedOutput.h @@ -29,6 +32,7 @@ SET(FieldUtilsHeaders ProcessModules/ProcessInterpField.h ProcessModules/ProcessInterpPoints.h ProcessModules/ProcessInterpPointDataToFld.h + 
ProcessModules/ProcessInterpPtsToPts.h ProcessModules/ProcessIsoContour.h ProcessModules/ProcessJacobianEnergy.h ProcessModules/ProcessMapping.h @@ -55,6 +59,8 @@ SET(FieldUtilsSources InputModules/InputXml.cpp InputModules/InputPts.cpp InputModules/InputNek5000.cpp + InputModules/InputSemtex.cpp + OutputModules/OutputFileBase.cpp OutputModules/OutputInfo.cpp OutputModules/OutputTecplot.cpp OutputModules/OutputVtk.cpp @@ -67,6 +73,7 @@ SET(FieldUtilsSources ProcessModules/ProcessBoundaryExtract.cpp ProcessModules/ProcessCombineAvg.cpp ProcessModules/ProcessConcatenateFld.cpp + ProcessModules/ProcessCreateExp.cpp ProcessModules/ProcessDeform.cpp ProcessModules/ProcessDisplacement.cpp ProcessModules/ProcessEquiSpacedOutput.cpp @@ -77,6 +84,7 @@ SET(FieldUtilsSources ProcessModules/ProcessInterpField.cpp ProcessModules/ProcessInterpPoints.cpp ProcessModules/ProcessInterpPointDataToFld.cpp + ProcessModules/ProcessInterpPtsToPts.cpp ProcessModules/ProcessIsoContour.cpp ProcessModules/ProcessJacobianEnergy.cpp ProcessModules/ProcessMapping.cpp @@ -95,11 +103,11 @@ SET(FieldUtilsSources ProcessModules/ProcessQualityMetric.cpp ) -ADD_NEKTAR_LIBRARY(FieldUtils lib ${NEKTAR_LIBRARY_TYPE} ${FieldUtilsSources} ${FieldUtilsHeaders}) -TARGET_LINK_LIBRARIES(FieldUtils LINK_PUBLIC GlobalMapping) +ADD_NEKTAR_LIBRARY(FieldUtils + SUMMARY "Nektar++ FieldUtils library" + DESCRIPTION "Collection of post-processing modules for the FieldConvert utility and filter" + SOURCES ${FieldUtilsSources} + HEADERS ${FieldUtilsHeaders} + DEPENDS GlobalMapping) ADD_DEFINITIONS(-DFIELD_UTILS_EXPORTS) - -INSTALL(DIRECTORY ./ DESTINATION ${NEKTAR_INCLUDE_DIR}/FieldUtils COMPONENT dev FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp") - - diff --git a/library/FieldUtils/Field.hpp b/library/FieldUtils/Field.hpp index 7f7b446c157aa9dea58f2a38046808ff1395e169..555857ab75d4c787c816e1a2c8f180692a65a635 100644 --- a/library/FieldUtils/Field.hpp +++ b/library/FieldUtils/Field.hpp @@ -50,19 +50,10 @@ #include 
#include #include -#include #include -#include -#include -#include -#include -#include - #include "FieldUtilsDeclspec.h" -using namespace std; - namespace Nektar { namespace FieldUtils @@ -74,8 +65,7 @@ struct Field : m_verbose(false), m_declareExpansionAsContField(false), m_declareExpansionAsDisContField(false), m_requireBoundaryExpansion(false), m_writeBndFld(false), - m_fldToBnd(false), m_addNormals(false), - m_setUpEquiSpacedFields(false), m_fieldPts(LibUtilities::NullPtsField) + m_addNormals(false), m_fieldPts(LibUtilities::NullPtsField) { } @@ -87,9 +77,12 @@ struct Field } } bool m_verbose; - vector m_fielddef; - vector > m_data; - vector m_exp; + std::vector m_fielddef; + std::vector > m_data; + std::vector m_exp; + std::vector m_variables; + + int m_numHomogeneousDir; bool m_declareExpansionAsContField; bool m_declareExpansionAsDisContField; @@ -101,20 +94,14 @@ struct Field LibUtilities::CommSharedPtr m_comm; LibUtilities::SessionReaderSharedPtr m_session; SpatialDomains::MeshGraphSharedPtr m_graph; - LibUtilities::PtsIOSharedPtr m_ptsIO; - map > m_inputfiles; + std::map > m_inputfiles; bool m_writeBndFld; - vector m_bndRegionsToWrite; - bool m_fldToBnd; + std::vector m_bndRegionsToWrite; bool m_addNormals; - bool m_setUpEquiSpacedFields; - LibUtilities::PtsFieldSharedPtr m_fieldPts; - MultiRegions::AssemblyMapCGSharedPtr m_locToGlobalMap; - LibUtilities::FieldMetaDataMap m_fieldMetaDataMap; FIELD_UTILS_EXPORT MultiRegions::ExpListSharedPtr SetUpFirstExpList( @@ -177,7 +164,8 @@ struct Field Exp2DH1 = MemoryManager:: AllocateSharedPtr(m_session, Bkey, ly, m_useFFT, - dealiasing, m_graph); + dealiasing, m_graph, + Collections::eNoCollection); exp = Exp2DH1; } else if (NumHomogeneousDir == 2) @@ -223,7 +211,8 @@ struct Field MultiRegions::ContField3DHomogeneous2D>:: AllocateSharedPtr(m_session, BkeyY, BkeyZ, ly, lz, m_useFFT, dealiasing, m_graph, - m_session->GetVariable(0)); + m_session->GetVariable(0), + Collections::eNoCollection); } else if 
(m_declareExpansionAsDisContField) { @@ -231,14 +220,16 @@ struct Field MultiRegions::DisContField3DHomogeneous2D>:: AllocateSharedPtr(m_session, BkeyY, BkeyZ, ly, lz, m_useFFT, dealiasing, m_graph, - m_session->GetVariable(0)); + m_session->GetVariable(0), + Collections::eNoCollection); } else { Exp3DH2 = MemoryManager< MultiRegions::ExpList3DHomogeneous2D>:: AllocateSharedPtr(m_session, BkeyY, BkeyZ, ly, lz, - m_useFFT, dealiasing, m_graph); + m_useFFT, dealiasing, m_graph, + Collections::eNoCollection); } exp = Exp3DH2; @@ -251,18 +242,23 @@ struct Field { Exp1D = MemoryManager:: AllocateSharedPtr(m_session, m_graph, - m_session->GetVariable(0)); + m_session->GetVariable(0), + Collections::eNoCollection); } else if (m_declareExpansionAsDisContField) { Exp1D = MemoryManager:: AllocateSharedPtr(m_session, m_graph, - m_session->GetVariable(0)); + m_session->GetVariable(0), + true, + Collections::eNoCollection); } else { Exp1D = MemoryManager:: - AllocateSharedPtr(m_session, m_graph); + AllocateSharedPtr(m_session, m_graph, + true, + Collections::eNoCollection); } exp = Exp1D; @@ -327,7 +323,8 @@ struct Field MultiRegions::ContField3DHomogeneous1D>:: AllocateSharedPtr(m_session, Bkey, lz, m_useFFT, dealiasing, m_graph, - m_session->GetVariable(0)); + m_session->GetVariable(0), + Collections::eNoCollection); } else if (m_declareExpansionAsDisContField) { @@ -335,14 +332,17 @@ struct Field MultiRegions::DisContField3DHomogeneous1D>:: AllocateSharedPtr(m_session, Bkey, lz, m_useFFT, dealiasing, m_graph, - m_session->GetVariable(0)); + m_session->GetVariable(0), + Collections::eNoCollection); } else { Exp3DH1 = MemoryManager< MultiRegions::ExpList3DHomogeneous1D>:: AllocateSharedPtr(m_session, Bkey, lz, m_useFFT, - dealiasing, m_graph); + dealiasing, m_graph, + "DefaultVar", + Collections::eNoCollection); } exp = Exp3DH1; } @@ -354,18 +354,25 @@ struct Field { Exp2D = MemoryManager:: AllocateSharedPtr(m_session, m_graph, - m_session->GetVariable(0)); + 
m_session->GetVariable(0), + true,false, + Collections::eNoCollection); } else if (m_declareExpansionAsDisContField) { Exp2D = MemoryManager:: AllocateSharedPtr(m_session, m_graph, - m_session->GetVariable(0)); + m_session->GetVariable(0), + true,true, + Collections::eNoCollection); } else { Exp2D = MemoryManager:: - AllocateSharedPtr(m_session, m_graph); + AllocateSharedPtr(m_session, m_graph, + true, + "DefaultVar", + Collections::eNoCollection); } exp = Exp2D; @@ -380,19 +387,26 @@ struct Field { Exp3D = MemoryManager:: AllocateSharedPtr(m_session, m_graph, - m_session->GetVariable(0)); + m_session->GetVariable(0), + false, + Collections::eNoCollection); } else if (m_declareExpansionAsDisContField) { Exp3D = MemoryManager:: AllocateSharedPtr(m_session, m_graph, - m_session->GetVariable(0)); + m_session->GetVariable(0), + true, + Collections::eNoCollection); } else { Exp3D = MemoryManager< - MultiRegions::ExpList3D>::AllocateSharedPtr(m_session, - m_graph); + MultiRegions::ExpList3D>::AllocateSharedPtr( + m_session, + m_graph, + "DefaultVar", + Collections::eNoCollection); } exp = Exp3D; @@ -418,10 +432,9 @@ struct Field * @return Reader for @p filename. */ FIELD_UTILS_EXPORT LibUtilities::FieldIOSharedPtr FieldIOForFile( - string filename) + std::string filename) { - LibUtilities::CommSharedPtr c = m_session ? 
m_session->GetComm() : - LibUtilities::GetCommFactory().CreateInstance("Serial", 0, 0); + LibUtilities::CommSharedPtr c = m_comm; string fmt = LibUtilities::FieldIO::GetFileType(filename, c); map::iterator it = m_fld.find(fmt); @@ -440,7 +453,9 @@ struct Field } FIELD_UTILS_EXPORT MultiRegions::ExpListSharedPtr AppendExpList( - int NumHomogeneousDir, string var = "DefaultVar", bool NewField = false) + int NumHomogeneousDir, + std::string var = "DefaultVar", + bool NewField = false) { if (var.compare("DefaultVar") == 0 && m_requireBoundaryExpansion) { @@ -691,8 +706,6 @@ struct Field tmp = MemoryManager:: AllocateSharedPtr(*tmp2, m_graph, var); - - m_locToGlobalMap = tmp2->GetLocalToGlobalMap(); } } else if (m_declareExpansionAsDisContField) @@ -731,10 +744,21 @@ struct Field return tmp; } + FIELD_UTILS_EXPORT void ClearField() + { + m_session = LibUtilities::SessionReaderSharedPtr(); + m_graph = SpatialDomains::MeshGraphSharedPtr(); + m_fieldPts = LibUtilities::NullPtsField; + m_exp.clear(); + m_fielddef = std::vector(); + m_data = std::vector > (); + m_variables.clear(); + } + private: /// Map to store FieldIO instances. Key is the reader type, value is the /// FieldIO object. 
- map m_fld; + std::map m_fld; }; typedef boost::shared_ptr FieldSharedPtr; diff --git a/library/FieldUtils/FieldUtilsDeclspec.h b/library/FieldUtils/FieldUtilsDeclspec.h index f2d85728ac64e1c24d0a2a3a065d1c0bcd6d51f1..7a34a78b23155799cff5d26feb97eee3d5c6965e 100644 --- a/library/FieldUtils/FieldUtilsDeclspec.h +++ b/library/FieldUtils/FieldUtilsDeclspec.h @@ -43,6 +43,4 @@ #define FIELD_UTILS_EXPORT #endif -#define LOKI_CLASS_LEVEL_THREADING - #endif // NEKTAR_FIELD_UTILS_DECLSPEC_H diff --git a/library/FieldUtils/InputModules/InputDat.cpp b/library/FieldUtils/InputModules/InputDat.cpp index 04445c5578c7fa6b8c90ec38fbab45b2da5f59a7..df3c6d394882c9a5913eae43cb6747560d89963c 100644 --- a/library/FieldUtils/InputModules/InputDat.cpp +++ b/library/FieldUtils/InputModules/InputDat.cpp @@ -53,7 +53,7 @@ ModuleKey InputDat::m_className[1] = { GetModuleFactory().RegisterCreatorFunction( ModuleKey(eInputModule, "dat"), InputDat::create, - "Reads Tecplot dat file for FE block triangular format."), + "Reads Tecplot dat file for FE block triangular format.") }; /** @@ -77,18 +77,8 @@ InputDat::~InputDat() */ void InputDat::Process(po::variables_map &vm) { - - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "Processing input dat file" << endl; - } - } - - string line, word, tag; + string line; std::ifstream datFile; - stringstream s; // Open the file stream. 
string fname = m_f->m_inputfiles["dat"][0]; @@ -101,7 +91,7 @@ void InputDat::Process(po::variables_map &vm) } // read variables - // currently assum there are x y and z coordinates + // currently assume there are x y and z coordinates int dim = 3; vector fieldNames; while (!datFile.eof()) @@ -153,6 +143,9 @@ void InputDat::Process(po::variables_map &vm) dim, fieldNames, pts); m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsTriBlock); m_f->m_fieldPts->SetConnectivity(ptsConn); + + // save field names + m_f->m_variables = fieldNames; } /** diff --git a/library/FieldUtils/InputModules/InputDat.h b/library/FieldUtils/InputModules/InputDat.h index e35a85954184197a00cfbbfd5b4aaf28d03c800e..7b22e60eb15e4e23415ac73b18f826eb0a16566b 100644 --- a/library/FieldUtils/InputModules/InputDat.h +++ b/library/FieldUtils/InputModules/InputDat.h @@ -64,6 +64,16 @@ public: return "InputDat"; } + virtual std::string GetModuleDescription() + { + return "Processing input dat file"; + } + + virtual ModulePriority GetModulePriority() + { + return eCreatePts; + } + private: void ReadTecplotFEBlockZone(std::ifstream &datFile, string &line, diff --git a/library/FieldUtils/InputModules/InputFld.cpp b/library/FieldUtils/InputModules/InputFld.cpp index 0c2443787ebc69601f519ac1503cf27f1c3a51e2..8ada611eb9363b2ed3dafdda371a199fdc57d5b0 100644 --- a/library/FieldUtils/InputModules/InputFld.cpp +++ b/library/FieldUtils/InputModules/InputFld.cpp @@ -40,9 +40,6 @@ using namespace std; #include "InputFld.h" using namespace Nektar; -static std::string npts = LibUtilities::SessionReader::RegisterCmdLineArgument( - "NumberOfPoints", "n", "Define number of points to dump output"); - namespace Nektar { namespace FieldUtils @@ -50,17 +47,18 @@ namespace FieldUtils ModuleKey InputFld::m_className[4] = { GetModuleFactory().RegisterCreatorFunction( - ModuleKey(eInputModule, "fld"), InputFld::create, "Reads Fld file."), - GetModuleFactory().RegisterCreatorFunction(ModuleKey(eInputModule, "chk"), - 
InputFld::create, - "Reads checkpoint file."), - GetModuleFactory().RegisterCreatorFunction(ModuleKey(eInputModule, "rst"), - InputFld::create, - "Reads restart file."), + ModuleKey(eInputModule, "fld"), + InputFld::create, "Reads Fld file."), + GetModuleFactory().RegisterCreatorFunction( + ModuleKey(eInputModule, "chk"), + InputFld::create, "Reads checkpoint file."), + GetModuleFactory().RegisterCreatorFunction( + ModuleKey(eInputModule, "rst"), + InputFld::create, "Reads restart file."), GetModuleFactory().RegisterCreatorFunction( ModuleKey(eInputModule, "bse"), - InputFld::create, - "Reads stability base-flow file.")}; + InputFld::create, "Reads stability base-flow file.") +}; /** * @brief Set up InputFld object. @@ -86,170 +84,63 @@ InputFld::~InputFld() */ void InputFld::Process(po::variables_map &vm) { + int i; + string fileName = m_config["infile"].as(); - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "Processing input fld file" << endl; - } - } - - int i, j; - string fldending; - // Determine appropriate field input - if (m_f->m_inputfiles.count("fld") != 0) - { - fldending = "fld"; - } - else if (m_f->m_inputfiles.count("chk") != 0) - { - fldending = "chk"; - } - else if (m_f->m_inputfiles.count("rst") != 0) - { - fldending = "rst"; - } - else if (m_f->m_inputfiles.count("bse") != 0) - { - fldending = "bse"; - } - else - { - ASSERTL0(false, "no input file found"); - } + LibUtilities::FieldIOSharedPtr fld = + m_f->FieldIOForFile(fileName); + int oldSize = m_f->m_fielddef.size(); if(m_f->m_graph) { - if (m_f->m_data.size() == 0) - { - // currently load all field (possibly could read data from - // expansion list but it is re-arranged in expansion) - - const SpatialDomains::ExpansionMap &expansions = - m_f->m_graph->GetExpansions(); - - // if Range has been speficied it is possible to have a - // partition which is empty so check this and return if - // no elements present. 
+ // currently load all field (possibly could read data from + // expansion list but it is re-arranged in expansion) - if (!expansions.size()) - { - return; - } + const SpatialDomains::ExpansionMap &expansions = + m_f->m_graph->GetExpansions(); - m_f->m_exp.resize(1); + // if Range has been specified it is possible to have a + // partition which is empty so check this and return if + // no elements present. - Array ElementGIDs(expansions.size()); - SpatialDomains::ExpansionMap::const_iterator expIt; - - i = 0; - for (expIt = expansions.begin(); expIt != expansions.end(); ++expIt) - { - ElementGIDs[i++] = expIt->second->m_geomShPtr->GetGlobalID(); - } + if (!expansions.size()) + { + return; + } - m_f->m_fielddef.clear(); - m_f->m_data.clear(); + Array ElementGIDs(expansions.size()); + SpatialDomains::ExpansionMap::const_iterator expIt; - m_f->FieldIOForFile(m_f->m_inputfiles[fldending][0])->Import( - m_f->m_inputfiles[fldending][0], m_f->m_fielddef, m_f->m_data, - m_f->m_fieldMetaDataMap, ElementGIDs); + i = 0; + for (expIt = expansions.begin(); expIt != expansions.end(); ++expIt) + { + ElementGIDs[i++] = expIt->second->m_geomShPtr->GetGlobalID(); } + + fld->Import( + fileName, m_f->m_fielddef, m_f->m_data, + m_f->m_fieldMetaDataMap, ElementGIDs); } else // load all data. 
{ - m_f->FieldIOForFile(m_f->m_inputfiles[fldending][0])->Import( - m_f->m_inputfiles[fldending][0], m_f->m_fielddef, m_f->m_data, + fld->Import( + fileName, m_f->m_fielddef, m_f->m_data, m_f->m_fieldMetaDataMap); } - // if m_exp defined presume we want to load all field into expansions - if (m_f->m_exp.size()) + // save field names + for(i = 0; i < m_f->m_fielddef[oldSize]->m_fields.size(); ++i) { - int nfields, nstrips; - - m_f->m_session->LoadParameter("Strip_Z", nstrips, 1); - - if (vm.count("useSessionVariables")) - { - nfields = m_f->m_session->GetVariables().size(); - } - else - { - nfields = m_f->m_fielddef[0]->m_fields.size(); - } - - m_f->m_exp.resize(nfields * nstrips); - - vector vars = m_f->m_session->GetVariables(); - - // declare other fields; - for (int s = 0; s < nstrips; ++s) // homogeneous strip varient - { - for (i = 0; i < nfields; ++i) - { - if (i < vars.size()) - { - // check to see if field already defined - if (!m_f->m_exp[s * nfields + i]) - { - m_f->m_exp[s * nfields + i] = m_f->AppendExpList( - m_f->m_fielddef[0]->m_numHomogeneousDir, vars[i]); - } - } - else - { - if (vars.size()) - { - m_f->m_exp[s * nfields + i] = m_f->AppendExpList( - m_f->m_fielddef[0]->m_numHomogeneousDir, vars[0]); - } - else - { - m_f->m_exp[s * nfields + i] = m_f->AppendExpList( - m_f->m_fielddef[0]->m_numHomogeneousDir); - } - } - } - } - - // Extract data to coeffs and bwd transform - for (int s = 0; s < nstrips; ++s) // homogeneous strip varient - { - for (j = 0; j < nfields; ++j) - { - for (i = 0; i < m_f->m_data.size() / nstrips; ++i) - { - m_f->m_exp[s * nfields + j]->ExtractDataToCoeffs( - m_f->m_fielddef[i * nstrips + s], - m_f->m_data[i * nstrips + s], - m_f->m_fielddef[i * nstrips + s]->m_fields[j], - m_f->m_exp[s * nfields + j]->UpdateCoeffs()); - } - m_f->m_exp[s * nfields + j]->BwdTrans( - m_f->m_exp[s * nfields + j]->GetCoeffs(), - m_f->m_exp[s * nfields + j]->UpdatePhys()); - } - } - - // reset output field in case Import loaded elements that are 
not - // in the expansion (because of range option of partitioning) - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); + // check for multiple fld files + vector::iterator it = + find (m_f->m_variables.begin(), + m_f->m_variables.end(), + m_f->m_fielddef[oldSize]->m_fields[i]); - for (j = 0; j < nfields; ++j) + if(it == m_f->m_variables.end()) { - for (i = 0; i < FieldDef.size(); ++i) - { - FieldDef[i]->m_fields.push_back( - m_f->m_fielddef[0]->m_fields[j]); - m_f->m_exp[j]->AppendFieldData(FieldDef[i], FieldData[i]); - } + m_f->m_variables.push_back(m_f->m_fielddef[oldSize]->m_fields[i]); } - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/InputModules/InputFld.h b/library/FieldUtils/InputModules/InputFld.h index 4c33bd9af9591bbbd08a62865922b05cceae5603..0ec988b8587ddfc10190c10c7e6f0ea339a0871b 100644 --- a/library/FieldUtils/InputModules/InputFld.h +++ b/library/FieldUtils/InputModules/InputFld.h @@ -66,6 +66,16 @@ public: return "InputFld"; } + virtual std::string GetModuleDescription() + { + return "Processing input fld file"; + } + + virtual ModulePriority GetModulePriority() + { + return eCreateFieldData; + } + private: }; } diff --git a/library/FieldUtils/InputModules/InputNek5000.cpp b/library/FieldUtils/InputModules/InputNek5000.cpp index 6e708ed4232703e2cdeea649c0520c2ab86f1980..8024675cea992e047adb9baf4f9eab25dbe167b9 100644 --- a/library/FieldUtils/InputModules/InputNek5000.cpp +++ b/library/FieldUtils/InputModules/InputNek5000.cpp @@ -54,28 +54,6 @@ ModuleKey InputNek5000::m_className[1] = { "Reads Nek5000 field file.") }; -/** - * @brief Swap endian ordering of the input variable. 
- */ -template -void swap_endian(T &u) -{ - union - { - T u; - unsigned char u8[sizeof(T)]; - } source, dest; - - source.u = u; - - for (size_t k = 0; k < sizeof(T); k++) - { - dest.u8[k] = source.u8[sizeof(T) - k - 1]; - } - - u = dest.u; -} - /** * @brief Set up InputNek5000 object. * @@ -105,16 +83,7 @@ InputNek5000::~InputNek5000() */ void InputNek5000::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "Processing Nek5000 field file" << endl; - } - } - - string fldending = "fld5000"; - ifstream file(m_f->m_inputfiles[fldending][0].c_str(), ios::binary); + ifstream file(m_config["infile"].as().c_str(), ios::binary); // Header: 132 bytes for binary. vector data(132); @@ -246,7 +215,8 @@ void InputNek5000::Process(po::variables_map &vm) case ' ': continue; default: - cerr << "Field contains unknown variable: " << remain[i] << endl; + cerr << "Field contains unknown variable: " + << remain[i] << endl; abort(); } } @@ -303,6 +273,9 @@ void InputNek5000::Process(po::variables_map &vm) } m_f->m_fielddef.push_back(fielddef); + + // save field names + m_f->m_variables = m_f->m_fielddef[0]->m_fields; } } } diff --git a/library/FieldUtils/InputModules/InputNek5000.h b/library/FieldUtils/InputModules/InputNek5000.h index 7abbec6484494704f44009f67edb79ae0c857532..9643102732532897349dec60e98976c671cd1458 100644 --- a/library/FieldUtils/InputModules/InputNek5000.h +++ b/library/FieldUtils/InputModules/InputNek5000.h @@ -66,6 +66,16 @@ public: return "InputNek5000"; } + virtual std::string GetModuleDescription() + { + return "Processing Nek5000 field file"; + } + + virtual ModulePriority GetModulePriority() + { + return eCreateFieldData; + } + private: }; } diff --git a/library/FieldUtils/InputModules/InputPts.cpp b/library/FieldUtils/InputModules/InputPts.cpp index 4c890e5f694e236a8e83bba80405fc640c91a14d..cffc1b517b4298cbadf2afcf69b678dd9f746269 100644 --- a/library/FieldUtils/InputModules/InputPts.cpp +++ 
b/library/FieldUtils/InputModules/InputPts.cpp @@ -107,18 +107,19 @@ void InputPts::Process(po::variables_map &vm) } - string inFile = (m_f->m_inputfiles[ptsending][0]).c_str(); + string inFile = m_config["infile"].as(); + LibUtilities::PtsIOSharedPtr ptsIO; if (m_f->m_session) { if (!ptsending.compare("pts")) { - m_f->m_ptsIO = MemoryManager::AllocateSharedPtr( + ptsIO = MemoryManager::AllocateSharedPtr( m_f->m_session->GetComm()); } else { - m_f->m_ptsIO = MemoryManager::AllocateSharedPtr( + ptsIO = MemoryManager::AllocateSharedPtr( m_f->m_session->GetComm()); } } @@ -128,16 +129,22 @@ void InputPts::Process(po::variables_map &vm) LibUtilities::GetCommFactory().CreateInstance("Serial", 0, 0); if (!ptsending.compare("pts")) { - m_f->m_ptsIO = MemoryManager::AllocateSharedPtr(c); + ptsIO = MemoryManager::AllocateSharedPtr(c); } else { - m_f->m_ptsIO = MemoryManager::AllocateSharedPtr(c); + ptsIO = MemoryManager::AllocateSharedPtr(c); } } - m_f->m_ptsIO->Import(inFile, m_f->m_fieldPts); + ptsIO->Import(inFile, m_f->m_fieldPts); + + // save field names + for (int j = 0; j < m_f->m_fieldPts->GetNFields(); ++j) + { + m_f->m_variables.push_back(m_f->m_fieldPts->GetFieldName(j)); + } } } } diff --git a/library/FieldUtils/InputModules/InputPts.h b/library/FieldUtils/InputModules/InputPts.h index a06715713fb0419917f97514a0237790e7bc07c3..831f3f22110188cad3c4ade38f731ab6ae5932b4 100644 --- a/library/FieldUtils/InputModules/InputPts.h +++ b/library/FieldUtils/InputModules/InputPts.h @@ -66,6 +66,16 @@ public: return "InputPts"; } + virtual std::string GetModuleDescription() + { + return "Processing input pts file"; + } + + virtual ModulePriority GetModulePriority() + { + return eCreatePts; + } + private: }; } diff --git a/library/FieldUtils/InputModules/InputSemtex.cpp b/library/FieldUtils/InputModules/InputSemtex.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d002c67fd208d0a20d02412853e938219206d1da --- /dev/null +++ 
b/library/FieldUtils/InputModules/InputSemtex.cpp @@ -0,0 +1,278 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// File: InputSemtex.cpp +// +// For more information, please see: http://www.nektar.info/ +// +// The MIT License +// +// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA), +// Department of Aeronautics, Imperial College London (UK), and Scientific +// Computing and Imaging Institute, University of Utah (USA). +// +// License for the specific language governing rights and limitations under +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// Description: Reads a Semtex checkpoint file. 
+// +//////////////////////////////////////////////////////////////////////////////// + +#include +#include +#include +#include +using namespace std; + +#include +#include + +#include "InputSemtex.h" + +namespace Nektar +{ +namespace FieldUtils +{ + +ModuleKey InputSemtex::m_className[1] = { + GetModuleFactory().RegisterCreatorFunction( + ModuleKey(eInputModule, "fldsem"), InputSemtex::create, + "Reads Semtex field file.") +}; + +/** + * @brief Set up InputSemtex object. + * + */ +InputSemtex::InputSemtex(FieldSharedPtr f) : InputModule(f) +{ + m_allowedFiles.insert("fldsem"); +} + +/** + * + */ +InputSemtex::~InputSemtex() +{ +} + +/** + * @brief Process Semtex input file. + * + * This routine reads a binary-format Semtex field file, loads the data into + * memory and populates the field definitions to match the data format. Semtex + * is a classic nodal-Lagrangian spectral element code at a single polynomial + * order, meaning that the field data are set up according to this structure. + */ +void InputSemtex::Process(po::variables_map &vm) +{ + // Variables to be read from session file + string sessionName, date, fields, endian; + int nr, ns, nz, nelmt, step; + NekDouble time, dt, kinvis, beta; + + ifstream file(m_config["infile"].as().c_str(), ios::binary); + + // -- Read header information. 
+ char buf[25]; + string line; + + // Session name + file.read(buf, 25); + sessionName = string(buf, 25); + boost::trim(sessionName); + getline(file, line); + m_f->m_fieldMetaDataMap["SessionName0"] = sessionName; + + // Date + file.read(buf, 25); + date = string(buf, 25); + boost::trim(date); + getline(file, line); + + // nP, nZ, nElmt + file >> nr >> ns >> nz >> nelmt; + getline(file, line); + + // Step + file >> step; + getline(file, line); + + // Time + file >> time; + getline(file, line); + m_f->m_fieldMetaDataMap["Time"] = boost::lexical_cast(time); + + // Timestep + file >> dt; + getline(file, line); + m_f->m_fieldMetaDataMap["TimeStep"] = boost::lexical_cast(dt); + + // Viscosity + file >> kinvis; + getline(file, line); + m_f->m_fieldMetaDataMap["Kinvis"] = boost::lexical_cast(kinvis); + + // Beta + file >> beta; + getline(file, line); + + // Fields + file.read(buf, 25); + fields = string(buf, 25); + boost::trim(fields); + getline(file, line); + + // Endian-ness + LibUtilities::EndianType systemEndian = LibUtilities::Endianness(); + std::string endianSearch; + if (systemEndian == LibUtilities::eEndianBig) + { + endianSearch = "big"; + } + else if (systemEndian == LibUtilities::eEndianLittle) + { + endianSearch = "little"; + } + else + { + ASSERTL0(false, "Only little- or big-endian systems are supported"); + } + + file.read(buf, 25); + endian = string(buf, 25); + bool byteSwap = endian.find(endianSearch) == string::npos; + getline(file, line); + + // Print some basic information for input if in verbose mode. 
+ if (m_f->m_verbose) + { + cout << "Found header information:" << endl; + cout << " -- From session : " << sessionName << endl; + cout << " -- File generated : " << date << endl; + cout << " -- Polynomial order : " << nr-1 << endl; + cout << " -- Number of planes : " << nz << endl; + cout << " -- Number of elements : " << nelmt << endl; + cout << " -- Simulation time : " << time << endl; + cout << " -- Timestep : " << dt << endl; + cout << " -- Viscosity : " << kinvis << endl; + cout << " -- Fields : " << fields + << " (" << fields.size() << " total)" << endl; + + if (nz > 1) + { + cout << " -- Homogeneous length : " << 2*M_PI/beta << endl; + } + + cout << " -- " << (byteSwap ? "" : "do not ") << "need to swap endian" + << endl; + } + + ASSERTL0(nr == ns, "Semtex reader assumes values of nr and ns are equal"); + + // Set up a field definition + LibUtilities::FieldDefinitionsSharedPtr fielddef = MemoryManager< + LibUtilities::FieldDefinitions>::AllocateSharedPtr(); + fielddef->m_shapeType = LibUtilities::eQuadrilateral; + fielddef->m_homoStrips = false; + fielddef->m_pointsDef = false; + fielddef->m_uniOrder = true; + fielddef->m_numPointsDef = false; + + // Set up basis + fielddef->m_basis.push_back(LibUtilities::eGLL_Lagrange); + fielddef->m_basis.push_back(LibUtilities::eGLL_Lagrange); + fielddef->m_numModes.push_back(nr); + fielddef->m_numModes.push_back(nr); + + // Set up elements + fielddef->m_elementIDs.resize(nelmt); + for (int i = 0; i < nelmt; ++i) + { + fielddef->m_elementIDs[i] = i; + } + + // Deal with homogeneous direction. 
+ if (nz > 1) + { + fielddef->m_numHomogeneousDir = 1; + fielddef->m_homogeneousLengths.push_back(2 * M_PI / beta); + fielddef->m_numModes.push_back(nz); + fielddef->m_basis.push_back(LibUtilities::eFourier); + + for (int i = 0; i < nz; ++i) + { + fielddef->m_homogeneousZIDs.push_back(i); + } + } + else + { + fielddef->m_numHomogeneousDir = 0; + } + + for (string::size_type i = 0; i < fields.size(); ++i) + { + fielddef->m_fields.push_back(string(&fields[i], 1)); + } + + // Size of data to read. + size_t elmtSize = nr * ns; + size_t planeSize = elmtSize * nelmt; + size_t fieldSize = planeSize * nz; + size_t dataSize = fieldSize * fields.size(); + + // Allocate our storage. + m_f->m_data.resize(1); + m_f->m_data[0].resize(dataSize); + + // Temporary storage for one plane of data. + vector tmp(planeSize); + size_t offset = nz * nr * ns; + + // Now reorder data; Semtex ordering traverses memory fastest over planes, + // whereas Nektar++ expects it over elements + for (int i = 0; i < fields.size(); ++i) + { + NekDouble *data = &m_f->m_data[0][i * fieldSize]; + for (int j = 0; j < nz; ++j) + { + size_t elSizeJ = j * elmtSize; + file.read((char *)&tmp[0], planeSize * sizeof(NekDouble)); + + if (byteSwap) + { + swap_endian(tmp); + } + + for (int k = 0; k < nelmt; ++k) + { + std::copy(&tmp[k * elmtSize], &tmp[(k+1) * elmtSize], + data + k * offset + elSizeJ); + + } + } + } + + m_f->m_fielddef.push_back(fielddef); + + // save field names + m_f->m_variables = m_f->m_fielddef[0]->m_fields; +} + +} +} diff --git a/utilities/MeshConvert.cpp b/library/FieldUtils/InputModules/InputSemtex.h similarity index 63% rename from utilities/MeshConvert.cpp rename to library/FieldUtils/InputModules/InputSemtex.h index 465733e0afce0b26596c3c8dbb0c89fa005d35e5..198efd34ea60c4184ec665186df1dc8c3c83c020 100644 --- a/utilities/MeshConvert.cpp +++ b/library/FieldUtils/InputModules/InputSemtex.h @@ -1,6 +1,6 @@ //////////////////////////////////////////////////////////////////////////////// // -// 
File: MeshConvert.cpp +// File: InputSemtex.h // // For more information, please see: http://www.nektar.info/ // @@ -29,26 +29,56 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. // -// Description: Mesh conversion utility. +// Description: Reads a Semtex checkpoint file. // //////////////////////////////////////////////////////////////////////////////// -#include -#include - -std::string errMsg = -"MeshConvert\n" -"-----------\n" -"If you're seeing this message, you tried to run the MeshConvert executable.\n\n" -"From version 4.3 onwards, MeshConvert has been renamed NekMesh. This has\n" -"been done because MeshConvert now incorporates mesh generation features,\n" -"which takes it beyond the realm of a converter.\n\n" -"Note that the usage of NekMesh is functionally identical to MeshConvert. You\n" -"simply need to change the program name to NekMesh.\n\n" -"This placeholder program will be removed in v4.4.\n"; - -int main() +#ifndef FIELDUTILS_INPUTSEMTEX +#define FIELDUTILS_INPUTSEMTEX + +#include "../Module.h" + +namespace Nektar +{ +namespace FieldUtils { - std::cout << errMsg; - return 1; + +/** + * Converter for Semtex field files. + */ +class InputSemtex : public InputModule +{ +public: + InputSemtex(FieldSharedPtr f); + virtual ~InputSemtex(); + virtual void Process(po::variables_map &vm); + + /// Creates an instance of this class + static ModuleSharedPtr create(FieldSharedPtr f) + { + return MemoryManager::AllocateSharedPtr(f); + } + /// %ModuleKey for class.
+ static ModuleKey m_className[]; + + virtual std::string GetModuleName() + { + return "InputSemtex"; + } + + virtual std::string GetModuleDescription() + { + return "Processing Semtex field file"; + } + + virtual ModulePriority GetModulePriority() + { + return eCreateFieldData; + } + +private: +}; +} } + +#endif diff --git a/library/FieldUtils/InputModules/InputXml.cpp b/library/FieldUtils/InputModules/InputXml.cpp index 344b664e92179d79b054c3a05a43f839a3155bd2..35e86446dc62aa22d166cee506a7df2b2d250061 100644 --- a/library/FieldUtils/InputModules/InputXml.cpp +++ b/library/FieldUtils/InputModules/InputXml.cpp @@ -43,9 +43,6 @@ using namespace std; #include "InputXml.h" using namespace Nektar; -static std::string npts = LibUtilities::SessionReader::RegisterCmdLineArgument( - "NumberOfPoints", "n", "Define number of points to dump output"); - namespace Nektar { namespace FieldUtils @@ -66,9 +63,6 @@ InputXml::InputXml(FieldSharedPtr f) : InputModule(f) { m_allowedFiles.insert("xml"); m_allowedFiles.insert("xml.gz"); - m_allowedFiles.insert("fld"); // these files could be allowed with xml files - m_allowedFiles.insert("chk"); - m_allowedFiles.insert("rst"); } /** @@ -83,48 +77,20 @@ InputXml::~InputXml() */ void InputXml::Process(po::variables_map &vm) { - Timer timerpart; - - // check for multiple calls to inputXml due to split xml - // files. 
If so just return - int expsize = m_f->m_exp.size(); - m_f->m_comm->AllReduce(expsize, LibUtilities::ReduceMax); - - if (expsize != 0) - { - return; - } - + LibUtilities::Timer timerpart; if (m_f->m_verbose) { if (m_f->m_comm->TreatAsRankZero()) { - cout << "Processing input xml file" << endl; timerpart.Start(); } } - // check to see if fld file defined so can use in - // expansion defintion if required - string fldending; - bool fldfilegiven = true; - - // Determine appropriate field input - if (m_f->m_inputfiles.count("fld") != 0) - { - fldending = "fld"; - } - else if (m_f->m_inputfiles.count("chk") != 0) - { - fldending = "chk"; - } - else if (m_f->m_inputfiles.count("rst") != 0) - { - fldending = "rst"; - } - else + // check for multiple calls to inputXml due to split xml + // files. If so just return + if (m_f->m_graph) { - fldfilegiven = false; + return; } string xml_ending = "xml"; @@ -288,130 +254,6 @@ void InputXml::Process(po::variables_map &vm) timerpart.Start(); } } - - // currently load all field (possibly could read data from - // expansion list but it is re-arranged in expansion) - const SpatialDomains::ExpansionMap &expansions = - m_f->m_graph->GetExpansions(); - - // if Range has been speficied it is possible to have a - // partition which is empty so ccheck this and return if - // no elements present. - if (!expansions.size()) - { - return; - } - - m_f->m_exp.resize(1); - - // load fielddef header if fld file is defined. 
This gives - // precedence to Homogeneous definition in fld file - int NumHomogeneousDir = 0; - if (fldfilegiven) - { - // use original expansion to identify which elements are in - // this partition/subrange - - Array ElementGIDs(expansions.size()); - SpatialDomains::ExpansionMap::const_iterator expIt; - - int i = 0; - for (expIt = expansions.begin(); expIt != expansions.end(); ++expIt) - { - ElementGIDs[i++] = expIt->second->m_geomShPtr->GetGlobalID(); - } - - m_f->m_fielddef.clear(); - m_f->m_data.clear(); - - m_f->FieldIOForFile(m_f->m_inputfiles[fldending][0])->Import( - m_f->m_inputfiles[fldending][0], m_f->m_fielddef, m_f->m_data, - m_f->m_fieldMetaDataMap, ElementGIDs); - NumHomogeneousDir = m_f->m_fielddef[0]->m_numHomogeneousDir; - - //---------------------------------------------- - // Set up Expansion information to use mode order from field - m_f->m_graph->SetExpansions(m_f->m_fielddef); - } - else - { - if (m_f->m_session->DefinesSolverInfo("HOMOGENEOUS")) - { - std::string HomoStr = m_f->m_session->GetSolverInfo("HOMOGENEOUS"); - - if ((HomoStr == "HOMOGENEOUS1D") || (HomoStr == "Homogeneous1D") || - (HomoStr == "1D") || (HomoStr == "Homo1D")) - { - NumHomogeneousDir = 1; - } - if ((HomoStr == "HOMOGENEOUS2D") || (HomoStr == "Homogeneous2D") || - (HomoStr == "2D") || (HomoStr == "Homo2D")) - { - NumHomogeneousDir = 2; - } - } - } - - // reset expansion defintion to use equispaced points if required. 
- if (m_requireEquiSpaced || vm.count("output-points")) - { - int nPointsNew = 0; - - if (vm.count("output-points")) - { - nPointsNew = vm["output-points"].as(); - } - - m_f->m_graph->SetExpansionsToEvenlySpacedPoints(nPointsNew); - } - else - { - if (vm.count("output-points")) - { - int nPointsNew = vm["output-points"].as(); - m_f->m_graph->SetExpansionsToPointOrder(nPointsNew); - } - } - - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - timerpart.Stop(); - NekDouble cpuTime = timerpart.TimePerTest(1); - - stringstream ss; - ss << cpuTime << "s"; - cout << "\t InputXml setexpansion CPU Time: " << setw(8) << left - << ss.str() << endl; - timerpart.Start(); - } - } - - // Override number of planes with value from cmd line - if (NumHomogeneousDir == 1 && vm.count("output-points-hom-z")) - { - int expdim = m_f->m_graph->GetMeshDimension(); - m_f->m_fielddef[0]->m_numModes[expdim] = - vm["output-points-hom-z"].as(); - } - - m_f->m_exp[0] = m_f->SetUpFirstExpList(NumHomogeneousDir, fldfilegiven); - - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - timerpart.Stop(); - NekDouble cpuTime = timerpart.TimePerTest(1); - - stringstream ss1; - - ss1 << cpuTime << "s"; - cout << "\t InputXml set first exp CPU Time: " << setw(8) << left - << ss1.str() << endl; - } - } } } } diff --git a/library/FieldUtils/InputModules/InputXml.h b/library/FieldUtils/InputModules/InputXml.h index 75853932e3fc53394aa83b412e479a35f510cdab..d4467137974fd103e92ffed849766aaeca1c1667 100644 --- a/library/FieldUtils/InputModules/InputXml.h +++ b/library/FieldUtils/InputModules/InputXml.h @@ -66,6 +66,16 @@ public: return "InputXml"; } + virtual std::string GetModuleDescription() + { + return "Processing input xml file"; + } + + virtual ModulePriority GetModulePriority() + { + return eCreateGraph; + } + private: }; } diff --git a/library/FieldUtils/Interpolator.cpp b/library/FieldUtils/Interpolator.cpp index 
db93c0deb26586d33f07a759b7b73ed789dc2736..598d91d07ff03cf663b308907c96cf4d4d909afd 100644 --- a/library/FieldUtils/Interpolator.cpp +++ b/library/FieldUtils/Interpolator.cpp @@ -34,10 +34,14 @@ // //////////////////////////////////////////////////////////////////////////////// +#include #include using namespace std; +namespace bg = boost::geometry; +namespace bgi = boost::geometry::index; + namespace Nektar { namespace FieldUtils @@ -64,9 +68,6 @@ void Interpolator::CalcWeights(const LibUtilities::PtsFieldSharedPtr ptsInField, int nOutPts = m_ptsOutField->GetNpoints(); int lastProg = 0; - m_weights = Array >(nOutPts); - m_neighInds = Array >(nOutPts); - // set a default method if (m_method == eNoMethod) { @@ -89,6 +90,9 @@ void Interpolator::CalcWeights(const LibUtilities::PtsFieldSharedPtr ptsInField, { case eNearestNeighbour: { + m_weights = Array(nOutPts, 1, 0.0); + m_neighInds = Array(nOutPts, 1, (unsigned int) 0); + for (int i = 0; i < nOutPts; ++i) { Array tmp(m_dim, 0.0); @@ -116,6 +120,9 @@ void Interpolator::CalcWeights(const LibUtilities::PtsFieldSharedPtr ptsInField, ASSERTL0(m_ptsInField->GetDim() == 1 || m_coordId >= 0, "not implemented"); + m_weights = Array(nOutPts, 3, 0.0); + m_neighInds = Array(nOutPts, 3, (unsigned int) 0); + if (m_ptsInField->GetDim() == 1) { m_coordId = 0; @@ -152,6 +159,12 @@ void Interpolator::CalcWeights(const LibUtilities::PtsFieldSharedPtr ptsInField, case eShepard: { + int numPts = pow(double(2), m_ptsInField->GetDim()); + numPts = min(numPts, int(m_ptsInField->GetNpoints() / 2)); + + m_weights = Array(nOutPts, numPts, 0.0); + m_neighInds = Array(nOutPts, numPts, (unsigned int) 0); + for (int i = 0; i < nOutPts; ++i) { Array tmp(m_dim, 0.0); @@ -161,7 +174,7 @@ void Interpolator::CalcWeights(const LibUtilities::PtsFieldSharedPtr ptsInField, } PtsPoint searchPt(i, tmp, 1E30); - CalcW_Shepard(searchPt); + CalcW_Shepard(searchPt, numPts); int progress = int(100 * i / nOutPts); if (m_progressCallback && progress > lastProg) @@ 
-181,6 +194,11 @@ void Interpolator::CalcWeights(const LibUtilities::PtsFieldSharedPtr ptsInField, // use m_filtWidth as FWHM NekDouble sigma = m_filtWidth * 0.4246609001; + m_maxPts = min(m_maxPts, int(m_ptsInField->GetNpoints() / 2)); + + m_weights = Array(nOutPts, m_maxPts, 0.0); + m_neighInds = Array(nOutPts, m_maxPts, (unsigned int) 0); + for (int i = 0; i < nOutPts; ++i) { Array tmp(m_dim, 0.0); @@ -230,12 +248,12 @@ void Interpolator::Interpolate(const LibUtilities::PtsFieldSharedPtr ptsInField, m_ptsInField = ptsInField; m_ptsOutField = ptsOutField; - if (m_weights.num_elements() == 0) + if (m_weights.GetRows() == 0) { CalcWeights(m_ptsInField, m_ptsOutField); } - ASSERTL0(m_weights.num_elements() == m_ptsOutField->GetNpoints(), + ASSERTL0(m_weights.GetRows() == m_ptsOutField->GetNpoints(), "weights dimension mismatch"); int nFields = m_ptsOutField->GetNFields(); @@ -247,7 +265,7 @@ void Interpolator::Interpolate(const LibUtilities::PtsFieldSharedPtr ptsInField, { for (int j = 0; j < nOutPts; ++j) { - int nPts = m_weights[j].num_elements(); + int nPts = m_weights.GetColumns(); // skip if there were no neighbours found for this point if (nPts == 0) @@ -303,9 +321,6 @@ void Interpolator::Interpolate( int nOutDim = m_expOutField[0]->GetCoordim(0); int lastProg = 0; - m_weights = Array >(nOutPts); - m_neighInds = Array >(nOutPts); - Array Lcoords(nInDim, 0.0); Array Scoords(nOutDim, 0.0); Array > coords(nOutDim); @@ -405,9 +420,6 @@ void Interpolator::Interpolate( int nOutPts = m_ptsOutField->GetNpoints(); int lastProg = 0; - m_weights = Array >(nOutPts); - m_neighInds = Array >(nOutPts); - for (int i = 0; i < nOutPts; ++i) { Array Lcoords(nInDim, 0.0); @@ -558,14 +570,20 @@ LibUtilities::PtsFieldSharedPtr Interpolator::GetOutField() const void Interpolator::PrintStatistics() { int meanN = 0; - for (int i = 0; i < m_neighInds.num_elements(); ++i) + for (int i = 0; i < m_neighInds.GetRows(); ++i) { - meanN += m_neighInds[i].num_elements(); + for (int j = 0; j < 
m_neighInds.GetColumns(); ++j) + { + if (m_neighInds[i][j] > 0) + { + meanN +=1; + } + } } - cout << "Number of points: " << m_neighInds.num_elements() << endl; + cout << "Number of points: " << m_neighInds.GetRows() << endl; cout << "mean Number of Neighbours per point: " - << meanN / m_neighInds.num_elements() << endl; + << meanN / m_neighInds.GetRows() << endl; } /** @@ -590,30 +608,23 @@ void Interpolator::CalcW_Gauss(const PtsPoint &searchPt, // handle the cases that there was no or just one point within 4 * sigma if (numPts == 0) { - m_neighInds[searchPt.idx] = Array(0); - m_weights[searchPt.idx] = Array(0); - return; } if (numPts == 1) { - m_neighInds[searchPt.idx] = - Array(1, neighbourPts.front().idx); - m_weights[searchPt.idx] = Array(1, 1.0); + m_neighInds[searchPt.idx][0] = neighbourPts.front().idx; + m_weights[searchPt.idx][0] = 1.0; return; } NekDouble sigmaNew = 0.25 * neighbourPts.back().dist; - m_neighInds[searchPt.idx] = Array(numPts); for (int i = 0; i < numPts; i++) { m_neighInds[searchPt.idx][i] = neighbourPts.at(i).idx; } - m_weights[searchPt.idx] = Array(numPts, 0.0); - NekDouble wSum = 0.0; NekDouble ts2 = 2 * sigmaNew * sigmaNew; for (int i = 0; i < numPts; ++i) @@ -627,9 +638,6 @@ void Interpolator::CalcW_Gauss(const PtsPoint &searchPt, { m_weights[searchPt.idx][i] = m_weights[searchPt.idx][i] / wSum; } - - ASSERTL0(Vmath::Nnan(numPts, m_weights[searchPt.idx], 1) == 0, - "NaN found in weights"); } /** @@ -648,10 +656,6 @@ void Interpolator::CalcW_Linear(const PtsPoint &searchPt, int m_coordId) NekDouble coord = searchPt.coords[m_coordId]; - int numPts = 2; - m_neighInds[searchPt.idx] = Array(numPts); - m_weights[searchPt.idx] = Array(numPts, 0.0); - for (i = 0; i < npts - 1; ++i) { if ((m_ptsInField->GetPointVal(0, i) <= @@ -694,9 +698,8 @@ void Interpolator::CalcW_NNeighbour(const PtsPoint &searchPt) // most distant points (of same distance) FindNNeighbours(searchPt, neighbourPts, 1); - m_neighInds[searchPt.idx] = - Array(1, 
neighbourPts.at(0).idx); - m_weights[searchPt.idx] = Array(1, 1.0); + m_neighInds[searchPt.idx][0] = neighbourPts[0].idx; + m_weights[searchPt.idx][0] = 1.0; } /** @@ -714,25 +717,20 @@ void Interpolator::CalcW_NNeighbour(const PtsPoint &searchPt) * Contrary to Shepard, we use a fixed number of points with fixed weighting * factors 1/d^n. */ -void Interpolator::CalcW_Shepard(const PtsPoint &searchPt) +void Interpolator::CalcW_Shepard(const PtsPoint &searchPt, int numPts) { // find nearest neighbours vector neighbourPts; - int numPts = pow(double(2), m_ptsInField->GetDim()); - numPts = min(numPts, int(m_ptsInField->GetNpoints() / 2)); FindNNeighbours(searchPt, neighbourPts, numPts); - m_neighInds[searchPt.idx] = Array(numPts); - for (int i = 0; i < numPts; i++) + for (int i = 0; i < neighbourPts.size(); i++) { - m_neighInds[searchPt.idx][i] = neighbourPts.at(i).idx; + m_neighInds[searchPt.idx][i] = neighbourPts[i].idx; } - m_weights[searchPt.idx] = Array(numPts, 0.0); - // In case d < kVertexTheSameDouble ( d^2 < kNekSqrtTol), use the exact // point and return - for (int i = 0; i < numPts; ++i) + for (int i = 0; i < neighbourPts.size(); ++i) { if (neighbourPts[i].dist <= NekConstants::kNekZeroTol) { @@ -742,20 +740,17 @@ void Interpolator::CalcW_Shepard(const PtsPoint &searchPt) } NekDouble wSum = 0.0; - for (int i = 0; i < numPts; ++i) + for (int i = 0; i < neighbourPts.size(); ++i) { m_weights[searchPt.idx][i] = 1 / pow(double(neighbourPts[i].dist), double(m_ptsInField->GetDim())); wSum += m_weights[searchPt.idx][i]; } - for (int i = 0; i < numPts; ++i) + for (int i = 0; i < neighbourPts.size(); ++i) { m_weights[searchPt.idx][i] = m_weights[searchPt.idx][i] / wSum; } - - ASSERTL0(Vmath::Nnan(numPts, m_weights[searchPt.idx], 1) == 0, - "NaN found in weights"); } /** @@ -776,10 +771,6 @@ void Interpolator::CalcW_Quadratic(const PtsPoint &searchPt, int m_coordId) NekDouble coord = searchPt.coords[m_coordId]; - int numPts = 3; - m_neighInds[searchPt.idx] = 
Array(numPts); - m_weights[searchPt.idx] = Array(numPts, 0.0); - for (i = 0; i < npts - 1; ++i) { if ((m_ptsInField->GetPointVal(0, i) <= diff --git a/library/FieldUtils/Interpolator.h b/library/FieldUtils/Interpolator.h index 02a2ca6e1a6412c45ebd65b9dbc02b0dd37158da..7176e2afd7ec8928d817ec3a64906cf8963b8ffd 100644 --- a/library/FieldUtils/Interpolator.h +++ b/library/FieldUtils/Interpolator.h @@ -38,15 +38,15 @@ #define FIELDUTILS_INTERPOLATOR_H #include +#include -#include -#include - -#include #include #include #include +#include +#include + #include #include @@ -56,9 +56,6 @@ #include "FieldUtilsDeclspec.h" -namespace bg = boost::geometry; -namespace bgi = boost::geometry::index; - namespace Nektar { namespace FieldUtils @@ -182,9 +179,9 @@ private: /// dimension of this interpolator. Hardcoded to 3 static const int m_dim = 3; - typedef bg::model::point BPoint; + typedef boost::geometry::model::point BPoint; typedef std::pair PtsPointPair; - typedef bgi::rtree > PtsRtree; + typedef boost::geometry::index::rtree > PtsRtree; /// input field LibUtilities::PtsFieldSharedPtr m_ptsInField; @@ -203,10 +200,10 @@ private: boost::shared_ptr m_rtree; /// Interpolation weights for each neighbour. /// Structure: m_weights[physPtIdx][neighbourIdx] - Array > m_weights; + Array m_weights; /// Indices of the relevant neighbours for each physical point. 
/// Structure: m_neighInds[ptIdx][neighbourIdx] - Array > m_neighInds; + Array m_neighInds; /// Filter width used for some interpolation algorithms NekDouble m_filtWidth; /// Max number of interpolation points @@ -225,7 +222,7 @@ private: FIELD_UTILS_EXPORT void CalcW_NNeighbour(const PtsPoint &searchPt); - FIELD_UTILS_EXPORT void CalcW_Shepard(const PtsPoint &searchPt); + FIELD_UTILS_EXPORT void CalcW_Shepard(const PtsPoint &searchPt, int numPts); FIELD_UTILS_EXPORT void CalcW_Quadratic(const PtsPoint &searchPt, int coordId); diff --git a/library/FieldUtils/Module.cpp b/library/FieldUtils/Module.cpp index 070c009dcb703a30e3134852ea6a0bfeaf9dfefe..08639d7afba4b1152e19dd3c4f2a09a8b52c7f64 100644 --- a/library/FieldUtils/Module.cpp +++ b/library/FieldUtils/Module.cpp @@ -48,10 +48,8 @@ namespace FieldUtils */ ModuleFactory &GetModuleFactory() { - typedef Loki::SingletonHolder - Type; - return Type::Instance(); + static ModuleFactory instance; + return instance; } /** @@ -157,6 +155,38 @@ void Module::SetDefaults() } } +/** + * @brief Tries to guess the format of the input file. + */ +string InputModule::GuessFormat(string filename) +{ + // Read first 64 bytes of data, assuming input is this long. + ifstream inFile(filename.c_str(), ios::binary); + vector data(64, 0); + inFile.read(&data[0], 64); + + string check(&data[0], 64); + + // Nek5000 format: first four characters are: #std + if (check.compare(0, 4, "#std") == 0) + { + inFile.close(); + return "fld5000"; + } + + // Semtex format: first line should contain the string "Session" at + // character 27. + if (check.compare(26, 7, "Session") == 0) + { + inFile.close(); + return "fldsem"; + } + + // Otherwise don't really know -- try to guess from file extension. + inFile.close(); + return ""; +} + /** * @brief Print a brief summary of information. 
*/ diff --git a/library/FieldUtils/Module.h b/library/FieldUtils/Module.h index db3ec74f5403a9372b5830582b575a64e735a4ab..3249cf9b00cca511a00e45e6c713c001ccc2ec3c 100644 --- a/library/FieldUtils/Module.h +++ b/library/FieldUtils/Module.h @@ -45,7 +45,6 @@ #include #include -#include #include #include @@ -59,8 +58,6 @@ namespace Nektar { namespace FieldUtils { -using namespace std; - /** * Denotes different types of mesh converter modules: so far only * input, output and process modules are defined. @@ -75,6 +72,54 @@ enum ModuleType const char *const ModuleTypeMap[] = {"Input", "Process", "Output"}; +enum ModulePriority +{ + eCreateGraph, + eCreateFieldData, + eModifyFieldData, + eCreateExp, + eFillExp, + eModifyExp, + eBndExtraction, + eCreatePts, + eConvertExpToPts, + eModifyPts, + eOutput, + SIZE_ModulePriority +}; + +/** + * @brief Swap endian ordering of the input variable. + */ +template +void swap_endian(T &u) +{ + union + { + T u; + unsigned char u8[sizeof(T)]; + } source, dest; + + source.u = u; + + for (size_t k = 0; k < sizeof(T); k++) + { + dest.u8[k] = source.u8[sizeof(T) - k - 1]; + } + + u = dest.u; +} + +template +void swap_endian(vector &u) +{ + size_t vecSize = u.size(); + for (int i = 0; i < vecSize; ++i) + { + swap_endian(u[i]); + } +} + /** * @brief Represents a command-line configuration option. */ @@ -87,7 +132,7 @@ struct ConfigOption * @param defValue Default value of the option. * @param desc Description of the option. */ - ConfigOption(bool isBool, string defValue, string desc) + ConfigOption(bool isBool, std::string defValue, std::string desc) : m_isBool(isBool), m_beenSet(false), m_value(), m_defValue(defValue), m_desc(desc) { @@ -121,11 +166,11 @@ struct ConfigOption /// line. If false, the default value will be put into #value. bool m_beenSet; /// The value of the configuration option. - string m_value; + std::string m_value; /// Default value of the configuration option. 
- string m_defValue; + std::string m_defValue; /// Description of the configuration option. - string m_desc; + std::string m_desc; }; /** @@ -137,26 +182,23 @@ class Module { public: FIELD_UTILS_EXPORT Module(FieldSharedPtr p_f) - : m_f(p_f), m_requireEquiSpaced(false) + : m_f(p_f) { } virtual void Process(po::variables_map &vm) = 0; virtual std::string GetModuleName() = 0; - FIELD_UTILS_EXPORT void RegisterConfig(string key, string value); - FIELD_UTILS_EXPORT void PrintConfig(); - FIELD_UTILS_EXPORT void SetDefaults(); - - FIELD_UTILS_EXPORT bool GetRequireEquiSpaced(void) + virtual std::string GetModuleDescription() { - return m_requireEquiSpaced; + return " "; } - FIELD_UTILS_EXPORT void SetRequireEquiSpaced(bool pVal) - { - m_requireEquiSpaced = pVal; - } + virtual ModulePriority GetModulePriority() = 0; + + FIELD_UTILS_EXPORT void RegisterConfig(std::string key, std::string value); + FIELD_UTILS_EXPORT void PrintConfig(); + FIELD_UTILS_EXPORT void SetDefaults(); FIELD_UTILS_EXPORT void EvaluateTriFieldAtEquiSpacedPts( LocalRegions::ExpansionSharedPtr &exp, @@ -169,8 +211,7 @@ protected: /// Field object FieldSharedPtr m_f; /// List of configuration values. - map m_config; - bool m_requireEquiSpaced; + std::map m_config;; }; /** @@ -186,12 +227,13 @@ class InputModule : public Module { public: InputModule(FieldSharedPtr p_m); - FIELD_UTILS_EXPORT void AddFile(string fileType, string fileName); + FIELD_UTILS_EXPORT void AddFile(std::string fileType, std::string fileName); + FIELD_UTILS_EXPORT static std::string GuessFormat(std::string fileName); protected: /// Print summary of elements. 
void PrintSummary(); - set m_allowedFiles; + std::set m_allowedFiles; }; typedef boost::shared_ptr InputModuleSharedPtr; @@ -223,11 +265,11 @@ public: protected: /// Output stream - ofstream m_fldFile; + std::ofstream m_fldFile; }; -typedef pair ModuleKey; -FIELD_UTILS_EXPORT ostream &operator<<(ostream &os, const ModuleKey &rhs); +typedef pair ModuleKey; +FIELD_UTILS_EXPORT std::ostream &operator<<(ostream &os, const ModuleKey &rhs); typedef boost::shared_ptr ModuleSharedPtr; typedef LibUtilities::NekFactory @@ -243,13 +285,13 @@ public: { m_size = size; m_rank = rank; - m_type = "FieldConvert parallel"; + m_type = "FieldConvert parallel"; } FieldConvertComm(int size, int rank) : CommSerial(0, NULL) { m_size = size; m_rank = rank; - m_type = "FieldConvert parallel"; + m_type = "FieldConvert parallel"; } virtual ~FieldConvertComm() { diff --git a/library/FieldUtils/OutputModules/OutputFileBase.cpp b/library/FieldUtils/OutputModules/OutputFileBase.cpp new file mode 100644 index 0000000000000000000000000000000000000000..525ec2f0844251483db10094e90dd9aaccc7ea63 --- /dev/null +++ b/library/FieldUtils/OutputModules/OutputFileBase.cpp @@ -0,0 +1,474 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// File: OutputFileBase.cpp +// +// For more information, please see: http://www.nektar.info/ +// +// The MIT License +// +// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA), +// Department of Aeronautics, Imperial College London (UK), and Scientific +// Computing and Imaging Institute, University of Utah (USA). 
+// +// License for the specific language governing rights and limitations under +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+// +// Description: Base class for outputting to a file +// +//////////////////////////////////////////////////////////////////////////////// + +#include +#include +using namespace std; + +#include "OutputFileBase.h" +#include +#include +#include + +namespace Nektar +{ +namespace FieldUtils +{ + +OutputFileBase::OutputFileBase(FieldSharedPtr f) : OutputModule(f) +{ + m_requireEquiSpaced = false; + m_config["writemultiplefiles"] = + ConfigOption(true,"0","Write multiple files in parallel or when using nparts option"); +} + +OutputFileBase::~OutputFileBase() +{ +} + +void OutputFileBase::Process(po::variables_map &vm) +{ + string filename = m_config["outfile"].as(); + + if(m_f->m_fieldPts != LibUtilities::NullPtsField) + { + ASSERTL0(!m_f->m_writeBndFld, + "Boundary can't be obtained from pts."); + if( WriteFile(filename, vm)) + { + OutputFromPts(vm); + + if (vm.count("error")) + { + PrintErrorFromPts(); + } + } + } + else if(m_f->m_exp.size()) + { + // reset expansion definition to use equispaced points if required. 
+ if (m_requireEquiSpaced && (vm.count("noequispaced") == 0 ) && + m_f->m_exp[0]->GetNumElmts() != 0) + { + ConvertExpToEquispaced(vm); + } + + if (m_f->m_writeBndFld) + { + if (m_f->m_verbose && m_f->m_comm->TreatAsRankZero()) + { + cout << "\t" << GetModuleName() + << ": Writing boundary file(s): "; + for (int i = 0; i < m_f->m_bndRegionsToWrite.size(); ++i) + { + cout << m_f->m_bndRegionsToWrite[i]; + if (i < m_f->m_bndRegionsToWrite.size() - 1) + { + cout << ", "; + } + } + cout << endl; + } + } + if (m_f->m_writeBndFld) + { + int nfields = m_f->m_exp.size(); + int normdim = m_f->m_graph->GetMeshDimension(); + + // Prepare for normals output + if (m_f->m_addNormals) + { + // Prepare for creating expansions for normals + m_f->m_exp.resize(nfields + normdim);; + + // Include normal name in m_variables + string normstr[3] = {"Norm_x", "Norm_y", "Norm_z"}; + for (int j = 0; j < normdim; ++j) + { + m_f->m_exp[nfields+j] = + m_f->AppendExpList(m_f->m_numHomogeneousDir); + m_f->m_variables.push_back(normstr[j]); + } + } + + // Move m_exp to a new expansion vector + vector exp(m_f->m_exp.size()); + exp.swap(m_f->m_exp); + + Array > + BndExp(exp.size()); + for (int i = 0; i < exp.size(); ++i) + { + BndExp[i] = exp[i]->GetBndCondExpansions(); + } + + // get hold of partition boundary regions so we can match it to + // desired region extraction + SpatialDomains::BoundaryConditions bcs(m_f->m_session, + exp[0]->GetGraph()); + const SpatialDomains::BoundaryRegionCollection bregions = + bcs.GetBoundaryRegions(); + SpatialDomains::BoundaryRegionCollection::const_iterator breg_it; + map BndRegionMap; + int cnt = 0; + for (breg_it = bregions.begin(); breg_it != bregions.end(); + ++breg_it, ++cnt) + { + BndRegionMap[breg_it->first] = cnt; + } + + // find ending of output file and insert _b1, _b2 + int dot = filename.find_last_of('.') + 1; + string ext = filename.substr(dot, filename.length() - dot); + string name = filename.substr(0, dot - 1); + + for (int i = 0; i < 
m_f->m_bndRegionsToWrite.size(); ++i) + { + string outname = + name + "_b" + + boost::lexical_cast(m_f->m_bndRegionsToWrite[i]) + + "." + ext; + + if(!WriteFile(outname, vm)) + { + continue; + } + RegisterConfig("outfile", outname); + + if (BndRegionMap.count(m_f->m_bndRegionsToWrite[i]) == 1) + { + int Border = BndRegionMap[m_f->m_bndRegionsToWrite[i]]; + + for (int j = 0; j < exp.size(); ++j) + { + m_f->m_exp[j] = BndExp[j][Border]; + m_f->m_exp[j]->BwdTrans( + m_f->m_exp[j]->GetCoeffs(), + m_f->m_exp[j]->UpdatePhys()); + } + + if (m_f->m_addNormals) + { + // Get normals + Array > NormPhys; + exp[0]->GetBoundaryNormals(Border, NormPhys); + + // add normal coefficients to expansions + for (int j = 0; j < normdim; ++j) + { + m_f->m_exp[nfields+j] = BndExp[nfields+j][Border]; + Vmath::Vcopy(m_f->m_exp[nfields+j]->GetTotPoints(), + NormPhys[j], 1, + m_f->m_exp[nfields+j]->UpdatePhys(), 1); + m_f->m_exp[nfields+j]->FwdTrans_IterPerExp( + m_f->m_exp[nfields+j]->GetPhys(), + m_f->m_exp[nfields+j]->UpdateCoeffs()); + } + } + OutputFromExp(vm); + // output error for regression checking. + if (vm.count("error")) + { + PrintErrorFromExp(); + } + } + else + { + // Empty expansion for parallel communication + for (int j = 0; j < exp.size(); ++j) + { + m_f->m_exp[j] = + MemoryManager:: + AllocateSharedPtr(); + } + OutputFromExp(vm); + // output error for regression checking. + if (vm.count("error")) + { + PrintErrorFromExp(); + } + } + } + } + else + { + if( WriteFile(filename, vm)) + { + OutputFromExp(vm); + // output error for regression checking. + if (vm.count("error")) + { + PrintErrorFromExp(); + } + } + } + } + else if(m_f->m_data.size()) + { + ASSERTL0(!m_f->m_writeBndFld, + "Boundary extraction requires xml file."); + if( WriteFile(filename, vm)) + { + OutputFromData(vm); + } + } +} + +bool OutputFileBase::WriteFile(std::string &filename, po::variables_map &vm) +{ + // Get path to file. 
If procid was defined, get the full name + // to avoid checking files from other partitions + fs::path outFile; + if(vm.count("nparts")) + { + outFile = GetFullOutName(filename, vm); + } + else + { + outFile = GetPath(filename, vm); + } + + LibUtilities::CommSharedPtr comm; + if (m_f->m_comm) + { + comm = m_f->m_comm; + } + else + { + comm = LibUtilities::GetCommFactory().CreateInstance( + "Serial", 0, 0); + } + + int count = fs::exists(outFile) ? 1 : 0; + comm->AllReduce(count, LibUtilities::ReduceSum); + + int writeFile = 1; + if (count && (vm.count("forceoutput") == 0)) + { + if(vm.count("nparts") == 0 ) // do not do check if --nparts is enabled. + { + + writeFile = 0; // set to zero for reduce all to be correct. + + if (comm->TreatAsRankZero()) + { + string answer; + cout << "Did you wish to overwrite " << outFile << " (y/n)? "; + getline(cin, answer); + if (answer.compare("y") == 0) + { + writeFile = 1; + } + else + { + cout << "Not writing file " << filename + << " because it already exists" << endl; + } + } + comm->AllReduce(writeFile, LibUtilities::ReduceSum); + } + } + return (writeFile == 0) ? 
false : true; +} + +void OutputFileBase::ConvertExpToEquispaced(po::variables_map &vm) +{ + // Information to create new expansion + int numFields = m_f->m_exp.size(); + m_f->m_fielddef = m_f->m_exp[0]->GetFieldDefinitions(); + + // Set points to equispaced + int nPointsNew = 0; + if (vm.count("output-points")) + { + nPointsNew = vm["output-points"].as(); + } + m_f->m_graph->SetExpansionsToEvenlySpacedPoints(nPointsNew); + + // Save original expansion + vector expOld = m_f->m_exp; + // Create new expansion + m_f->m_exp[0] = m_f->SetUpFirstExpList(m_f->m_numHomogeneousDir, + true); + for(int i = 1; i < numFields; ++i) + { + m_f->m_exp[i] = + m_f->AppendExpList(m_f->m_numHomogeneousDir); + } + // Extract result to new expansion + for(int i = 0; i < numFields; ++i) + { + m_f->m_exp[i]->ExtractCoeffsToCoeffs( + expOld[i], + expOld[i]->GetCoeffs(), + m_f->m_exp[i]->UpdateCoeffs()); + m_f->m_exp[i]->BwdTrans( + m_f->m_exp[i]->GetCoeffs(), + m_f->m_exp[i]->UpdatePhys()); + } + // Extract boundary expansion if needed + if (m_f->m_writeBndFld) + { + Array BndExpOld; + MultiRegions::ExpListSharedPtr BndExp; + for(int i = 0; i < numFields; ++i) + { + BndExpOld = expOld[i]->GetBndCondExpansions(); + for(int j = 0; j < BndExpOld.num_elements(); ++j) + { + BndExp = m_f->m_exp[i]->UpdateBndCondExpansion(j); + + BndExp->ExtractCoeffsToCoeffs( + BndExpOld[j], + BndExpOld[j]->GetCoeffs(), + BndExp->UpdateCoeffs()); + } + } + } +} + +void OutputFileBase::PrintErrorFromPts() +{ + int coordim = m_f->m_fieldPts->GetDim(); + std::string coordVars[] = { "x", "y", "z" }; + + vector variables = m_f->m_variables; + variables.insert(variables.begin(), + coordVars, + coordVars + coordim); + // Get fields and coordinates + Array > fields(variables.size()); + + // We can just grab everything from points. This should be a + // reference, not a copy. 
+ m_f->m_fieldPts->GetPts(fields); + for (int i = 0; i < fields.num_elements(); ++i) + { + // calculate L2 and Linf value + int npts = fields[i].num_elements(); + + NekDouble l2err = 0.0; + NekDouble linferr = 0.0; + for (int j = 0; j < npts; ++j) + { + l2err += fields[i][j] * fields[i][j]; + linferr = max(linferr, fabs(fields[i][j])); + } + + m_f->m_comm->AllReduce(l2err , LibUtilities::ReduceSum); + m_f->m_comm->AllReduce(npts , LibUtilities::ReduceSum); + m_f->m_comm->AllReduce(linferr, LibUtilities::ReduceMax); + + l2err /= npts; + l2err = sqrt(l2err); + + if (m_f->m_comm->TreatAsRankZero()) + { + cout << "L 2 error (variable " + << variables[i] << ") : " << l2err + << endl; + + cout << "L inf error (variable " + << variables[i] << ") : " << linferr + << endl; + } + } +} + +void OutputFileBase::PrintErrorFromExp() +{ + int coordim = + m_f->m_exp[0]->GetExp(0)->GetCoordim() + m_f->m_numHomogeneousDir; + int totpoints = m_f->m_exp[0]->GetTotPoints(); + std::string coordVars[] = { "x", "y", "z" }; + + // Set up storage for coordinates + Array > coords(coordim); + for (int i = 0; i < coordim; ++i) + { + coords[i] = Array(totpoints); + } + + // Get coordinates + if (coordim == 1) + { + m_f->m_exp[0]->GetCoords(coords[0]); + } + else if (coordim == 2) + { + m_f->m_exp[0]->GetCoords(coords[0], coords[1]); + } + else + { + m_f->m_exp[0]->GetCoords(coords[0], coords[1], coords[2]); + } + + for (int j = 0; j < coordim; ++j) + { + NekDouble l2err = m_f->m_exp[0]->L2 (coords[j]); + NekDouble linferr = m_f->m_exp[0]->Linf(coords[j]); + + if (m_f->m_comm->TreatAsRankZero()) + { + cout << "L 2 error (variable " + << coordVars[j] << ") : " << l2err + << endl; + + cout << "L inf error (variable " + << coordVars[j] << ") : " << linferr + << endl; + } + } + + for (int j = 0; j < m_f->m_exp.size(); ++j) + { + NekDouble l2err = m_f->m_exp[j]->L2 (m_f->m_exp[j]->GetPhys()); + NekDouble linferr = m_f->m_exp[j]->Linf(m_f->m_exp[j]->GetPhys()); + + if (m_f->m_comm->TreatAsRankZero()) + 
{ + cout << "L 2 error (variable " + << m_f->m_variables[j] << ") : " << l2err + << endl; + + cout << "L inf error (variable " + << m_f->m_variables[j] << ") : " << linferr + << endl; + } + } +} + +} +} diff --git a/library/FieldUtils/OutputModules/OutputFileBase.h b/library/FieldUtils/OutputModules/OutputFileBase.h new file mode 100644 index 0000000000000000000000000000000000000000..9788c4139dcec4c19e69058dcc4e31e57dee52e9 --- /dev/null +++ b/library/FieldUtils/OutputModules/OutputFileBase.h @@ -0,0 +1,102 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// File: OutputFileBase.h +// +// For more information, please see: http://www.nektar.info/ +// +// The MIT License +// +// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA), +// Department of Aeronautics, Imperial College London (UK), and Scientific +// Computing and Imaging Institute, University of Utah (USA). +// +// License for the specific language governing rights and limitations under +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// Description: Base class for outputting to a file +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef FIELDUTILS_OUTPUTFILEBASE +#define FIELDUTILS_OUTPUTFILEBASE + +#include "../Module.h" +#include + +namespace Nektar +{ +namespace FieldUtils +{ + +/// Converter from fld to vtk. +class OutputFileBase : public OutputModule +{ +public: + OutputFileBase(FieldSharedPtr f); + virtual ~OutputFileBase(); + + /// Write fld to output file. + virtual void Process(po::variables_map &vm); + + virtual std::string GetModuleName() + { + return "OutputFileBase"; + } + + virtual std::string GetModuleDescription() + { + return "Writing file"; + } + + virtual ModulePriority GetModulePriority() + { + return eOutput; + } + +protected: + /// Write from pts to output file. + virtual void OutputFromPts(po::variables_map &vm) = 0; + + /// Write from m_exp to output file. + virtual void OutputFromExp(po::variables_map &vm) = 0; + + /// Write from data to output file. 
+ virtual void OutputFromData(po::variables_map &vm) = 0; + + virtual fs::path GetPath(std::string &filename, + po::variables_map &vm) = 0; + + virtual fs::path GetFullOutName(std::string &filename, + po::variables_map &vm) = 0; + + bool m_requireEquiSpaced; + +private: + bool WriteFile(std::string &filename, po::variables_map &vm); + + void ConvertExpToEquispaced(po::variables_map &vm); + + void PrintErrorFromPts(); + + void PrintErrorFromExp(); +}; +} +} + +#endif diff --git a/library/FieldUtils/OutputModules/OutputFld.cpp b/library/FieldUtils/OutputModules/OutputFld.cpp index 5a1e2091d568e7dc306fd9d49d7a45b483fbfc55..7d006058a5190cdd3a3f8daf9177b6bde2ebda0f 100644 --- a/library/FieldUtils/OutputModules/OutputFld.cpp +++ b/library/FieldUtils/OutputModules/OutputFld.cpp @@ -35,6 +35,7 @@ #include #include +#include using namespace std; #include "OutputFld.h" @@ -54,7 +55,7 @@ ModuleKey OutputFld::m_className[2] = { "Writes a Fld file."), }; -OutputFld::OutputFld(FieldSharedPtr f) : OutputModule(f) +OutputFld::OutputFld(FieldSharedPtr f) : OutputFileBase(f) { m_config["format"] = ConfigOption( false, "Xml", "Output format of field file"); @@ -64,259 +65,122 @@ OutputFld::~OutputFld() { } -void OutputFld::Process(po::variables_map &vm) +void OutputFld::OutputFromPts(po::variables_map &vm) { + ASSERTL0(false, "OutputFld can't write using Pts information."); +} + +void OutputFld::OutputFromExp(po::variables_map &vm) +{ + ASSERTL0(m_f->m_variables.size(), + "OutputFld: need input data.") + // Extract the output filename and extension string filename = m_config["outfile"].as(); - // Set up communicator and FieldIO object. - LibUtilities::CommSharedPtr c = m_f->m_session ? m_f->m_session->GetComm() : - LibUtilities::GetCommFactory().CreateInstance("Serial", 0, 0); + // Set up FieldIO object. 
LibUtilities::FieldIOSharedPtr fld = LibUtilities::GetFieldIOFactory().CreateInstance( - m_config["format"].as(), c, true); - - if (m_f->m_writeBndFld) - { - ModuleKey module; - - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "OutputFld: Writing boundary file(s): "; - for (int i = 0; i < m_f->m_bndRegionsToWrite.size(); ++i) - { - cout << m_f->m_bndRegionsToWrite[i]; - if (i < m_f->m_bndRegionsToWrite.size() - 1) - { - cout << ","; - } - } - cout << endl; - } - } - - // Extract data to boundaryconditions - if (m_f->m_fldToBnd) - { - for (int i = 0; i < m_f->m_exp.size(); ++i) - { - m_f->m_exp[i]->FillBndCondFromField(); - } - } - - int nfields = m_f->m_exp.size(); - Array > BndExp( - nfields); - for (int i = 0; i < nfields; ++i) - { - BndExp[i] = m_f->m_exp[i]->GetBndCondExpansions(); - } - - // get hold of partition boundary regions so we can match it to desired - // region extraction - SpatialDomains::BoundaryConditions bcs(m_f->m_session, - m_f->m_exp[0]->GetGraph()); - const SpatialDomains::BoundaryRegionCollection bregions = - bcs.GetBoundaryRegions(); - SpatialDomains::BoundaryRegionCollection::const_iterator breg_it; - map BndRegionMap; - int cnt = 0; - for (breg_it = bregions.begin(); breg_it != bregions.end(); - ++breg_it, ++cnt) - { - BndRegionMap[breg_it->first] = cnt; - } + GetIOFormat(), m_f->m_comm, true); - // find ending of output file and insert _b1, _b2 - int dot = filename.find_last_of('.') + 1; - string ext = filename.substr(dot, filename.length() - dot); - string name = filename.substr(0, dot - 1); + int i, j, s; + int nfields = m_f->m_variables.size(); + int nstrips; + m_f->m_session->LoadParameter("Strip_Z", nstrips, 1); - for (int i = 0; i < m_f->m_bndRegionsToWrite.size(); ++i) + if(m_f->m_exp[0]->GetNumElmts() != 0) + { + std::vector FieldDef = + m_f->m_exp[0]->GetFieldDefinitions(); + std::vector > FieldData(FieldDef.size()); + for (s = 0; s < nstrips; ++s) { - string outname = - name + "_b" + - 
boost::lexical_cast(m_f->m_bndRegionsToWrite[i]) + "." + - ext; - - std::vector FieldDef; - std::vector > FieldData; - - if (BndRegionMap.count(m_f->m_bndRegionsToWrite[i]) == 1) + for (j = 0; j < nfields; ++j) { - int Border = BndRegionMap[m_f->m_bndRegionsToWrite[i]]; - - FieldDef = BndExp[0][Border]->GetFieldDefinitions(); - FieldData.resize(FieldDef.size()); - - for (int j = 0; j < nfields; ++j) - { - for (int k = 0; k < FieldDef.size(); ++k) - { - BndExp[j][Border]->AppendFieldData(FieldDef[k], - FieldData[k]); - - if (m_f->m_fielddef.size() > 0) - { - FieldDef[k]->m_fields.push_back( - m_f->m_fielddef[0]->m_fields[j]); - } - else - { - FieldDef[k]->m_fields.push_back( - m_f->m_session->GetVariable(j)); - } - } - } - - if (m_f->m_addNormals) + for (i = 0; i < FieldDef.size() / nstrips; ++i) { - int normdim = m_f->m_graph->GetMeshDimension(); - string normstr[3] = {"Norm_x", "Norm_y", "Norm_z"}; + int n = s * FieldDef.size() / nstrips + i; - // Get normals - Array > NormPhys; - m_f->m_exp[0]->GetBoundaryNormals(Border, NormPhys); - - // add normal coefficients to list to be dumped - for (int j = 0; j < normdim; ++j) - { - BndExp[0][Border]->FwdTrans( - NormPhys[j], BndExp[0][Border]->UpdateCoeffs()); - - for (int k = 0; k < FieldDef.size(); ++k) - { - BndExp[0][Border]->AppendFieldData(FieldDef[k], - FieldData[k]); - FieldDef[k]->m_fields.push_back(normstr[j]); - } - } - } - - // output error for regression checking. - if (vm.count("error")) - { - int rank = m_f->m_session->GetComm()->GetRank(); - - for (int j = 0; j < nfields; ++j) - { - BndExp[j][Border]->BwdTrans( - BndExp[j][Border]->GetCoeffs(), - BndExp[j][Border]->UpdatePhys()); - - // Note currently these calls will - // hange since not all partitions will - // call error. 
- NekDouble l2err = - BndExp[j][Border]->L2(BndExp[j][Border]->GetPhys()); - - NekDouble linferr = BndExp[j][Border]->Linf( - BndExp[j][Border]->GetPhys()); - - if (rank == 0) - { - cout << "L 2 error (variable " - << FieldDef[0]->m_fields[j] << ") : " << l2err - << endl; - - cout << "L inf error (variable " - << FieldDef[0]->m_fields[j] - << ") : " << linferr << endl; - } - } + FieldDef[n]->m_fields.push_back(m_f->m_variables[j]); + m_f->m_exp[s * nfields + j]->AppendFieldData( + FieldDef[n], FieldData[n]); } } - - fld->Write(outname, FieldDef, FieldData, m_f->m_fieldMetaDataMap); } + fld->Write(filename, FieldDef, FieldData, m_f->m_fieldMetaDataMap); } else { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "OutputFld: Writing file..." << endl; - } - } + std::vector FieldDef = + std::vector(); + std::vector > FieldData = + std::vector >(); + fld->Write(filename, FieldDef, FieldData, m_f->m_fieldMetaDataMap); + } +} - fs::path writefile(filename); - int writefld = 1; - if (fs::exists(writefile) && (vm.count("forceoutput") == 0)) - { - int rank = 0; - LibUtilities::CommSharedPtr comm; +void OutputFld::OutputFromData(po::variables_map &vm) +{ + // Extract the output filename and extension + string filename = m_config["outfile"].as(); + // Set up FieldIO object. + LibUtilities::FieldIOSharedPtr fld = + LibUtilities::GetFieldIOFactory().CreateInstance( + GetIOFormat(), m_f->m_comm, true); - if (m_f->m_session) - { - comm = m_f->m_session->GetComm(); - rank = comm->GetRank(); - } - else - { - comm = LibUtilities::GetCommFactory().CreateInstance( - "Serial", 0, 0); - } + fld->Write(filename, m_f->m_fielddef, m_f->m_data, + m_f->m_fieldMetaDataMap); +} - writefld = 0; // set to zero for reduce all to be correct. +fs::path OutputFld::GetPath(std::string &filename, + po::variables_map &vm) +{ + return fs::path(filename); +} - if (rank == 0) - { - string answer; - cout << "Did you wish to overwrite " << filename << " (y/n)? 
"; - getline(cin, answer); - if (answer.compare("y") == 0) - { - writefld = 1; - } - else - { - cout << "Not writing file " << filename - << " because it already exists" << endl; - } - } +fs::path OutputFld::GetFullOutName(std::string &filename, + po::variables_map &vm) +{ + int nprocs = m_f->m_comm->GetSize(); + fs::path specPath(filename), fulloutname; + if (nprocs == 1) + { + fulloutname = specPath; + } + else + { + // Guess at filename that might belong to this process. + boost::format pad("P%1$07d.%2$s"); + pad % m_f->m_comm->GetRank() % "fld"; - comm->AllReduce(writefld, LibUtilities::ReduceSum); - } + // Generate full path name + fs::path poutfile(pad.str()); + fulloutname = specPath / poutfile; + } + return fulloutname; +} - if (writefld) +std::string OutputFld::GetIOFormat() +{ + std::string iofmt("Xml"); + if(m_f->m_session) + { + if (m_f->m_session->DefinesSolverInfo("IOFormat")) { - fld->Write(filename, m_f->m_fielddef, m_f->m_data, - m_f->m_fieldMetaDataMap); + iofmt = m_f->m_session->GetSolverInfo("IOFormat"); } - - // output error for regression checking. 
- if (vm.count("error")) + if (m_f->m_session->DefinesCmdLineArgument("io-format")) { - int rank = m_f->m_session->GetComm()->GetRank(); - - for (int j = 0; j < m_f->m_exp.size(); ++j) - { - if (m_f->m_exp[j]->GetPhysState() == false) - { - m_f->m_exp[j]->BwdTrans(m_f->m_exp[j]->GetCoeffs(), - m_f->m_exp[j]->UpdatePhys()); - } - - NekDouble l2err = m_f->m_exp[j]->L2(m_f->m_exp[j]->GetPhys()); - - NekDouble linferr = - m_f->m_exp[j]->Linf(m_f->m_exp[j]->GetPhys()); - if (rank == 0) - { - cout << "L 2 error (variable " - << m_f->m_fielddef[0]->m_fields[j] << ") : " << l2err - << endl; - - cout << "L inf error (variable " - << m_f->m_fielddef[0]->m_fields[j] << ") : " << linferr - << endl; - } - } + iofmt = + m_f->m_session->GetCmdLineArgument("io-format"); } } + if(m_config["format"].m_beenSet) + { + iofmt = m_config["format"].as(); + } + return iofmt; } + } } diff --git a/library/FieldUtils/OutputModules/OutputFld.h b/library/FieldUtils/OutputModules/OutputFld.h index 3ec2cdceccb273b8401f759ab41867b548727e7a..d5f84311e6b1055cf2e2654d30faffefa7a83a15 100644 --- a/library/FieldUtils/OutputModules/OutputFld.h +++ b/library/FieldUtils/OutputModules/OutputFld.h @@ -36,7 +36,7 @@ #ifndef FIELDUTILS_OUTPUTFLD #define FIELDUTILS_OUTPUTFLD -#include "../Module.h" +#include "OutputFileBase.h" #include namespace Nektar @@ -44,8 +44,8 @@ namespace Nektar namespace FieldUtils { -/// Converter from fld to vtk. -class OutputFld : public OutputModule +/// Output to fld format. +class OutputFld : public OutputFileBase { public: /// Creates an instance of this class @@ -58,13 +58,30 @@ public: OutputFld(FieldSharedPtr f); virtual ~OutputFld(); - /// Write fld to output file. - virtual void Process(po::variables_map &vm); - virtual std::string GetModuleName() { return "OutputFld"; } + +protected: + /// Write from pts to output file. + virtual void OutputFromPts(po::variables_map &vm); + + /// Write from m_exp to output file. 
+ virtual void OutputFromExp(po::variables_map &vm); + + /// Write from data to output file. + virtual void OutputFromData(po::variables_map &vm); + + virtual fs::path GetPath(std::string &filename, + po::variables_map &vm); + + virtual fs::path GetFullOutName(std::string &filename, + po::variables_map &vm); + +private: + std::string GetIOFormat(); + }; } } diff --git a/library/FieldUtils/OutputModules/OutputInfo.cpp b/library/FieldUtils/OutputModules/OutputInfo.cpp index ae409e6a191b9ef6bed8932f42b9ec53545b5137..83d864e067ad30986fbe3cab7cc7fc694cd37209 100644 --- a/library/FieldUtils/OutputModules/OutputInfo.cpp +++ b/library/FieldUtils/OutputModules/OutputInfo.cpp @@ -55,6 +55,7 @@ ModuleKey OutputInfo::m_className = OutputInfo::OutputInfo(FieldSharedPtr f) : OutputModule(f) { + m_config["nparts"] = ConfigOption(false, "NotSet", "Number of partitions over which to create the info file"); } OutputInfo::~OutputInfo() @@ -67,22 +68,16 @@ void OutputInfo::Process(po::variables_map &vm) string filename = m_config["outfile"].as(); int i; - if (m_f->m_verbose) - { - cout << "OutputInfo: Writing Info file..." 
<< endl; - } - // partition mesh - ASSERTL0(vm.count("nprocs") > 0, - "--nprocs nust be specified with info output"); - - int nprocs = vm["nprocs"].as(); + ASSERTL0(m_config["nparts"].as().compare("NotSet") != 0, + "Need to specify nparts for info output"); + int nparts = m_config["nparts"].as(); LibUtilities::CommSharedPtr vComm = boost::shared_ptr( - new FieldConvertComm(0, NULL, nprocs, 0)); - vComm->SplitComm(1, nprocs); + new FieldConvertComm(0, NULL, nparts, 0)); + vComm->SplitComm(1, nparts); - // define new session with psuedo parallel communicator + // define new session with pseudo parallel communicator string xml_ending = "xml"; string xml_gz_ending = "xml.gz"; @@ -125,13 +120,13 @@ void OutputInfo::Process(po::variables_map &vm) LibUtilities::GetMeshPartitionFactory().CreateInstance(vPartitionerName, vSession); - vMeshPartition->PartitionMesh(nprocs, true); + vMeshPartition->PartitionMesh(nparts, true); // get hold of local partition ids - std::vector > ElementIDs(nprocs); + std::vector > ElementIDs(nparts); // Populate the list of element ID lists from all processes - for (i = 0; i < nprocs; ++i) + for (i = 0; i < nparts; ++i) { std::vector tmp; vMeshPartition->GetElementIDs(i, tmp); @@ -140,7 +135,7 @@ void OutputInfo::Process(po::variables_map &vm) // Set up output names std::vector filenames; - for (int i = 0; i < nprocs; ++i) + for (int i = 0; i < nparts; ++i) { boost::format pad("P%1$07d.fld"); pad % i; @@ -148,8 +143,7 @@ void OutputInfo::Process(po::variables_map &vm) } // Write the output file - LibUtilities::CommSharedPtr c = m_f->m_session ? 
m_f->m_session->GetComm() : - LibUtilities::GetCommFactory().CreateInstance("Serial", 0, 0); + LibUtilities::CommSharedPtr c = m_f->m_comm; boost::shared_ptr fldXml = boost::static_pointer_cast( LibUtilities::GetFieldIOFactory().CreateInstance("Xml", c, true)); diff --git a/library/FieldUtils/OutputModules/OutputInfo.h b/library/FieldUtils/OutputModules/OutputInfo.h index d2b02473b791ec022594e06d721268aede85d956..ae0165cc00e624850f6eedbfab225440bdf73baa 100644 --- a/library/FieldUtils/OutputModules/OutputInfo.h +++ b/library/FieldUtils/OutputModules/OutputInfo.h @@ -64,6 +64,17 @@ public: { return "OutputInfo"; } + + virtual std::string GetModuleDescription() + { + return "Writing Info file"; + } + + virtual ModulePriority GetModulePriority() + { + return eOutput; + } + }; } } diff --git a/library/FieldUtils/OutputModules/OutputPts.cpp b/library/FieldUtils/OutputModules/OutputPts.cpp index b7a43a93f31b5f0a54424a62ed2c7000c6892db7..6bd76d41e6bf322bb82f27b0b98b9b733e81b5c1 100644 --- a/library/FieldUtils/OutputModules/OutputPts.cpp +++ b/library/FieldUtils/OutputModules/OutputPts.cpp @@ -56,7 +56,7 @@ ModuleKey OutputPts::m_className[5] = { }; -OutputPts::OutputPts(FieldSharedPtr f) : OutputModule(f) +OutputPts::OutputPts(FieldSharedPtr f) : OutputFileBase(f) { } @@ -64,141 +64,81 @@ OutputPts::~OutputPts() { } -void OutputPts::Process(po::variables_map &vm) +void OutputPts::OutputFromPts(po::variables_map &vm) { // Extract the output filename and extension string filename = m_config["outfile"].as(); - if (m_f->m_verbose) + if (boost::filesystem::path(filename).extension() == ".csv") { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "OutputPts: Writing file..." 
<< endl; - } + LibUtilities::CsvIO csvIO(m_f->m_comm); + csvIO.Write(filename, m_f->m_fieldPts); } + else + { + LibUtilities::PtsIO ptsIO(m_f->m_comm); + ptsIO.Write(filename, m_f->m_fieldPts); + } +} + +void OutputPts::OutputFromExp(po::variables_map &vm) +{ + Array > tmp( + m_f->m_exp[0]->GetCoordim(0) + + m_f->m_variables.size()); - fs::path writefile(filename); - int writepts = 1; - if (fs::exists(writefile) && (vm.count("forceoutput") == 0)) + switch (m_f->m_exp[0]->GetCoordim(0)) { - LibUtilities::CommSharedPtr comm = m_f->m_comm; - int rank = comm->GetRank(); - writepts = 0; // set to zero for reduce all to be correct. - - if (rank == 0) - { - string answer; - cout << "Did you wish to overwrite " << filename << " (y/n)? "; - getline(cin, answer); - if (answer.compare("y") == 0) - { - writepts = 1; - } - else - { - cout << "Not writing file " << filename - << " because it already exists" << endl; - } - } - - comm->AllReduce(writepts, LibUtilities::ReduceSum); + case 1: + tmp[0] = Array(m_f->m_exp[0]->GetTotPoints()); + m_f->m_exp[0]->GetCoords(tmp[0]); + break; + + case 2: + tmp[1] = Array(m_f->m_exp[0]->GetTotPoints()); + tmp[0] = Array(m_f->m_exp[0]->GetTotPoints()); + m_f->m_exp[0]->GetCoords(tmp[0], tmp[1]); + break; + + case 3: + tmp[2] = Array(m_f->m_exp[0]->GetTotPoints()); + tmp[1] = Array(m_f->m_exp[0]->GetTotPoints()); + tmp[0] = Array(m_f->m_exp[0]->GetTotPoints()); + m_f->m_exp[0]->GetCoords(tmp[0], tmp[1], tmp[2]); + break; } - if (writepts) + for (int i = 0; i < m_f->m_variables.size(); ++i) { - LibUtilities::PtsFieldSharedPtr fPts = m_f->m_fieldPts; - if(m_f->m_fieldPts == LibUtilities::NullPtsField) - { - Array > tmp( - m_f->m_exp[0]->GetCoordim(0) + - m_f->m_fielddef[0]->m_fields.size()); - - switch (m_f->m_exp[0]->GetCoordim(0)) - { - case 1: - tmp[0] = Array(m_f->m_exp[0]->GetTotPoints()); - m_f->m_exp[0]->GetCoords(tmp[0]); - break; - - case 2: - tmp[1] = Array(m_f->m_exp[0]->GetTotPoints()); - tmp[0] = 
Array(m_f->m_exp[0]->GetTotPoints()); - m_f->m_exp[0]->GetCoords(tmp[0], tmp[1]); - break; - - case 3: - tmp[2] = Array(m_f->m_exp[0]->GetTotPoints()); - tmp[1] = Array(m_f->m_exp[0]->GetTotPoints()); - tmp[0] = Array(m_f->m_exp[0]->GetTotPoints()); - m_f->m_exp[0]->GetCoords(tmp[0], tmp[1], tmp[2]); - break; - } - - for (int i = 0; i < m_f->m_fielddef[0]->m_fields.size(); ++i) - { - tmp[i + m_f->m_exp[0]->GetCoordim(0)] = - m_f->m_exp[i]->GetPhys(); - } - fPts = - MemoryManager::AllocateSharedPtr( - m_f->m_exp[0]->GetCoordim(0), - m_f->m_fielddef[0]->m_fields, - tmp); - } - - if (boost::filesystem::path(filename).extension() == ".csv") - { - LibUtilities::CsvIO csvIO(m_f->m_comm); - csvIO.Write(filename, fPts); - } - else - { - LibUtilities::PtsIO ptsIO(m_f->m_comm); - ptsIO.Write(filename, fPts); - } - - // output error for regression checking. - if (vm.count("error")) - { - int rank = m_f->m_comm->GetRank(); - - for (int j = 0; j < fPts->GetNFields(); ++j) - { - Array tmp(fPts->GetNpoints()); - Vmath::Vmul(fPts->GetNpoints(), - fPts->GetPts(fPts->GetDim() + j), 1, - fPts->GetPts(fPts->GetDim() + j), 1, - tmp, 1); - NekDouble l2err = Vmath::Vsum(fPts->GetNpoints(), tmp, 1); - - // if val too small, sqrt returns nan. 
- if (fabs(l2err) < NekConstants::kNekSqrtTol*NekConstants::kNekSqrtTol) - { - l2err = 0.0; - } - else - { - l2err = sqrt(l2err); - } - - NekDouble linferr = Vmath::Vamax(fPts->GetNpoints(), - fPts->GetPts(fPts->GetDim() + j), 1); - - if (rank == 0) - { - cout << "L 2 error (variable " - << fPts->GetFieldName(j) << ") : " << l2err - << endl; - - cout << "L inf error (variable " - << fPts->GetFieldName(j) << ") : " << linferr - << endl; - } - } - } + tmp[i + m_f->m_exp[0]->GetCoordim(0)] = + m_f->m_exp[i]->GetPhys(); + } + m_f->m_fieldPts = + MemoryManager::AllocateSharedPtr( + m_f->m_exp[0]->GetCoordim(0), + m_f->m_variables, + tmp); + OutputFromPts(vm); +} - } +void OutputPts::OutputFromData(po::variables_map &vm) +{ + ASSERTL0(false, "OutputPts can't write using only FieldData."); } + +fs::path OutputPts::GetPath(std::string &filename, + po::variables_map &vm) +{ + return fs::path(filename); +} + +fs::path OutputPts::GetFullOutName(std::string &filename, + po::variables_map &vm) +{ + return fs::path(filename); +} + } } + diff --git a/library/FieldUtils/OutputModules/OutputPts.h b/library/FieldUtils/OutputModules/OutputPts.h index 80f7ac1b1d37a9987bde6f1cd63ac5ebcda8fb24..d191380d4c0c68a052a2b349050c37612c1d7b5b 100644 --- a/library/FieldUtils/OutputModules/OutputPts.h +++ b/library/FieldUtils/OutputModules/OutputPts.h @@ -36,7 +36,7 @@ #ifndef FIELDUTILS_OUTPUTPTS #define FIELDUTILS_OUTPUTPTS -#include "../Module.h" +#include "OutputFileBase.h" #include namespace Nektar @@ -45,7 +45,7 @@ namespace FieldUtils { /// Converter from fld to pts. -class OutputPts : public OutputModule +class OutputPts : public OutputFileBase { public: /// Creates an instance of this class @@ -58,13 +58,27 @@ public: OutputPts(FieldSharedPtr f); virtual ~OutputPts(); - /// Write fld to output file. - virtual void Process(po::variables_map &vm); - virtual std::string GetModuleName() { return "OutputPts"; } + +protected: + /// Write from pts to output file. 
+ virtual void OutputFromPts(po::variables_map &vm); + + /// Write from m_exp to output file. + virtual void OutputFromExp(po::variables_map &vm); + + /// Write from data to output file. + virtual void OutputFromData(po::variables_map &vm); + + virtual fs::path GetPath(std::string &filename, + po::variables_map &vm); + + virtual fs::path GetFullOutName(std::string &filename, + po::variables_map &vm); + }; } } diff --git a/library/FieldUtils/OutputModules/OutputStdOut.cpp b/library/FieldUtils/OutputModules/OutputStdOut.cpp index 73ce1f9ebba21033df3feb5c4c881aded7c2c3d3..a245382e2531cf8323e5370a5d079656ec22e373 100644 --- a/library/FieldUtils/OutputModules/OutputStdOut.cpp +++ b/library/FieldUtils/OutputModules/OutputStdOut.cpp @@ -62,13 +62,7 @@ OutputStdOut::~OutputStdOut() void OutputStdOut::Process(po::variables_map &vm) { - // Extract the output filename and extension - string filename = m_config["outfile"].as(); - - if (m_f->m_verbose) - { - cout << "OutputStdOut: Output written to StdOut" << endl; - } } + } } diff --git a/library/FieldUtils/OutputModules/OutputStdOut.h b/library/FieldUtils/OutputModules/OutputStdOut.h index b085ed9f82a3016e4c26cddb214fe60da9f3ac2f..4ab715df6fa02bf73236f7eaa9b7f560505bc9f6 100644 --- a/library/FieldUtils/OutputModules/OutputStdOut.h +++ b/library/FieldUtils/OutputModules/OutputStdOut.h @@ -64,6 +64,17 @@ public: { return "OutputStdOut"; } + + virtual std::string GetModuleDescription() + { + return "Output written to StdOut"; + } + + virtual ModulePriority GetModulePriority() + { + return eOutput; + } + }; } } diff --git a/library/FieldUtils/OutputModules/OutputTecplot.cpp b/library/FieldUtils/OutputModules/OutputTecplot.cpp index f382a73e38ac976f26eb3dc35431a87c110c2e6c..80f740485c5db3a9894e05f239d6b3b8d55edc8c 100644 --- a/library/FieldUtils/OutputModules/OutputTecplot.cpp +++ b/library/FieldUtils/OutputModules/OutputTecplot.cpp @@ -70,22 +70,32 @@ ModuleKey OutputTecplotBinary::m_className = OutputTecplotBinary::create, 
"Writes a Tecplot file in binary plt format."); -OutputTecplot::OutputTecplot(FieldSharedPtr f) : OutputModule(f), +OutputTecplot::OutputTecplot(FieldSharedPtr f) : OutputFileBase(f), m_binary(false), m_oneOutputFile(false) { - if (!f->m_setUpEquiSpacedFields) - { - m_requireEquiSpaced = true; - } - m_config["writemultiplefiles"] = - ConfigOption(true,"0","Write multiple files in parallel"); + m_requireEquiSpaced = true; } OutputTecplot::~OutputTecplot() { } +void OutputTecplot::Process(po::variables_map &vm) +{ + + if(m_config["writemultiplefiles"].as()) + { + m_oneOutputFile = false; + } + else + { + m_oneOutputFile = (m_f->m_comm->GetSize()> 1); + } + + OutputFileBase::Process(vm); +} + /** * @brief Helper function to write binary data to stream. */ @@ -137,280 +147,217 @@ template void WriteStream(std::ostream &outfile, data.size() * sizeof(T)); } -/** - * @brief Set up member variables to dump Tecplot format output. - */ -void OutputTecplot::Process(po::variables_map &vm) +void OutputTecplot::OutputFromPts(po::variables_map &vm) { LibUtilities::PtsFieldSharedPtr fPts = m_f->m_fieldPts; - m_numBlocks = 0; - - // Do nothing if no expansion defined - if (fPts == LibUtilities::NullPtsField && !m_f->m_exp.size()) + // do not output if zone is empty + if (fPts->GetNpoints() == 0) { return; } - - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "OutputTecplot: Writing file..." 
<< endl; - } - } - - // Extract the output filename and extension - string filename = m_config["outfile"].as(); - - int nprocs = m_f->m_comm->GetSize(); int rank = m_f->m_comm->GetRank(); + m_numBlocks = 0; - if(m_config["writemultiplefiles"].as()) - { - m_oneOutputFile = false; - } - else - { - m_oneOutputFile = nprocs > 1; - } - - // Amend for parallel output if required - if (nprocs != 1 && !m_oneOutputFile) - { - int dot = filename.find_last_of('.'); - string ext = filename.substr(dot, filename.length() - dot); - string procId = "_P" + boost::lexical_cast(rank); - string start = filename.substr(0, dot); - filename = start + procId + ext; - } - - std::string coordVars[] = { "x", "y", "z" }; - bool doError = (vm.count("error") == 1) ? true : false; - - // Open output file - ofstream outfile; - - if ((m_oneOutputFile && rank == 0) || !m_oneOutputFile) - { - outfile.open(filename.c_str(), m_binary ? ios::binary : ios::out); - } + m_coordim = fPts->GetDim(); - std::vector var; - bool writeHeader = true; + // Grab connectivity information. + fPts->GetConnectivity(m_conn); - if (fPts == LibUtilities::NullPtsField) + switch (fPts->GetPtsType()) { - // Standard tensor-product element setup. 
- std::vector fDef = - m_f->m_fielddef; - - if (fDef.size()) - { - var = fDef[0]->m_fields; - } - - // Calculate number of FE blocks - m_numBlocks = GetNumTecplotBlocks(); - - // Calculate coordinate dimension - int nBases = m_f->m_exp[0]->GetExp(0)->GetNumBases(); - MultiRegions::ExpansionType HomoExpType = m_f->m_exp[0]->GetExpType(); - - m_coordim = m_f->m_exp[0]->GetExp(0)->GetCoordim(); - var.insert(var.begin(), coordVars, coordVars + m_coordim); - - if (HomoExpType == MultiRegions::e3DH1D) + case LibUtilities::ePtsFile: + m_numPoints.resize(1); + m_numPoints[0] = fPts->GetNpoints(); + m_f->m_comm->AllReduce(m_numPoints[0], LibUtilities::ReduceSum); + m_zoneType = eOrdered; + break; + case LibUtilities::ePtsLine: + m_numPoints.resize(1); + m_numPoints[0] = fPts->GetPointsPerEdge(0); + m_zoneType = eOrdered; + break; + case LibUtilities::ePtsPlane: + m_numPoints.resize(2); + m_numPoints[0] = fPts->GetPointsPerEdge(0); + m_numPoints[1] = fPts->GetPointsPerEdge(1); + m_zoneType = eOrdered; + break; + case LibUtilities::ePtsBox: + m_numPoints.resize(3); + m_numPoints[0] = fPts->GetPointsPerEdge(0); + m_numPoints[1] = fPts->GetPointsPerEdge(1); + m_numPoints[2] = fPts->GetPointsPerEdge(2); + m_zoneType = eOrdered; + break; + case LibUtilities::ePtsTriBlock: { - int nPlanes = m_f->m_exp[0]->GetZIDs().num_elements(); - if (nPlanes == 1) // halfMode case - { - // do nothing - } - else + m_zoneType = eFETriangle; + for (int i = 0; i < m_conn.size(); ++i) { - nBases += 1; - m_coordim += 1; - NekDouble tmp = m_numBlocks * (nPlanes - 1); - m_numBlocks = (int)tmp; + m_numBlocks += m_conn[i].num_elements() / 3; } + break; } - else if (HomoExpType == MultiRegions::e3DH2D) + case LibUtilities::ePtsTetBlock: { - nBases += 2; - m_coordim += 2; + m_zoneType = eFETetrahedron; + for (int i = 0; i < m_conn.size(); ++i) + { + m_numBlocks += m_conn[i].num_elements() / 4; + } + break; } + default: + ASSERTL0(false, "This points type is not supported yet."); + } - m_zoneType = 
(TecplotZoneType)(2*(nBases-1) + 1); + // Get fields and coordinates + m_fields = + Array >(m_f->m_variables.size()+m_coordim); - // Calculate connectivity - CalculateConnectivity(); + // We can just grab everything from points. This should be a + // reference, not a copy. + fPts->GetPts(m_fields); - // Set up storage for output fields - m_fields = Array >(var.size()); + // Only write header if we're root or FE block; binary files always + // write header + m_writeHeader = + (m_zoneType != eOrdered || rank == 0) || m_binary; - // Get coordinates - int totpoints = m_f->m_exp[0]->GetTotPoints(); + WriteTecplotFile(vm); +} - for (int i = 0; i < m_coordim; ++i) - { - m_fields[i] = Array(totpoints); - } +void OutputTecplot::OutputFromExp(po::variables_map &vm) +{ + m_numBlocks = 0; + m_writeHeader = true; - if (m_coordim == 1) - { - m_f->m_exp[0]->GetCoords(m_fields[0]); - } - else if (m_coordim == 2) + // Calculate number of FE blocks + m_numBlocks = GetNumTecplotBlocks(); + + // Calculate coordinate dimension + int nBases = m_f->m_exp[0]->GetExp(0)->GetNumBases(); + MultiRegions::ExpansionType HomoExpType = m_f->m_exp[0]->GetExpType(); + + m_coordim = m_f->m_exp[0]->GetExp(0)->GetCoordim(); + + if (HomoExpType == MultiRegions::e3DH1D) + { + int nPlanes = m_f->m_exp[0]->GetZIDs().num_elements(); + if (nPlanes == 1) // halfMode case { - m_f->m_exp[0]->GetCoords(m_fields[0], m_fields[1]); + // do nothing } else { - m_f->m_exp[0]->GetCoords(m_fields[0], m_fields[1], m_fields[2]); + nBases += 1; + m_coordim += 1; + NekDouble tmp = m_numBlocks * (nPlanes - 1); + m_numBlocks = (int)tmp; } + } + else if (HomoExpType == MultiRegions::e3DH2D) + { + nBases += 2; + m_coordim += 2; + } - if (var.size() > m_coordim) - { - // Backward transform all data - for (int i = 0; i < m_f->m_exp.size(); ++i) - { - if (m_f->m_exp[i]->GetPhysState() == false) - { - m_f->m_exp[i]->BwdTrans(m_f->m_exp[i]->GetCoeffs(), - m_f->m_exp[i]->UpdatePhys()); - } - } + m_zoneType = 
(TecplotZoneType)(2*(nBases-1) + 1); - // Add references to m_fields - for (int i = 0; i < m_f->m_exp.size(); ++i) - { - m_fields[i + m_coordim] = m_f->m_exp[i]->UpdatePhys(); - } - } + // Calculate connectivity + CalculateConnectivity(); - // Dump L2 errors of fields. - if (doError) - { - for (int i = 0; i < m_fields.num_elements(); ++i) - { - NekDouble l2err = m_f->m_exp[0]->L2(m_fields[i]); - if (rank == 0) - { - cout << "L 2 error (variable " << var[i] << ") : " - << l2err << endl; - } - } - } + // Set up storage for output fields + m_fields = + Array >(m_f->m_variables.size()+m_coordim); + + // Get coordinates + int totpoints = m_f->m_exp[0]->GetTotPoints(); + + for (int i = 0; i < m_coordim; ++i) + { + m_fields[i] = Array(totpoints); + } + + if (m_coordim == 1) + { + m_f->m_exp[0]->GetCoords(m_fields[0]); + } + else if (m_coordim == 2) + { + m_f->m_exp[0]->GetCoords(m_fields[0], m_fields[1]); } else { - m_coordim = fPts->GetDim(); - - if (fPts->GetNpoints() == 0) - { - return; - } - - // Grab connectivity information. 
- fPts->GetConnectivity(m_conn); + m_f->m_exp[0]->GetCoords(m_fields[0], m_fields[1], m_fields[2]); + } - // Get field names - var = fPts->GetFieldNames(); - var.insert(var.begin(), coordVars, coordVars + m_coordim); + // Add references to m_fields + for (int i = 0; i < m_f->m_variables.size(); ++i) + { + m_fields[i + m_coordim] = m_f->m_exp[i]->UpdatePhys(); + } - switch (fPts->GetPtsType()) - { - case LibUtilities::ePtsFile: - case LibUtilities::ePtsLine: - m_numPoints.resize(1); - m_numPoints[0] = fPts->GetNpoints(); - m_zoneType = eOrdered; - break; - case LibUtilities::ePtsPlane: - m_numPoints.resize(2); - m_numPoints[0] = fPts->GetPointsPerEdge(0); - m_numPoints[1] = fPts->GetPointsPerEdge(1); - m_zoneType = eOrdered; - break; - case LibUtilities::ePtsBox: - m_numPoints.resize(3); - m_numPoints[0] = fPts->GetPointsPerEdge(0); - m_numPoints[1] = fPts->GetPointsPerEdge(1); - m_numPoints[2] = fPts->GetPointsPerEdge(2); - m_zoneType = eOrdered; - break; - case LibUtilities::ePtsTriBlock: - { - m_zoneType = eFETriangle; - for (int i = 0; i < m_conn.size(); ++i) - { - m_numBlocks += m_conn[i].num_elements() / 3; - } - break; - } - case LibUtilities::ePtsTetBlock: - { - m_zoneType = eFETetrahedron; - for (int i = 0; i < m_conn.size(); ++i) - { - m_numBlocks += m_conn[i].num_elements() / 4; - } - break; - } - default: - ASSERTL0(false, "This points type is not supported yet."); - } + WriteTecplotFile(vm); +} - // Get fields and coordinates - m_fields = Array >(var.size()); +void OutputTecplot::OutputFromData(po::variables_map &vm) +{ + ASSERTL0(false, "OutputTecplot can't write using only FieldData."); +} - // We can just grab everything from points. This should be a - // reference, not a copy. 
- fPts->GetPts(m_fields); +fs::path OutputTecplot::GetPath(std::string &filename, + po::variables_map &vm) +{ + int nprocs = m_f->m_comm->GetSize(); + string returnstr(filename); - // Only write header if we're root or FE block; binary files always - // write header - writeHeader = (m_zoneType != eOrdered || rank == 0) || m_binary; + // Amend for parallel output if required + if (nprocs != 1 && !m_oneOutputFile) + { + int rank = m_f->m_comm->GetRank(); + int dot = filename.find_last_of('.'); + string ext = filename.substr(dot, filename.length() - dot); + string procId = "_P" + boost::lexical_cast(rank); + string start = filename.substr(0, dot); + returnstr = start + procId + ext; + } + return fs::path(returnstr); +} - if (doError) - { - NekDouble l2err; - for (int i = 0; i < m_fields.num_elements(); ++i) - { - // calculate rms value - int npts = m_fields[i].num_elements(); +fs::path OutputTecplot::GetFullOutName(std::string &filename, + po::variables_map &vm) +{ + return GetPath(filename, vm); +} - l2err = 0.0; - for (int j = 0; j < npts; ++j) - { - l2err += m_fields[i][j] * m_fields[i][j]; - } +void OutputTecplot::WriteTecplotFile(po::variables_map &vm) +{ + // Variable names + std::string coordVars[] = { "x", "y", "z" }; + vector variables = m_f->m_variables; + variables.insert(variables.begin(), coordVars, coordVars + m_coordim); - m_f->m_comm->AllReduce(l2err, LibUtilities::ReduceSum); - m_f->m_comm->AllReduce(npts, LibUtilities::ReduceSum); + int nprocs = m_f->m_comm->GetSize(); + int rank = m_f->m_comm->GetRank(); - l2err /= npts; - l2err = sqrt(l2err); - if (rank == 0) - { - cout << "L 2 error (variable " << var[i] << ") : " - << l2err << endl; - } - } - } + // Extract the output filename and extension + string filename = m_config["outfile"].as(); + string outFile = LibUtilities::PortablePath(GetFullOutName(filename, vm)); + // Open output file + ofstream outfile; + if ((m_oneOutputFile && rank == 0) || !m_oneOutputFile) + { + outfile.open(outFile.c_str(), 
m_binary ? ios::binary : ios::out); } if (m_oneOutputFile) { // Reduce on number of blocks and number of points. m_f->m_comm->AllReduce(m_numBlocks, LibUtilities::ReduceSum); - for (int i = 0; i < m_numPoints.size(); ++i) - { - m_f->m_comm->AllReduce(m_numPoints[i], LibUtilities::ReduceSum); - } // Root process needs to know how much data everyone else has for // writing in parallel. @@ -430,9 +377,9 @@ void OutputTecplot::Process(po::variables_map &vm) m_f->m_comm->AllReduce(m_rankConnSizes, LibUtilities::ReduceSum); } - if (writeHeader) + if (m_writeHeader) { - WriteTecplotHeader(outfile, var); + WriteTecplotHeader(outfile, variables); } // Write zone data. @@ -444,7 +391,7 @@ void OutputTecplot::Process(po::variables_map &vm) if ((m_oneOutputFile && rank == 0) || !m_oneOutputFile) { - cout << "Written file: " << filename << endl; + cout << "Written file: " << GetFullOutName(filename,vm) << endl; } } @@ -543,16 +490,19 @@ void OutputTecplot::WriteTecplotZone(std::ofstream &outfile) for (int n = 1; n < m_f->m_comm->GetSize(); ++n) { - Array tmp(m_rankFieldSizes[n]); - m_f->m_comm->Recv(n, tmp); - - for (int i = 0; i < m_rankFieldSizes[n]; ++i) + if(m_rankFieldSizes[n]) { - if ((!(i % 1000)) && i) + Array tmp(m_rankFieldSizes[n]); + m_f->m_comm->Recv(n, tmp); + + for (int i = 0; i < m_rankFieldSizes[n]; ++i) { - outfile << std::endl; + if ((!(i % 1000)) && i) + { + outfile << std::endl; + } + outfile << tmp[i] << " "; } - outfile << tmp[i] << " "; } } outfile << std::endl; @@ -560,9 +510,12 @@ void OutputTecplot::WriteTecplotZone(std::ofstream &outfile) } else if (m_oneOutputFile && m_f->m_comm->GetRank() > 0) { - for (int i = 0; i < m_fields.num_elements(); ++i) + if(m_fields[0].num_elements()) { - m_f->m_comm->Send(0, m_fields[i]); + for (int i = 0; i < m_fields.num_elements(); ++i) + { + m_f->m_comm->Send(0, m_fields[i]); + } } } else @@ -585,23 +538,66 @@ void OutputTecplot::WriteTecplotZone(std::ofstream &outfile) } else { - std::string dirs[] = { "I", "J", 
"K" }; - outfile << "Zone"; - for (int i = 0; i < m_numPoints.size(); ++i) + if((m_oneOutputFile && m_f->m_comm->GetRank() == 0) || !m_oneOutputFile) { - outfile << ", " << dirs[i] << "=" << m_numPoints[i]; + std::string dirs[] = { "I", "J", "K" }; + outfile << "Zone"; + for (int i = 0; i < m_numPoints.size(); ++i) + { + outfile << ", " << dirs[i] << "=" << m_numPoints[i]; + } + outfile << ", F=POINT" << std::endl; } - outfile << ", F=POINT" << std::endl; - // Write out coordinates and field data: ordered by each point then each - // field. - for (int i = 0; i < m_fields[0].num_elements(); ++i) + if (m_oneOutputFile && m_f->m_comm->GetRank() == 0) { - for (int j = 0; j < m_fields.num_elements(); ++j) + Array tmp(m_fields.num_elements()); + for (int i = 0; i < m_fields[0].num_elements(); ++i) { - outfile << setw(12) << m_fields[j][i] << " "; + for (int j = 0; j < m_fields.num_elements(); ++j) + { + outfile << setw(12) << m_fields[j][i] << " "; + } + outfile << std::endl; + } + + for (int n = 1; n < m_f->m_comm->GetSize(); ++n) + { + for (int i = 0; i < m_rankFieldSizes[n]; ++i) + { + m_f->m_comm->Recv(n, tmp); + for (int j = 0; j < m_fields.num_elements(); ++j) + { + outfile << setw(12) << tmp[j] << " "; + } + outfile << std::endl; + } + } + } + else if (m_oneOutputFile && m_f->m_comm->GetRank() > 0) + { + Array tmp(m_fields.num_elements()); + for (int i = 0; i < m_fields[0].num_elements(); ++i) + { + for (int j = 0; j < m_fields.num_elements(); ++j) + { + tmp[j] = m_fields[j][i]; + } + m_f->m_comm->Send(0, tmp); + } + } + else + { + // Write out coordinates and field data: ordered by each + // point then each field. 
+ for (int i = 0; i < m_fields[0].num_elements(); ++i) + { + for (int j = 0; j < m_fields.num_elements(); ++j) + { + outfile << setw(12) << m_fields[j][i] << " "; + } + outfile << std::endl; } - outfile << std::endl; } } } @@ -784,14 +780,20 @@ void OutputTecplot::WriteTecplotConnectivity(std::ofstream &outfile) if (m_oneOutputFile && m_f->m_comm->GetRank() > 0) { // Need to amalgamate connectivity information - Array conn(m_totConn); - for (int i = 0, cnt = 0; i < m_conn.size(); ++i) + if (m_totConn) { - Vmath::Vcopy(m_conn[i].num_elements(), &m_conn[i][0], 1, - &conn[cnt], 1); - cnt += m_conn[i].num_elements(); + Array conn(m_totConn); + for (int i = 0, cnt = 0; i < m_conn.size(); ++i) + { + if(m_conn[i].num_elements()) + { + Vmath::Vcopy(m_conn[i].num_elements(), &m_conn[i][0], 1, + &conn[cnt], 1); + cnt += m_conn[i].num_elements(); + } + } + m_f->m_comm->Send(0, conn); } - m_f->m_comm->Send(0, conn); } else { @@ -816,14 +818,17 @@ void OutputTecplot::WriteTecplotConnectivity(std::ofstream &outfile) for (int n = 1; n < m_f->m_comm->GetSize(); ++n) { - Array conn(m_rankConnSizes[n]); - m_f->m_comm->Recv(n, conn); - for (int j = 0; j < conn.num_elements(); ++j) + if(m_rankConnSizes[n]) { - outfile << conn[j] + offset + 1 << " "; - if ((!(j % 1000)) && j) + Array conn(m_rankConnSizes[n]); + m_f->m_comm->Recv(n, conn); + for (int j = 0; j < conn.num_elements(); ++j) { - outfile << std::endl; + outfile << conn[j] + offset + 1 << " "; + if ((!(j % 1000)) && j) + { + outfile << std::endl; + } } } offset += m_rankFieldSizes[n]; diff --git a/library/FieldUtils/OutputModules/OutputTecplot.h b/library/FieldUtils/OutputModules/OutputTecplot.h index 5c33d605d8ae2f91a3d5d887fe5fd9e82ea89ff7..8373cfb439d6bd91068a90bf030fbe6c665065b7 100644 --- a/library/FieldUtils/OutputModules/OutputTecplot.h +++ b/library/FieldUtils/OutputModules/OutputTecplot.h @@ -36,7 +36,7 @@ #ifndef FIELDUTILS_OUTPUTTECPLOT #define FIELDUTILS_OUTPUTTECPLOT -#include "../Module.h" +#include 
"OutputFileBase.h" #include namespace Nektar @@ -58,7 +58,7 @@ enum TecplotZoneType{ /** * @brief Tecplot output class. */ -class OutputTecplot : public OutputModule +class OutputTecplot : public OutputFileBase { public: /// Creates an instance of this class @@ -71,13 +71,36 @@ public: OutputTecplot(FieldSharedPtr f); virtual ~OutputTecplot(); + virtual void Process(po::variables_map &vm); + virtual std::string GetModuleName() + { + return "OutputTecplot"; + } + protected: + /// Write from pts to output file. + virtual void OutputFromPts(po::variables_map &vm); + + /// Write from m_exp to output file. + virtual void OutputFromExp(po::variables_map &vm); + + /// Write from data to output file. + virtual void OutputFromData(po::variables_map &vm); + + virtual fs::path GetPath(std::string &filename, + po::variables_map &vm); + + virtual fs::path GetFullOutName(std::string &filename, + po::variables_map &vm); + /// True if writing binary field output bool m_binary; /// True if writing a single output file bool m_oneOutputFile; + /// True if writing header + bool m_writeHeader; /// Tecplot zone type of output TecplotZoneType m_zoneType; /// Number of points per block in Tecplot file @@ -102,14 +125,11 @@ protected: virtual void WriteTecplotZone(std::ofstream &outfile); virtual void WriteTecplotConnectivity(std::ofstream &outfile); + void WriteTecplotFile(po::variables_map &vm); + int GetNumTecplotBlocks(); void CalculateConnectivity(); - /// Returns this module's name. 
- virtual std::string GetModuleName() - { - return "OutputTecplot"; - } }; /** diff --git a/library/FieldUtils/OutputModules/OutputVtk.cpp b/library/FieldUtils/OutputModules/OutputVtk.cpp index 6b8142d41f237f46cb9bc6b839a7a459609d9a2b..8428770fafcca712e7e3f576dda55315e36731d7 100644 --- a/library/FieldUtils/OutputModules/OutputVtk.cpp +++ b/library/FieldUtils/OutputModules/OutputVtk.cpp @@ -50,7 +50,7 @@ namespace FieldUtils ModuleKey OutputVtk::m_className = GetModuleFactory().RegisterCreatorFunction( ModuleKey(eOutputModule, "vtu"), OutputVtk::create, "Writes a VTU file."); -OutputVtk::OutputVtk(FieldSharedPtr f) : OutputModule(f) +OutputVtk::OutputVtk(FieldSharedPtr f) : OutputFileBase(f) { m_requireEquiSpaced = true; } @@ -59,321 +59,277 @@ OutputVtk::~OutputVtk() { } -void OutputVtk::Process(po::variables_map &vm) +void OutputVtk::OutputFromPts(po::variables_map &vm) { - LibUtilities::PtsFieldSharedPtr fPts = m_f->m_fieldPts; - - // Do nothing if no expansion defined - if (fPts == LibUtilities::NullPtsField && !m_f->m_exp.size()) - { - return; - } - int i, j; - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "OutputVtk: Writing file..." 
<< endl; - } - } + LibUtilities::PtsFieldSharedPtr fPts = m_f->m_fieldPts; // Extract the output filename and extension - string filename = m_config["outfile"].as(); - string path; - - // amend for parallel output if required - if (m_f->m_session->GetComm()->GetSize() != 1) - { - int dot = filename.find_last_of('.'); - string ext = filename.substr(dot, filename.length() - dot); - string start = filename.substr(0, dot); - path = start + "_vtu"; - - boost::format pad("P%1$07d.vtu"); - pad % m_f->m_session->GetComm()->GetRank(); - filename = pad.str(); - - fs::path poutfile(filename.c_str()); - fs::path specPath(path.c_str()); - - if (m_f->m_comm->TreatAsRankZero()) - { - try - { - fs::create_directory(specPath); - } - catch (fs::filesystem_error &e) - { - ASSERTL0(false, "Filesystem error: " + string(e.what())); - } - cout << "Writing files to directory: " << specPath << endl; - } - - fs::path fulloutname = specPath / poutfile; - filename = LibUtilities::PortablePath(fulloutname); - m_f->m_comm->Block(); - } - else - { - fs::path specPath(filename.c_str()); - cout << "Writing: " << specPath << endl; - filename = LibUtilities::PortablePath(specPath); - } + string filename = PrepareOutput(vm); // Write solution. 
ofstream outfile(filename.c_str()); - m_f->m_exp[0]->WriteVtkHeader(outfile); + WriteVtkHeader(outfile); int nfields = 0; - int dim = 0; + int dim = fPts->GetDim(); - vector fieldname; - if (fPts == - LibUtilities::NullPtsField) // standard output in collapsed coordinates + int nvert, vtktype; + switch (fPts->GetPtsType()) { - int nstrips; - if (m_f->m_fielddef.size() == 0) + case LibUtilities::ePtsFile: + case LibUtilities::ePtsLine: { - nfields = 0; + ASSERTL0(false, + "VTK output needs setting up for ePtsFile and ePtsLine"); + break; } - else + case LibUtilities::ePtsPlane: { - nfields = m_f->m_fielddef[0]->m_fields.size(); + ASSERTL0(false, "VTK output needs setting up for PtsPlane"); + break; } - m_f->m_session->LoadParameter("Strip_Z", nstrips, 1); - - // Homogeneous strip variant - for (int s = 0; s < nstrips; ++s) + case LibUtilities::ePtsBox: { - // For each field write out field data for each expansion. - for (i = 0; i < m_f->m_exp[0]->GetNumElmts(); ++i) - { - m_f->m_exp[0]->WriteVtkPieceHeader(outfile, i, s); - - // For this expansion write out each field. - for (j = 0; j < nfields; ++j) - { - m_f->m_exp[s * nfields + j]->WriteVtkPieceData( - outfile, i, m_f->m_fielddef[0]->m_fields[j]); - } - m_f->m_exp[0]->WriteVtkPieceFooter(outfile, i); - } + ASSERTL0(false,"VTK output needs setting up for PtsBox"); + break; } - - if (m_f->m_exp[0]->GetNumElmts() == 0) + case LibUtilities::ePtsSegBlock: { - WriteEmptyVtkPiece(outfile); + nvert = 2; + vtktype = 3; + break; } - // save field names for parallel output - for (i = 0; i < nfields; ++i) + case LibUtilities::ePtsTriBlock: { - fieldname.push_back(m_f->m_fielddef[0]->m_fields[i]); + nvert = 3; + vtktype = 5; + break; } - } - else // write out data stored in fPts (for example if equispaced output is - // called). 
- { - int i = 0; - int j = 0; - - dim = fPts->GetDim(); - - int nvert, vtktype; - switch (fPts->GetPtsType()) + case LibUtilities::ePtsTetBlock: { - case LibUtilities::ePtsFile: - case LibUtilities::ePtsLine: - { - ASSERTL0(false, - "VTK output needs setting up for ePtsFile and ePtsLine"); - break; - } - case LibUtilities::ePtsPlane: - { - ASSERTL0(false, "VTK output needs settig up for PtsPlane"); - break; - } - case LibUtilities::ePtsBox: - { - ASSERTL0(false,"VTK output needs settig up for PtsBox"); - break; - } - case LibUtilities::ePtsSegBlock: - { - nvert = 2; - vtktype = 3; - break; - } - case LibUtilities::ePtsTriBlock: - { - nvert = 3; - vtktype = 5; - break; - } - case LibUtilities::ePtsTetBlock: - { - nvert = 4; - vtktype = 10; - break; - } - default: - ASSERTL0(false, "ptsType not supported yet."); + nvert = 4; + vtktype = 10; + break; } + default: + ASSERTL0(false, "ptsType not supported yet."); + } + + vector > ptsConn; + fPts->GetConnectivity(ptsConn); - vector > ptsConn; - fPts->GetConnectivity(ptsConn); + nfields = fPts->GetNFields(); - nfields = fPts->GetNFields(); + int nPts = fPts->GetNpoints(); + int numBlocks = 0; + for (i = 0; i < ptsConn.size(); ++i) + { + numBlocks += ptsConn[i].num_elements() / nvert; + } - int nPts = fPts->GetNpoints(); - int numBlocks = 0; - for (i = 0; i < ptsConn.size(); ++i) + // write out pieces of data. + outfile << " " << endl; + outfile << " " << endl; + outfile << " " + << endl; + for (i = 0; i < nPts; ++i) + { + for (j = 0; j < dim; ++j) { - numBlocks += ptsConn[i].num_elements() / nvert; + outfile << " " << setprecision(8) << scientific + << fPts->GetPointVal(j, i) << " "; } - - // write out pieces of data. 
- outfile << " " << endl; - outfile << " " << endl; - outfile << " " - << endl; - for (i = 0; i < nPts; ++i) + for (j = dim; j < 3; ++j) { - for (j = 0; j < dim; ++j) - { - outfile << " " << setprecision(8) << scientific - << fPts->GetPointVal(j, i) << " "; - } - for (j = dim; j < 3; - ++j) // pack to 3D since paraview does not seem to handle 2D - { - outfile << " 0.000000"; - } - outfile << endl; + // pack to 3D since paraview does not seem to handle 2D + outfile << " 0.000000"; } - outfile << " " << endl; - outfile << " " << endl; - outfile << " " << endl; - outfile << " " << endl; + outfile << endl; + } + outfile << " " << endl; + outfile << " " << endl; + outfile << " " << endl; + outfile << " " << endl; - // dump connectivity data if it exists - outfile << " "; - int cnt = 1; - for (i = 0; i < ptsConn.size(); ++i) + // dump connectivity data if it exists + outfile << " "; + int cnt = 1; + for (i = 0; i < ptsConn.size(); ++i) + { + for (j = 0; j < ptsConn[i].num_elements(); ++j) { - for (j = 0; j < ptsConn[i].num_elements(); ++j) + outfile << ptsConn[i][j] << " "; + if ((!(cnt % nvert)) && cnt) { - outfile << ptsConn[i][j] << " "; - if ((!(cnt % nvert)) && cnt) - { - outfile << std::endl; - outfile << " "; - } - cnt++; + outfile << std::endl; + outfile << " "; } + cnt++; } - outfile << " " << endl; - outfile << " " << endl; + } + outfile << " " << endl; + outfile << " " << endl; + outfile << " "; + for (i = 0; i < numBlocks; ++i) + { + outfile << i * nvert + nvert << " "; + } + outfile << endl; + outfile << " " << endl; + outfile << " " << endl; + outfile << " "; + for (i = 0; i < numBlocks; ++i) + { + outfile << vtktype << " "; + } + outfile << endl; + outfile << " " << endl; + outfile << " " << endl; + outfile << " " << endl; + + // printing the fields + for (j = 0; j < nfields; ++j) + { + outfile << " m_variables[j] << "\">" << endl; outfile << " "; - for (i = 0; i < numBlocks; ++i) - { - outfile << i * nvert + nvert << " "; - } - outfile << endl; - outfile 
<< " " << endl; - outfile << " " << endl; - outfile << " "; - for (i = 0; i < numBlocks; ++i) + for (i = 0; i < fPts->GetNpoints(); ++i) { - outfile << vtktype << " "; + outfile << fPts->GetPointVal(dim + j, i) << " "; } outfile << endl; outfile << " " << endl; - outfile << " " << endl; - outfile << " " << endl; + } - // printing the fields - for (j = 0; j < nfields; ++j) + outfile << " " << endl; + outfile << " " << endl; + + WriteVtkFooter(outfile); + cout << "Written file: " << filename << endl; + + // output parallel outline info if necessary + if ( (m_f->m_comm->GetRank() == 0) && + (m_f->m_comm->GetSize() != 1)) + { + WritePVtu(vm); + cout << "Written file: " << filename << endl; + } +} + +void OutputVtk::OutputFromExp(po::variables_map &vm) +{ + int i,j; + // Extract the output filename and extension + string filename = PrepareOutput(vm); + + // Write solution. + ofstream outfile(filename.c_str()); + WriteVtkHeader(outfile); + int nfields = m_f->m_variables.size(); + + int nstrips; + m_f->m_session->LoadParameter("Strip_Z", nstrips, 1); + + // Homogeneous strip variant + for (int s = 0; s < nstrips; ++s) + { + // For each field write out field data for each expansion. + for (i = 0; i < m_f->m_exp[0]->GetNumElmts(); ++i) { - fieldname.push_back(fPts->GetFieldName(j)); - outfile << " GetFieldName(j) << "\">" << endl; - outfile << " "; - for (i = 0; i < fPts->GetNpoints(); ++i) + m_f->m_exp[0]->WriteVtkPieceHeader(outfile, i, s); + + // For this expansion write out each field. 
+ for (j = 0; j < nfields; ++j) { - outfile << fPts->GetPointVal(dim + j, i) << " "; + m_f->m_exp[s * nfields + j]->WriteVtkPieceData( + outfile, i, m_f->m_variables[j]); } - outfile << endl; - outfile << " " << endl; + m_f->m_exp[0]->WriteVtkPieceFooter(outfile, i); } + } - outfile << " " << endl; - outfile << " " << endl; + if (m_f->m_exp[0]->GetNumElmts() == 0) + { + WriteEmptyVtkPiece(outfile); } - m_f->m_exp[0]->WriteVtkFooter(outfile); + WriteVtkFooter(outfile); cout << "Written file: " << filename << endl; // output parallel outline info if necessary - if (m_f->m_comm->GetRank() == 0) + if ( (m_f->m_comm->GetRank() == 0) && + (m_f->m_comm->GetSize() != 1)) { - ASSERTL1(fieldname.size() == nfields, - "fieldname not the same size as nfields"); - int nprocs = m_f->m_comm->GetSize(); - if (nprocs != 1) - { - filename = m_config["outfile"].as(); - int dot = filename.find_last_of('.'); - string body = filename.substr(0, dot); - filename = body + ".pvtu"; - - ofstream outfile(filename.c_str()); - - outfile << "" << endl; - outfile << "" << endl; - outfile << "" << endl; - outfile << " " << endl; - outfile << " " << endl; - outfile << "" << endl; - outfile << "" << endl; - outfile << "" - << endl; - outfile << "" - << endl; - outfile << "" - << endl; - outfile << "" << endl; - outfile << "" << endl; - for (int i = 0; i < nfields; ++i) - { - outfile << "" << endl; - } - outfile << "" << endl; + WritePVtu(vm); + } +} - for (int i = 0; i < nprocs; ++i) - { - boost::format pad("P%1$07d.vtu"); - pad % i; - outfile << "" << endl; - } - outfile << "" << endl; - outfile << "" << endl; - cout << "Written file: " << filename << endl; - } +void OutputVtk::OutputFromData(po::variables_map &vm) +{ + ASSERTL0(false, "OutputVtk can't write using only FieldData."); +} + +fs::path OutputVtk::GetPath(std::string &filename, + po::variables_map &vm) +{ + int nprocs = m_f->m_comm->GetSize(); + fs::path specPath; + if (nprocs == 1) + { + specPath = fs::path(filename); + } + else + { + // 
replace .vtu by _vtu + int dot = filename.find_last_of('.'); + string path = filename.substr(0, dot) + "_vtu"; + specPath = fs::path(path); + } + return fs::path(specPath); +} + +fs::path OutputVtk::GetFullOutName(std::string &filename, + po::variables_map &vm) +{ + int nprocs = m_f->m_comm->GetSize(); + + fs::path fulloutname; + if (nprocs == 1) + { + fulloutname = filename; } + else + { + // Guess at filename that might belong to this process. + boost::format pad("P%1$07d.%2$s"); + pad % m_f->m_comm->GetRank() % "vtu"; + + // Generate full path name + fs::path specPath = GetPath(filename, vm); + fs::path poutfile(pad.str()); + fulloutname = specPath / poutfile; + } + return fulloutname; +} + +void OutputVtk::WriteVtkHeader(std::ostream &outfile) +{ + outfile << "" << endl; + outfile << "" << endl; + outfile << " " << endl; +} + +void OutputVtk::WriteVtkFooter(std::ostream &outfile) +{ + outfile << " " << endl; + outfile << "" << endl; } void OutputVtk::WriteEmptyVtkPiece(std::ofstream &outfile) @@ -407,5 +363,90 @@ void OutputVtk::WriteEmptyVtkPiece(std::ofstream &outfile) outfile << " " << endl; outfile << " " << endl; } + +void OutputVtk::WritePVtu(po::variables_map &vm) +{ + string filename = m_config["outfile"].as(); + int dot = filename.find_last_of('.'); + string body = filename.substr(0, dot); + filename = body + ".pvtu"; + + ofstream outfile(filename.c_str()); + + int nprocs = m_f->m_comm->GetSize(); + string path = LibUtilities::PortablePath(GetPath(filename,vm)); + + outfile << "" << endl; + outfile << "" << endl; + outfile << "" << endl; + outfile << " " << endl; + outfile << " " << endl; + outfile << "" << endl; + outfile << "" << endl; + outfile << "" + << endl; + outfile << "" + << endl; + outfile << "" + << endl; + outfile << "" << endl; + outfile << "" << endl; + for (int i = 0; i < m_f->m_variables.size(); ++i) + { + outfile << "m_variables[i] << "\"/>" << endl; + } + outfile << "" << endl; + + for (int i = 0; i < nprocs; ++i) + { + boost::format 
pad("P%1$07d.vtu"); + pad % i; + outfile << "" << endl; + } + outfile << "" << endl; + outfile << "" << endl; + + cout << "Written file: " << filename << endl; +} + +std::string OutputVtk::PrepareOutput(po::variables_map &vm) +{ + // Extract the output filename and extension + string filename = m_config["outfile"].as(); + + fs::path specPath = GetPath(filename,vm); + fs::path fulloutname = GetFullOutName(filename,vm); + filename = LibUtilities::PortablePath(fulloutname); + + if (m_f->m_comm->GetSize() != 1) + { + if (m_f->m_comm->TreatAsRankZero()) + { + try + { + fs::create_directory(specPath); + } + catch (fs::filesystem_error &e) + { + ASSERTL0(false, "Filesystem error: " + string(e.what())); + } + cout << "Writing files to directory: " << specPath << endl; + } + m_f->m_comm->Block(); + } + else + { + cout << "Writing: " << specPath << endl; + } + return filename; +} + } } diff --git a/library/FieldUtils/OutputModules/OutputVtk.h b/library/FieldUtils/OutputModules/OutputVtk.h index 3a4f579c6a137bc9769e8b5a89f02b35119f7e8f..18c2193017c307f7f9d071a4f77a0cb97099d68b 100644 --- a/library/FieldUtils/OutputModules/OutputVtk.h +++ b/library/FieldUtils/OutputModules/OutputVtk.h @@ -36,7 +36,7 @@ #ifndef FIELDUTILS_OUTPUTVTK #define FIELDUTILS_OUTPUTVTK -#include "../Module.h" +#include "OutputFileBase.h" #include namespace Nektar @@ -45,7 +45,7 @@ namespace FieldUtils { /// Converter from fld to vtk. -class OutputVtk : public OutputModule +class OutputVtk : public OutputFileBase { public: /// Creates an instance of this class @@ -58,15 +58,37 @@ public: OutputVtk(FieldSharedPtr f); virtual ~OutputVtk(); - /// Write fld to output file. - virtual void Process(po::variables_map &vm); - - void WriteEmptyVtkPiece(std::ofstream &outfile); - virtual std::string GetModuleName() { return "OutputVtk"; } + +protected: + /// Write from pts to output file. + virtual void OutputFromPts(po::variables_map &vm); + + /// Write from m_exp to output file. 
+ virtual void OutputFromExp(po::variables_map &vm); + + /// Write from data to output file. + virtual void OutputFromData(po::variables_map &vm); + + virtual fs::path GetPath(std::string &filename, + po::variables_map &vm); + + virtual fs::path GetFullOutName(std::string &filename, + po::variables_map &vm); + +private: + void WriteVtkHeader(std::ostream &outfile); + + void WriteVtkFooter(std::ostream &outfile); + + void WriteEmptyVtkPiece(std::ofstream &outfile); + + void WritePVtu(po::variables_map &vm); + + std::string PrepareOutput(po::variables_map &vm); }; } } diff --git a/library/FieldUtils/OutputModules/OutputXml.cpp b/library/FieldUtils/OutputModules/OutputXml.cpp index 37c2bb79063d728bb67827b25334ee68932b6128..d2c25356116642085de912cdf71ccd11755aa3b7 100644 --- a/library/FieldUtils/OutputModules/OutputXml.cpp +++ b/library/FieldUtils/OutputModules/OutputXml.cpp @@ -62,14 +62,6 @@ void OutputXml::Process(po::variables_map &vm) return; } - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "OutputXml: Writing file..." 
<< endl; - } - } - // Extract the output filename and extension string filename = m_config["outfile"].as(); diff --git a/library/FieldUtils/OutputModules/OutputXml.h b/library/FieldUtils/OutputModules/OutputXml.h index 5e9d88f0e02809d2bfe3bcee9f7cf89ee76880e5..fd502809510a4275cb4a92595958b07e7cd5693e 100644 --- a/library/FieldUtils/OutputModules/OutputXml.h +++ b/library/FieldUtils/OutputModules/OutputXml.h @@ -64,6 +64,17 @@ public: { return "OutputXml"; } + + virtual std::string GetModuleDescription() + { + return "Writing file"; + } + + virtual ModulePriority GetModulePriority() + { + return eOutput; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessAddCompositeID.cpp b/library/FieldUtils/ProcessModules/ProcessAddCompositeID.cpp index 752bda7c20ba467257d9f9e5ddb25973399f5981..45752b026ed1061b346123d37fdcb94759956640 100644 --- a/library/FieldUtils/ProcessModules/ProcessAddCompositeID.cpp +++ b/library/FieldUtils/ProcessModules/ProcessAddCompositeID.cpp @@ -66,26 +66,21 @@ ProcessAddCompositeID::~ProcessAddCompositeID() void ProcessAddCompositeID::Process(po::variables_map &vm) { - if (m_f->m_verbose) + int nfields = m_f->m_variables.size(); + m_f->m_variables.push_back("compositeID"); + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - if (m_f->m_comm->GetRank() == 0) - { - cout << "ProcessAddCompositeID: Adding composite ID as a new field" - << endl; - } + return; } - int nfields = 0; - int NumHomogeneousDir = 0; + int NumHomogeneousDir = m_f->m_numHomogeneousDir; MultiRegions::ExpListSharedPtr exp; - if (m_f->m_fielddef.size()) + if (nfields) { - nfields = m_f->m_fielddef[0]->m_fields.size(); - NumHomogeneousDir = m_f->m_fielddef[0]->m_numHomogeneousDir; - m_f->m_exp.resize(nfields + 1); - exp = m_f->AppendExpList(NumHomogeneousDir, "Composite ID"); + exp = m_f->AppendExpList(NumHomogeneousDir); m_f->m_exp[nfields] = exp; } @@ -106,7 +101,7 @@ void ProcessAddCompositeID::Process(po::variables_map &vm) { 
LocalRegions::ExpansionSharedPtr elmt = exp->GetExp(n); - // loop over composite list and search for geomtry pointer in list + // loop over composite list and search for geometry pointer in list for (it = CompositeMap.begin(); it != CompositeMap.end(); ++it) { if (find(it->second->begin(), it->second->end(), elmt->GetGeom()) != @@ -130,30 +125,6 @@ void ProcessAddCompositeID::Process(po::variables_map &vm) // forward transform exp->FwdTrans_IterPerExp(exp->GetPhys(), exp->UpdateCoeffs()); - - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - // copy in previous fields if they exist. - for (int i = 0; i < nfields; ++i) - { - for (int j = 0; j < FieldDef.size(); ++j) - { - FieldDef[j]->m_fields.push_back(m_f->m_fielddef[0]->m_fields[i]); - m_f->m_exp[i]->AppendFieldData(FieldDef[j], FieldData[j]); - } - } - - // append composite id field - for (int j = 0; j < FieldDef.size(); ++j) - { - FieldDef[j]->m_fields.push_back("compositeID"); - m_f->m_exp[nfields]->AppendFieldData(FieldDef[j], FieldData[j]); - } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/ProcessModules/ProcessAddCompositeID.h b/library/FieldUtils/ProcessModules/ProcessAddCompositeID.h index baba5514be6167eb649528ab97fdde9295410db6..096f34731b53f3215f4336dd5c1ac525c012727f 100644 --- a/library/FieldUtils/ProcessModules/ProcessAddCompositeID.h +++ b/library/FieldUtils/ProcessModules/ProcessAddCompositeID.h @@ -68,6 +68,16 @@ public: return "ProcessAddCompositeID"; } + virtual std::string GetModuleDescription() + { + return "Adding composite ID as a new field"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + private: }; } diff --git a/library/FieldUtils/ProcessModules/ProcessAddFld.cpp b/library/FieldUtils/ProcessModules/ProcessAddFld.cpp index 1b7613de40baf2026c4693728da622bfcb3993ae..cd61c17dc6fa957f4b8498f531d3a7d97971fe3a 100644 --- 
a/library/FieldUtils/ProcessModules/ProcessAddFld.cpp +++ b/library/FieldUtils/ProcessModules/ProcessAddFld.cpp @@ -60,8 +60,14 @@ ProcessAddFld::ProcessAddFld(FieldSharedPtr f) : ProcessModule(f) m_config["fromfld"] = ConfigOption(false, "NotSet", "Fld file form which to add field"); - ASSERTL0(m_config["fromfld"].as().compare("NotSet") != 0, - "Need to specify fromfld=file.fld "); + if(f->m_inputfiles.count("xml")) + { + m_priority = eModifyExp; + } + else + { + m_priority = eModifyFieldData; + } } ProcessAddFld::~ProcessAddFld() @@ -70,54 +76,62 @@ void ProcessAddFld::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessAddFld: Adding new fld to input fld..." << endl; - } - } - - ASSERTL0(m_f->m_data.size() != 0, "No input data defined"); - string scalestr = m_config["scale"].as(); NekDouble scale = boost::lexical_cast(scalestr); + ASSERTL0(m_config["fromfld"].as().compare("NotSet") != 0, + "Need to specify fromfld=file.fld "); string fromfld = m_config["fromfld"].as(); - FieldSharedPtr fromField = boost::shared_ptr(new Field()); - if (m_f->m_exp.size()) + vector fromFieldDef; + vector > fromFieldData; + + if (m_f->m_graph) { - // Set up ElementGIDs in case of parallel processing - Array ElementGIDs(m_f->m_exp[0]->GetExpSize()); - for (int i = 0; i < m_f->m_exp[0]->GetExpSize(); ++i) + const SpatialDomains::ExpansionMap &expansions = + m_f->m_graph->GetExpansions(); + + // if Range has been specified it is possible to have a + // partition which is empty so check this and return if + // no elements present.
+ + if (!expansions.size()) { - ElementGIDs[i] = m_f->m_exp[0]->GetExp(i)->GetGeom()->GetGlobalID(); + return; + } + + Array ElementGIDs(expansions.size()); + SpatialDomains::ExpansionMap::const_iterator expIt; + + int i = 0; + for (expIt = expansions.begin(); expIt != expansions.end(); ++expIt) + { + ElementGIDs[i++] = expIt->second->m_geomShPtr->GetGlobalID(); } m_f->FieldIOForFile(fromfld)->Import( - fromfld, fromField->m_fielddef, fromField->m_data, + fromfld, fromFieldDef, fromFieldData, LibUtilities::NullFieldMetaDataMap, ElementGIDs); } else { m_f->FieldIOForFile(fromfld)->Import( - fromfld, fromField->m_fielddef, fromField->m_data, + fromfld, fromFieldDef, fromFieldData, LibUtilities::NullFieldMetaDataMap); } bool samelength = true; - if (fromField->m_data.size() != m_f->m_data.size()) + if (fromFieldData.size() != m_f->m_data.size()) { samelength = false; } // scale input field - for (int i = 0; i < fromField->m_data.size(); ++i) + for (int i = 0; i < fromFieldData.size(); ++i) { - int datalen = fromField->m_data[i].size(); + int datalen = fromFieldData[i].size(); - Vmath::Smul(datalen, scale, &(fromField->m_data[i][0]), 1, - &(fromField->m_data[i][0]), 1); + Vmath::Smul(datalen, scale, &(fromFieldData[i][0]), 1, + &(fromFieldData[i][0]), 1); if (samelength) { @@ -128,23 +142,29 @@ void ProcessAddFld::Process(po::variables_map &vm) } } - if (samelength == true) + if (m_priority == eModifyFieldData) { + ASSERTL0(samelength == true, + "Input fields have partitions of different length and so xml " + "file needs to be specified"); for (int i = 0; i < m_f->m_data.size(); ++i) { int datalen = m_f->m_data[i].size(); Vmath::Vadd(datalen, &(m_f->m_data[i][0]), 1, - &(fromField->m_data[i][0]), 1, &(m_f->m_data[i][0]), 1); + &(fromFieldData[i][0]), 1, &(m_f->m_data[i][0]), 1); } + } else { - ASSERTL0(m_f->m_exp.size() != 0, - "Input fields have partitions of different length and so xml " - "file needs to be specified"); + // Skip in case of empty partition + if 
(m_f->m_exp[0]->GetNumElmts() == 0) + { + return; + } - int nfields = m_f->m_fielddef[0]->m_fields.size(); + int nfields = m_f->m_variables.size(); int ncoeffs = m_f->m_exp[0]->GetNcoeffs(); Array SaveFld(ncoeffs); @@ -152,52 +172,30 @@ void ProcessAddFld::Process(po::variables_map &vm) { Vmath::Vcopy(ncoeffs, m_f->m_exp[j]->GetCoeffs(), 1, SaveFld, 1); - // since expansion is set up according to m_f search for same - // variable in new field - int nfield; - for (nfield = 0; nfield < fromField->m_fielddef[0]->m_fields.size(); - ++nfield) - { - if (fromField->m_fielddef[0]->m_fields[nfield] == - m_f->m_fielddef[0]->m_fields[j]) - { - break; - } - } + // Check if new field has this variable + vector::iterator it = + find (fromFieldDef[0]->m_fields.begin(), + fromFieldDef[0]->m_fields.end(), + m_f->m_variables[j]); - ASSERTL0(nfield != fromField->m_fielddef[0]->m_fields.size(), - "Could not find field " + m_f->m_fielddef[0]->m_fields[j] + - " in from field"); + ASSERTL0(it != fromFieldDef[0]->m_fields.end(), + "Could not find field " + m_f->m_variables[j] + " in from field"); // load new field - for (int i = 0; i < fromField->m_data.size(); ++i) + for (int i = 0; i < fromFieldData.size(); ++i) { m_f->m_exp[j]->ExtractDataToCoeffs( - fromField->m_fielddef[i], fromField->m_data[i], - fromField->m_fielddef[i]->m_fields[nfield], + fromFieldDef[i], fromFieldData[i], + m_f->m_variables[j], m_f->m_exp[j]->UpdateCoeffs()); } Vmath::Vadd(ncoeffs, m_f->m_exp[j]->GetCoeffs(), 1, SaveFld, 1, m_f->m_exp[j]->UpdateCoeffs(), 1); + m_f->m_exp[j]->BwdTrans( + m_f->m_exp[j]->GetCoeffs(), + m_f->m_exp[j]->UpdatePhys()); } - - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for (int i = 0; i < nfields; ++i) - { - for (int j = 0; j < FieldDef.size(); ++j) - { - FieldDef[j]->m_fields.push_back( - m_f->m_fielddef[0]->m_fields[i]); - m_f->m_exp[i]->AppendFieldData(FieldDef[j], FieldData[j]); - } - } - - m_f->m_fielddef = 
FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/ProcessModules/ProcessAddFld.h b/library/FieldUtils/ProcessModules/ProcessAddFld.h index c748f445a2e7ca1e150f08fd191186e0c16a5bf0..ed5dfab8f8648cba33a15520e7c61bc3b4556e98 100644 --- a/library/FieldUtils/ProcessModules/ProcessAddFld.h +++ b/library/FieldUtils/ProcessModules/ProcessAddFld.h @@ -68,7 +68,19 @@ public: return "ProcessAddFld"; } + virtual std::string GetModuleDescription() + { + return "Adding new fld to input fld"; + } + + virtual ModulePriority GetModulePriority() + { + return m_priority; + } + private: + ModulePriority m_priority; + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessBoundaryExtract.cpp b/library/FieldUtils/ProcessModules/ProcessBoundaryExtract.cpp index 89ecb2825b5463a6da8d25e3bbc4b70da3e22938..53254f31b19ebbd1799c2f0173608cb6e7318233 100644 --- a/library/FieldUtils/ProcessModules/ProcessBoundaryExtract.cpp +++ b/library/FieldUtils/ProcessModules/ProcessBoundaryExtract.cpp @@ -57,9 +57,7 @@ ProcessBoundaryExtract::ProcessBoundaryExtract(FieldSharedPtr f) : ProcessModule(f) { // set up dafault values. - m_config["bnd"] = ConfigOption(false, "All", "Boundary to be extracted"); - m_config["fldtoboundary"] = - ConfigOption(true, "NotSet", "Extract fld values to boundary"); + m_config["bnd"] = ConfigOption(false, "All", "Boundary to be processed"); m_config["addnormals"] = ConfigOption(true, "NotSet", "Add normals to output"); @@ -74,45 +72,12 @@ ProcessBoundaryExtract::~ProcessBoundaryExtract() void ProcessBoundaryExtract::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessBoundaryExtract: Setting up boundary extraction..." 
- << endl; - } - } - - m_f->m_fldToBnd = m_config["fldtoboundary"].m_beenSet; m_f->m_addNormals = m_config["addnormals"].m_beenSet; - // check for correct input files - if ((m_f->m_inputfiles.count("xml") == 0) && - (m_f->m_inputfiles.count("xml.gz") == 0)) - { - cout << "An xml or xml.gz input file must be specified for the " - "boundary extraction module" - << endl; - exit(3); - } - - if (m_f->m_fldToBnd) - { - if ((m_f->m_inputfiles.count("fld") == 0) && - (m_f->m_inputfiles.count("chk") == 0) && - (m_f->m_inputfiles.count("rst") == 0)) - { - cout << "A fld or chk or rst input file must be specified for " - << "the boundary extraction module with fldtoboundary option." - << endl; - - exit(3); - } - } - // Set up Field options to output boundary fld string bvalues = m_config["bnd"].as(); + vector bndRegions; if (boost::iequals(bvalues, "All")) { int numBndExp = 0; @@ -128,26 +93,44 @@ void ProcessBoundaryExtract::Process(po::variables_map &vm) numBndExp = max(numBndExp, breg_it->first); } // assuming all boundary regions are consecutive number if - // regions is one more tham maximum id + // regions is one more than maximum id numBndExp++; // not all partitions in parallel touch all boundaries so // find maximum number of boundaries - m_f->m_session->GetComm()->AllReduce(numBndExp, - LibUtilities::ReduceMax); + m_f->m_comm->AllReduce(numBndExp, LibUtilities::ReduceMax); // THis presumes boundary regions are numbered consecutively for (int i = 0; i < numBndExp; ++i) { - m_f->m_bndRegionsToWrite.push_back(i); + bndRegions.push_back(i); } } else { ASSERTL0(ParseUtils::GenerateOrderedVector(bvalues.c_str(), - m_f->m_bndRegionsToWrite), + bndRegions), "Failed to interpret bnd values string"); } + + if(m_f->m_bndRegionsToWrite.size()) + { + // This was already called. 
Just check if the bnd option is the same + ASSERTL0(m_f->m_bndRegionsToWrite == bndRegions, + "Incompatible bnd parameters."); + } + else + { + m_f->m_bndRegionsToWrite = bndRegions; + + if (m_f->m_exp[0]->GetNumElmts() != 0) + { + for (int i = 0; i < m_f->m_exp.size(); ++i) + { + m_f->m_exp[i]->FillBndCondFromField(); + } + } + } } } } diff --git a/library/FieldUtils/ProcessModules/ProcessBoundaryExtract.h b/library/FieldUtils/ProcessModules/ProcessBoundaryExtract.h index 6eb56ca12a17190a24ed813be3dcef2d68d58d75..6207938ec287b5ac7165fde5be08d9538c5c86c5 100644 --- a/library/FieldUtils/ProcessModules/ProcessBoundaryExtract.h +++ b/library/FieldUtils/ProcessModules/ProcessBoundaryExtract.h @@ -65,6 +65,17 @@ public: { return "ProcessBoundaryExtract"; } + + virtual std::string GetModuleDescription() + { + return "Setting up boundary extraction"; + } + + virtual ModulePriority GetModulePriority() + { + return eBndExtraction; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessC0Projection.cpp b/library/FieldUtils/ProcessModules/ProcessC0Projection.cpp index ea7a73e3d88e0422471563f7cc116f46f9259671..5cd492d44fd3276f6a8e94a5bc13b09fc31fd41b 100644 --- a/library/FieldUtils/ProcessModules/ProcessC0Projection.cpp +++ b/library/FieldUtils/ProcessModules/ProcessC0Projection.cpp @@ -74,13 +74,10 @@ ProcessC0Projection::~ProcessC0Projection() void ProcessC0Projection::Process(po::variables_map &vm) { - if (m_f->m_verbose) + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessC0Projection: Projecting field into C0 space..." 
- << endl; - } + return; } // ensure not using diagonal preconditioner since tends not to converge fo @@ -136,7 +133,7 @@ void ProcessC0Projection::Process(po::variables_map &vm) m_f->m_declareExpansionAsContField = true; m_f->m_requireBoundaryExpansion = false; C0ProjectExp[0] = m_f->AppendExpList( - m_f->m_fielddef[0]->m_numHomogeneousDir, "DefaultVar", true); + m_f->m_numHomogeneousDir, "DefaultVar", true); m_f->m_declareExpansionAsContField = savedef; m_f->m_requireBoundaryExpansion = savedef2; for (int i = 1; i < nfields; ++i) @@ -212,10 +209,6 @@ void ProcessC0Projection::Process(po::variables_map &vm) Velocity[j] = Array(npoints, 0.0); } - C0ProjectExp[processFields[i]]->BwdTrans( - m_f->m_exp[processFields[i]]->GetCoeffs(), - m_f->m_exp[processFields[i]]->UpdatePhys()); - Vmath::Smul(npoints, -lambda, m_f->m_exp[processFields[i]]->GetPhys(), 1, forcing, 1); @@ -233,33 +226,16 @@ void ProcessC0Projection::Process(po::variables_map &vm) } else { - C0ProjectExp[processFields[i]]->BwdTrans( - m_f->m_exp[processFields[i]]->GetCoeffs(), - m_f->m_exp[processFields[i]]->UpdatePhys()); C0ProjectExp[processFields[i]]->FwdTrans( m_f->m_exp[processFields[i]]->GetPhys(), m_f->m_exp[processFields[i]]->UpdateCoeffs()); } } + C0ProjectExp[processFields[i]]->BwdTrans( + m_f->m_exp[processFields[i]]->GetCoeffs(), + m_f->m_exp[processFields[i]]->UpdatePhys()); } - // reset FieldDef in case of serial input and parallel output - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - // reset up FieldData with new values before projecting - std::vector > FieldData(FieldDef.size()); - - for (int i = 0; i < nfields; ++i) - { - for (int j = 0; j < FieldDef.size(); ++j) - { - FieldDef[j]->m_fields.push_back(m_f->m_fielddef[0]->m_fields[i]); - m_f->m_exp[i]->AppendFieldData(FieldDef[j], FieldData[j]); - } - } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/ProcessModules/ProcessC0Projection.h 
b/library/FieldUtils/ProcessModules/ProcessC0Projection.h index d10caa9d4335a7da5beab149c9da2f6a7cc2b3eb..f0660c2ff1757b082d343a5f389a0b22284b8c3d 100644 --- a/library/FieldUtils/ProcessModules/ProcessC0Projection.h +++ b/library/FieldUtils/ProcessModules/ProcessC0Projection.h @@ -68,6 +68,16 @@ public: return "ProcessC0Projection"; } + virtual std::string GetModuleDescription() + { + return "Projecting field into C0 space"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + private: }; } diff --git a/library/FieldUtils/ProcessModules/ProcessCombineAvg.cpp b/library/FieldUtils/ProcessModules/ProcessCombineAvg.cpp index 5a461bea2b5a328943903b13f4d745ee8132c000..6c508441e1d4c3fa7957c0a394b631f7a386b46b 100644 --- a/library/FieldUtils/ProcessModules/ProcessCombineAvg.cpp +++ b/library/FieldUtils/ProcessModules/ProcessCombineAvg.cpp @@ -58,9 +58,6 @@ ProcessCombineAvg::ProcessCombineAvg(FieldSharedPtr f) : ProcessModule(f) { m_config["fromfld"] = ConfigOption(false, "NotSet", "Fld file form which to add field"); - - ASSERTL0(m_config["fromfld"].as().compare("NotSet") != 0, - "Need to specify fromfld=file.fld "); } ProcessCombineAvg::~ProcessCombineAvg() @@ -69,25 +66,22 @@ ProcessCombineAvg::~ProcessCombineAvg() void ProcessCombineAvg::Process(po::variables_map &vm) { - if (m_f->m_verbose) + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessCombineAvg: Combining new fld into input avg fld..." 
- << endl; - } + return; } - ASSERTL0(m_f->m_exp.size() != 0, "No input expansion defined"); + ASSERTL0(m_config["fromfld"].as().compare("NotSet") != 0, + "Need to specify fromfld=file.fld "); - int nfields = m_f->m_fielddef[0]->m_fields.size(); + int nfields = m_f->m_variables.size(); int nq = m_f->m_exp[0]->GetTotPoints(); int expdim = m_f->m_graph->GetMeshDimension(); int spacedim = expdim; - if ((m_f->m_fielddef[0]->m_numHomogeneousDir) == 1 || - (m_f->m_fielddef[0]->m_numHomogeneousDir) == 2) + if ((m_f->m_numHomogeneousDir) == 1 || (m_f->m_numHomogeneousDir) == 2) { - spacedim += m_f->m_fielddef[0]->m_numHomogeneousDir; + spacedim += m_f->m_numHomogeneousDir; } // Allocate storage for new field and correction (for Reynolds stress) @@ -119,7 +113,7 @@ void ProcessCombineAvg::Process(po::variables_map &vm) for (int j = 0; j < nfields; ++j) { ASSERTL0(fromField->m_fielddef[0]->m_fields[j] == - m_f->m_fielddef[0]->m_fields[j], + m_f->m_variables[j], "Field names do not match."); // load new field (overwrite m_f->m_exp coeffs for now) @@ -127,7 +121,7 @@ void ProcessCombineAvg::Process(po::variables_map &vm) { m_f->m_exp[j]->ExtractDataToCoeffs( fromField->m_fielddef[i], fromField->m_data[i], - fromField->m_fielddef[i]->m_fields[j], + m_f->m_variables[j], m_f->m_exp[j]->UpdateCoeffs()); } m_f->m_exp[j]->BwdTrans(m_f->m_exp[j]->GetCoeffs(), fromPhys[j]); @@ -148,7 +142,7 @@ void ProcessCombineAvg::Process(po::variables_map &vm) int stress = -1; for (int j = 0; j < nfields; ++j) { - if (m_f->m_fielddef[0]->m_fields[j] == "uu") + if (m_f->m_variables[j] == "uu") { stress = j; break; @@ -246,21 +240,6 @@ void ProcessCombineAvg::Process(po::variables_map &vm) boost::lexical_cast(finTime); } - // Update field def and data - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - for (int i = 0; i < nfields; ++i) - { - for (int j = 0; j < FieldDef.size(); ++j) - { - 
FieldDef[j]->m_fields.push_back(m_f->m_fielddef[0]->m_fields[i]); - m_f->m_exp[i]->AppendFieldData(FieldDef[j], FieldData[j]); - } - } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/ProcessModules/ProcessCombineAvg.h b/library/FieldUtils/ProcessModules/ProcessCombineAvg.h index c059b1cc4387be3df94302ef006a9723a63c333b..383dcfdadc5e73168411b29dca641530566f8b6d 100644 --- a/library/FieldUtils/ProcessModules/ProcessCombineAvg.h +++ b/library/FieldUtils/ProcessModules/ProcessCombineAvg.h @@ -69,6 +69,16 @@ public: return "ProcessCombineAvg"; } + virtual std::string GetModuleDescription() + { + return "Combining new fld into input avg fld"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + private: }; } diff --git a/library/FieldUtils/ProcessModules/ProcessConcatenateFld.cpp b/library/FieldUtils/ProcessModules/ProcessConcatenateFld.cpp index fec06ab3c49072e39baa152a2795da25a80d7aba..dc494d783cacc6b4a84bb77d6e3c3e30e29b9ed9 100644 --- a/library/FieldUtils/ProcessModules/ProcessConcatenateFld.cpp +++ b/library/FieldUtils/ProcessModules/ProcessConcatenateFld.cpp @@ -51,31 +51,11 @@ ModuleKey ProcessConcatenateFld::className = GetModuleFactory().RegisterCreatorFunction( ModuleKey(eProcessModule, "concatenate"), ProcessConcatenateFld::create, - "Concatenate field file into single file"); + "Concatenate field file into single file (deprecated)"); ProcessConcatenateFld::ProcessConcatenateFld(FieldSharedPtr f) : ProcessModule(f) { - // check for correct input files - if ((f->m_inputfiles.count("xml") == 0) && - (f->m_inputfiles.count("xml.gz") == 0)) - { - cout << "An xml or xml.gz input file must be specified for the " - "concatenate module" - << endl; - exit(3); - } - - if ((f->m_inputfiles.count("fld") == 0) && - (f->m_inputfiles.count("chk") == 0) && - (f->m_inputfiles.count("rst") == 0)) - { - cout << "A fld or chk or rst input file must be specified for the " - "concatenate module" - << 
endl; - - exit(3); - } } ProcessConcatenateFld::~ProcessConcatenateFld() @@ -84,33 +64,11 @@ ProcessConcatenateFld::~ProcessConcatenateFld() void ProcessConcatenateFld::Process(po::variables_map &vm) { - if (m_f->m_verbose) + if(m_f->m_comm->TreatAsRankZero()) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessConcatenateFld: Concatenating field file..." - << endl; - } + cout << "Concatenate module is not needed. Instead, use " << endl + << "\tFieldConvert file1.fld file2.fld file-conc.fld" << endl; } - - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - // Copy Data into FieldData and set variable - for (int j = 0; j < m_f->m_exp.size(); ++j) - { - for (int i = 0; i < FieldDef.size(); ++i) - { - // Could do a search here to find correct variable - FieldDef[i]->m_fields.push_back(m_f->m_fielddef[0]->m_fields[j]); - m_f->m_exp[0]->AppendFieldData(FieldDef[i], FieldData[i], - m_f->m_exp[j]->UpdateCoeffs()); - } - } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/ProcessModules/ProcessConcatenateFld.h b/library/FieldUtils/ProcessModules/ProcessConcatenateFld.h index 3ebfba93c2d660e3325f6ad86b24eddd69053cbd..3627c2a73e540e21e483e086fcf7fc148ef912b8 100644 --- a/library/FieldUtils/ProcessModules/ProcessConcatenateFld.h +++ b/library/FieldUtils/ProcessModules/ProcessConcatenateFld.h @@ -65,6 +65,17 @@ public: { return "ProcessConcatenateFld"; } + + virtual std::string GetModuleDescription() + { + return "Concatenating field file"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyFieldData; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessCreateExp.cpp b/library/FieldUtils/ProcessModules/ProcessCreateExp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7750b2932f7e332703e46340c4eae22e34492d2c --- /dev/null +++ b/library/FieldUtils/ProcessModules/ProcessCreateExp.cpp @@ -0,0 +1,271 @@ 
+//////////////////////////////////////////////////////////////////////////////// +// +// File: ProcessCreateExp.cpp +// +// For more information, please see: http://www.nektar.info/ +// +// The MIT License +// +// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA), +// Department of Aeronautics, Imperial College London (UK), and Scientific +// Computing and Imaging Institute, University of Utah (USA). +// +// License for the specific language governing rights and limitations under +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// Description: Dummy module to create m_exp. 
+// +//////////////////////////////////////////////////////////////////////////////// + +#include +#include +using namespace std; + +#include "ProcessCreateExp.h" + +#include +#include +#include + +namespace Nektar +{ +namespace FieldUtils +{ + +ModuleKey ProcessCreateExp::className = + GetModuleFactory().RegisterCreatorFunction( + ModuleKey(eProcessModule, "createExp"), + ProcessCreateExp::create, + "dummy module used to create m_exp."); + +ProcessCreateExp::ProcessCreateExp(FieldSharedPtr f) : ProcessModule(f) +{ +} + +ProcessCreateExp::~ProcessCreateExp() +{ +} + +void ProcessCreateExp::Process(po::variables_map &vm) +{ + if(m_f->m_graph) + { + int i, j; + LibUtilities::Timer timerpart; + if (m_f->m_verbose) + { + if (m_f->m_comm->TreatAsRankZero()) + { + timerpart.Start(); + } + } + + // check to see if fld file defined so can use in + // expansion definition if required + bool fldfilegiven = (m_f->m_fielddef.size() != 0); + + // currently load all field (possibly could read data from + // expansion list but it is re-arranged in expansion) + const SpatialDomains::ExpansionMap &expansions = + m_f->m_graph->GetExpansions(); + + // load fielddef header if fld file is defined.
This gives + // precedence to Homogeneous definition in fld file + m_f->m_numHomogeneousDir = 0; + if (fldfilegiven) + { + m_f->m_numHomogeneousDir = m_f->m_fielddef[0]->m_numHomogeneousDir; + + // Set up Expansion information to use mode order from field + m_f->m_graph->SetExpansions(m_f->m_fielddef); + } + else + { + if (m_f->m_session->DefinesSolverInfo("HOMOGENEOUS")) + { + std::string HomoStr = + m_f->m_session->GetSolverInfo("HOMOGENEOUS"); + + if ((HomoStr == "HOMOGENEOUS1D") || + (HomoStr == "Homogeneous1D") || + (HomoStr == "1D") || (HomoStr == "Homo1D")) + { + m_f->m_numHomogeneousDir = 1; + } + if ((HomoStr == "HOMOGENEOUS2D") || + (HomoStr == "Homogeneous2D") || + (HomoStr == "2D") || (HomoStr == "Homo2D")) + { + m_f->m_numHomogeneousDir = 2; + } + } + } + + m_f->m_exp.resize(1); + + // if Range has been specified it is possible to have a + // partition which is empty so check this and return with empty + // expansion if no elements present. + if (!expansions.size()) + { + m_f->m_exp[0] = MemoryManager:: + AllocateSharedPtr(); + return; + } + + if (fldfilegiven) + { + // Set up Expansion information to use mode order from field + m_f->m_graph->SetExpansions(m_f->m_fielddef); + } + + // Adjust number of quadrature points + if (vm.count("output-points")) + { + int nPointsNew = vm["output-points"].as(); + m_f->m_graph->SetExpansionsToPointOrder(nPointsNew); + } + + if (m_f->m_verbose) + { + if (m_f->m_comm->TreatAsRankZero()) + { + timerpart.Stop(); + NekDouble cpuTime = timerpart.TimePerTest(1); + + stringstream ss; + ss << cpuTime << "s"; + cout << "\t ProcessCreateExp setexpansion CPU Time: " + << setw(8) << left + << ss.str() << endl; + timerpart.Start(); + } + } + + // Override number of planes with value from cmd line + if (m_f->m_numHomogeneousDir == 1 && vm.count("output-points-hom-z")) + { + int expdim = m_f->m_graph->GetMeshDimension(); + m_f->m_fielddef[0]->m_numModes[expdim] = + vm["output-points-hom-z"].as(); + } + + m_f->m_exp[0] = 
m_f->SetUpFirstExpList(m_f->m_numHomogeneousDir, + fldfilegiven); + + if (m_f->m_verbose) + { + if (m_f->m_comm->TreatAsRankZero()) + { + timerpart.Stop(); + NekDouble cpuTime = timerpart.TimePerTest(1); + + stringstream ss1; + + ss1 << cpuTime << "s"; + cout << "\t ProcessCreateExp set first exp CPU Time: " + << setw(8) << left + << ss1.str() << endl; + } + } + + if (fldfilegiven) + { + int nfields, nstrips; + + m_f->m_session->LoadParameter("Strip_Z", nstrips, 1); + + vector vars; + if (vm.count("useSessionVariables")) + { + m_f->m_variables = m_f->m_session->GetVariables(); + } + nfields = m_f->m_variables.size(); + vars = m_f->m_variables; + + m_f->m_exp.resize(nfields * nstrips); + + // declare other fields; + for (int s = 0; s < nstrips; ++s) // homogeneous strip variant + { + for (i = 0; i < nfields; ++i) + { + if (i < vars.size()) + { + // check to see if field already defined + if (!m_f->m_exp[s * nfields + i]) + { + m_f->m_exp[s * nfields + i] = m_f->AppendExpList( + m_f->m_numHomogeneousDir, vars[i]); + } + } + else + { + if (vars.size()) + { + m_f->m_exp[s * nfields + i] = m_f->AppendExpList( + m_f->m_numHomogeneousDir, vars[0]); + } + else + { + m_f->m_exp[s * nfields + i] = m_f->AppendExpList( + m_f->m_numHomogeneousDir); + } + } + } + } + + // Extract data to coeffs and bwd transform + for (int s = 0; s < nstrips; ++s) // homogeneous strip variant + { + for (j = 0; j < nfields; ++j) + { + for (i = 0; i < m_f->m_data.size() / nstrips; ++i) + { + int n = i * nstrips + s; + // In case of multiple flds, we might not have a + // variable in this m_data[n] -> skip in this case + vector::iterator it = + find (m_f->m_fielddef[n]->m_fields.begin(), + m_f->m_fielddef[n]->m_fields.end(), + m_f->m_variables[j]); + if(it !=m_f->m_fielddef[n]->m_fields.end()) + { + m_f->m_exp[s * nfields + j]->ExtractDataToCoeffs( + m_f->m_fielddef[n], + m_f->m_data[n], + m_f->m_variables[j], + m_f->m_exp[s * nfields +
j]->BwdTrans( + m_f->m_exp[s * nfields + j]->GetCoeffs(), + m_f->m_exp[s * nfields + j]->UpdatePhys()); + } + } + // Clear fielddef and data + // (they should not be used after running this module) + m_f->m_fielddef = vector(); + m_f->m_data = vector >(); + } + } + +} +} +} diff --git a/library/FieldUtils/ProcessModules/ProcessCreateExp.h b/library/FieldUtils/ProcessModules/ProcessCreateExp.h new file mode 100644 index 0000000000000000000000000000000000000000..231aceb669fad0cbc07bcd77e615c6967f366cd0 --- /dev/null +++ b/library/FieldUtils/ProcessModules/ProcessCreateExp.h @@ -0,0 +1,86 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// File: ProcessCreateExp.h +// +// For more information, please see: http://www.nektar.info/ +// +// The MIT License +// +// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA), +// Department of Aeronautics, Imperial College London (UK), and Scientific +// Computing and Imaging Institute, University of Utah (USA). +// +// License for the specific language governing rights and limitations under +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// Description: Dummy module to create m_exp. +// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef FIELDUTILS_PROCESSCREATEEXP +#define FIELDUTILS_PROCESSCREATEEXP + +#include "../Module.h" + +namespace Nektar +{ +namespace FieldUtils +{ + +/** + * @brief This processing module scales the input fld file + * + */ +class ProcessCreateExp : public ProcessModule +{ +public: + /// Creates an instance of this class + static boost::shared_ptr create(FieldSharedPtr f) + { + return MemoryManager::AllocateSharedPtr(f); + } + static ModuleKey className; + + ProcessCreateExp(FieldSharedPtr f); + virtual ~ProcessCreateExp(); + + /// Write mesh to output file. + virtual void Process(po::variables_map &vm); + + virtual std::string GetModuleName() + { + return "ProcessCreateExp"; + } + + virtual std::string GetModuleDescription() + { + return "Creating m_exp if needed"; + } + + virtual ModulePriority GetModulePriority() + { + return eCreateExp; + } + +private: +}; +} +} + +#endif diff --git a/library/FieldUtils/ProcessModules/ProcessDeform.cpp b/library/FieldUtils/ProcessModules/ProcessDeform.cpp index 52fb06f02a7e40623b13d4b96f1cadb806fccb59..0c88982a5776a13147a3abbee11cfc9592ad6331 100644 --- a/library/FieldUtils/ProcessModules/ProcessDeform.cpp +++ b/library/FieldUtils/ProcessModules/ProcessDeform.cpp @@ -61,12 +61,10 @@ ProcessDeform::~ProcessDeform() void ProcessDeform::Process(po::variables_map &vm) { - if (m_f->m_verbose) + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessDeform: Deforming grid..." 
<< endl; - } + return; } Array exp(m_f->m_exp.size()); diff --git a/library/FieldUtils/ProcessModules/ProcessDeform.h b/library/FieldUtils/ProcessModules/ProcessDeform.h index e066d12d4942fadeae8612905e845dbfc0fff3b4..163b08454bae3b5a39fae269a00dcb11af28afba 100644 --- a/library/FieldUtils/ProcessModules/ProcessDeform.h +++ b/library/FieldUtils/ProcessModules/ProcessDeform.h @@ -63,6 +63,17 @@ public: { return "ProcessDeform"; } + + virtual std::string GetModuleDescription() + { + return "Deforming grid"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessDisplacement.cpp b/library/FieldUtils/ProcessModules/ProcessDisplacement.cpp index 12d8905a79fd5834fc2c14d8b7bcc0381476402d..d03dac4308ba983b709315e561f500cd504f2ecb 100644 --- a/library/FieldUtils/ProcessModules/ProcessDisplacement.cpp +++ b/library/FieldUtils/ProcessModules/ProcessDisplacement.cpp @@ -105,17 +105,14 @@ ModuleKey ProcessDisplacement::className = ProcessDisplacement::create, "Deform a mesh given an input field defining displacement"); -ProcessDisplacement::ProcessDisplacement(FieldSharedPtr f) : ProcessModule(f) +ProcessDisplacement::ProcessDisplacement(FieldSharedPtr f) + : ProcessBoundaryExtract(f) { m_config["to"] = ConfigOption(false, "", "Name of file containing high order boundary"); - m_config["id"] = - ConfigOption(false, "", "Boundary ID to calculate displacement for"); + m_config["usevertexids"] = ConfigOption( false, "0", "Use vertex IDs instead of face IDs for matching"); - f->m_declareExpansionAsContField = true; - f->m_writeBndFld = true; - f->m_fldToBnd = false; } ProcessDisplacement::~ProcessDisplacement() @@ -124,13 +121,9 @@ ProcessDisplacement::~ProcessDisplacement() void ProcessDisplacement::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessDisplacement: Calculating displacement..." 
<< endl; - } - } + ProcessBoundaryExtract::Process(vm); + ASSERTL0( !boost::iequals(m_config["bnd"].as(), "All"), + "ProcessDisplacement needs bnd parameter with a single id."); string toFile = m_config["to"].as(); @@ -150,15 +143,14 @@ void ProcessDisplacement::Process(po::variables_map &vm) SpatialDomains::MeshGraph::Read(bndSession); // Try to find boundary condition expansion. - int bndCondId = m_config["id"].as(); + int bndCondId = m_config["bnd"].as(); // FIXME: We should be storing boundary condition IDs // somewhere... - m_f->m_bndRegionsToWrite.push_back(bndCondId); - if (bndGraph->GetMeshDimension() == 1) { m_f->m_exp.push_back(m_f->AppendExpList(0, "v")); + m_f->m_variables.push_back("v"); MultiRegions::ExpListSharedPtr bndCondExpU = m_f->m_exp[0]->GetBndCondExpansions()[bndCondId]; @@ -221,6 +213,8 @@ void ProcessDisplacement::Process(po::variables_map &vm) { m_f->m_exp.push_back(m_f->AppendExpList(0, "v")); m_f->m_exp.push_back(m_f->AppendExpList(0, "w")); + m_f->m_variables.push_back("v"); + m_f->m_variables.push_back("w"); MultiRegions::ExpListSharedPtr bndCondExpU = m_f->m_exp[0]->GetBndCondExpansions()[bndCondId]; diff --git a/library/FieldUtils/ProcessModules/ProcessDisplacement.h b/library/FieldUtils/ProcessModules/ProcessDisplacement.h index bfdfeeae616341237be96a43b5afd0707b6acea2..2a511b5090c4b28f7575bce58805228eef96d14d 100644 --- a/library/FieldUtils/ProcessModules/ProcessDisplacement.h +++ b/library/FieldUtils/ProcessModules/ProcessDisplacement.h @@ -36,14 +36,14 @@ #ifndef FIELDUTILS_PROCESSDISPLACEMENT #define FIELDUTILS_PROCESSDISPLACEMENT -#include "../Module.h" +#include "ProcessBoundaryExtract.h" namespace Nektar { namespace FieldUtils { -class ProcessDisplacement : public ProcessModule +class ProcessDisplacement : public ProcessBoundaryExtract { public: /// Creates an instance of this class @@ -63,6 +63,12 @@ public: { return "ProcessDisplacement"; } + + virtual std::string GetModuleDescription() + { + return "Calculating 
displacement"; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessEquiSpacedOutput.cpp b/library/FieldUtils/ProcessModules/ProcessEquiSpacedOutput.cpp index da556211352df8e9d7a0644f48582af463bf9471..f20e260f935e6924963e03e730993ce2b8520728 100644 --- a/library/FieldUtils/ProcessModules/ProcessEquiSpacedOutput.cpp +++ b/library/FieldUtils/ProcessModules/ProcessEquiSpacedOutput.cpp @@ -60,8 +60,6 @@ ModuleKey ProcessEquiSpacedOutput::className = ProcessEquiSpacedOutput::ProcessEquiSpacedOutput(FieldSharedPtr f) : ProcessModule(f) { - f->m_setUpEquiSpacedFields = true; - m_config["tetonly"] = ConfigOption(true, "NotSet", "Only process tetrahedral elements"); @@ -75,22 +73,25 @@ ProcessEquiSpacedOutput::~ProcessEquiSpacedOutput() void ProcessEquiSpacedOutput::Process(po::variables_map &vm) { - SetupEquiSpacedField(); -} - -void ProcessEquiSpacedOutput::SetupEquiSpacedField(void) -{ - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "Interpolating fields to equispaced..." 
<< endl; - } - } int nel = m_f->m_exp[0]->GetExpSize(); if (!nel) { - m_f->m_fieldPts = LibUtilities::NullPtsField; + // Create empty PtsField + int nfields = m_f->m_variables.size(); + int coordim = 3; + + Array > pts(nfields + coordim); + for (int i = 0; i < nfields + coordim; ++i) + { + pts[i] = Array(0); + } + vector > ptsConn; + + m_f->m_fieldPts = + MemoryManager::AllocateSharedPtr( + coordim, m_f->m_variables, pts); + m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsTetBlock); + m_f->m_fieldPts->SetConnectivity(ptsConn); return; } @@ -101,38 +102,15 @@ void ProcessEquiSpacedOutput::SetupEquiSpacedField(void) // Check if we have a homogeneous expansion bool homogeneous1D = false; - if (m_f->m_fielddef.size()) + if (m_f->m_numHomogeneousDir == 1) { - if (m_f->m_fielddef[0]->m_numHomogeneousDir == 1) - { - coordim++; - shapedim++; - homogeneous1D = true; - } - else if (m_f->m_fielddef[0]->m_numHomogeneousDir == 2) - { - ASSERTL0(false, "Homegeneous2D case not supported"); - } + coordim++; + shapedim++; + homogeneous1D = true; } - else + else if (m_f->m_numHomogeneousDir == 2) { - if (m_f->m_session->DefinesSolverInfo("HOMOGENEOUS")) - { - std::string HomoStr = m_f->m_session->GetSolverInfo("HOMOGENEOUS"); - - if ((HomoStr == "HOMOGENEOUS1D") || (HomoStr == "Homogeneous1D") || - (HomoStr == "1D") || (HomoStr == "Homo1D")) - { - coordim++; - shapedim++; - homogeneous1D = true; - } - if ((HomoStr == "HOMOGENEOUS2D") || (HomoStr == "Homogeneous2D") || - (HomoStr == "2D") || (HomoStr == "Homo2D")) - { - ASSERTL0(false, "Homegeneous2D case not supported"); - } - } + ASSERTL0(false, "Homegeneous2D case not supported"); } // set up the number of points in each element @@ -150,7 +128,6 @@ void ProcessEquiSpacedOutput::SetupEquiSpacedField(void) LocalRegions::ExpansionSharedPtr e; // prepare PtsField - vector fieldNames; vector ppe; vector > ptsConn; int nfields; @@ -347,14 +324,7 @@ void ProcessEquiSpacedOutput::SetupEquiSpacedField(void) cnt += newpoints; } - if 
(m_f->m_fielddef.size()) - { - nfields = m_f->m_exp.size(); - } - else // just the mesh points - { - nfields = 0; - } + nfields = m_f->m_variables.size(); Array > pts(nfields + coordim); @@ -376,14 +346,6 @@ void ProcessEquiSpacedOutput::SetupEquiSpacedField(void) m_f->m_exp[0]->GetCoords(coords[0], coords[1], coords[2]); - int nq1 = m_f->m_exp[0]->GetTotPoints(); - - Array x1(nq1); - Array y1(nq1); - Array z1(nq1); - - m_f->m_exp[0]->GetCoords(x1, y1, z1); - Array tmp; for (int n = 0; n < coordim; ++n) @@ -399,45 +361,36 @@ void ProcessEquiSpacedOutput::SetupEquiSpacedField(void) } } - if (m_f->m_fielddef.size()) + for (int n = 0; n < m_f->m_variables.size(); ++n) { - ASSERTL0(m_f->m_fielddef[0]->m_fields.size() == m_f->m_exp.size(), - "More expansion defined than fields"); + cnt = 0; + int cnt1 = 0; - for (int n = 0; n < m_f->m_exp.size(); ++n) + if (m_config["modalenergy"].m_beenSet) { - cnt = 0; - int cnt1 = 0; - - if (m_config["modalenergy"].m_beenSet) + Array phys = m_f->m_exp[n]->GetPhys(); + for (int i = 0; i < nel; ++i) { - Array phys = m_f->m_exp[n]->GetPhys(); - for (int i = 0; i < nel; ++i) - { - GenOrthoModes(i, phys + cnt, tmp = pts[coordim + n] + cnt1); - cnt1 += ppe[i]; - cnt += m_f->m_exp[0]->GetExp(i)->GetTotPoints(); - } + GenOrthoModes(i, phys + cnt, tmp = pts[coordim + n] + cnt1); + cnt1 += ppe[i]; + cnt += m_f->m_exp[0]->GetExp(i)->GetTotPoints(); } - else + } + else + { + Array phys = m_f->m_exp[n]->GetPhys(); + for (int i = 0; i < nel; ++i) { - Array phys = m_f->m_exp[n]->GetPhys(); - for (int i = 0; i < nel; ++i) - { - m_f->m_exp[0]->GetExp(i)->PhysInterpToSimplexEquiSpaced( - phys + cnt, tmp = pts[coordim + n] + cnt1); - cnt1 += ppe[i]; - cnt += m_f->m_exp[0]->GetExp(i)->GetTotPoints(); - } + m_f->m_exp[0]->GetExp(i)->PhysInterpToSimplexEquiSpaced( + phys + cnt, tmp = pts[coordim + n] + cnt1); + cnt1 += ppe[i]; + cnt += m_f->m_exp[0]->GetExp(i)->GetTotPoints(); } - - // Set up Variable string. 
- fieldNames.push_back(m_f->m_fielddef[0]->m_fields[n]); } } m_f->m_fieldPts = MemoryManager::AllocateSharedPtr( - coordim, fieldNames, pts); + coordim, m_f->m_variables, pts); if (shapedim == 1) { m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsSegBlock); @@ -456,6 +409,9 @@ void ProcessEquiSpacedOutput::SetupEquiSpacedField(void) { SetHomogeneousConnectivity(); } + + // Clear m_exp + m_f->m_exp = vector(); } void ProcessEquiSpacedOutput::SetHomogeneousConnectivity(void) @@ -656,14 +612,14 @@ void ProcessEquiSpacedOutput::SetHomogeneousConnectivity(void) } // Interior numbering - int mode = np+1; + edge2 = 0; for (int n = 1; n < np-1; n++) { + edge2 += np+1-n; for (int m = 1; m < np-n-1; m++) { - vId[cnt1+mode] = 4*nel + maxN*4*nel + cnt2; + vId[cnt1+edge2+m] = 4*nel + maxN*4*nel + cnt2; cnt2++; - mode++; } } cnt1+= newpoints; diff --git a/library/FieldUtils/ProcessModules/ProcessEquiSpacedOutput.h b/library/FieldUtils/ProcessModules/ProcessEquiSpacedOutput.h index 52f3b9edb324b9f37b5879a3f298edaf373f092b..86f6ee83c0e4cd4cd5d11670afa4654c3f60e54d 100644 --- a/library/FieldUtils/ProcessModules/ProcessEquiSpacedOutput.h +++ b/library/FieldUtils/ProcessModules/ProcessEquiSpacedOutput.h @@ -67,10 +67,17 @@ public: return "ProcessEquiSpacedOutput"; } -protected: - ProcessEquiSpacedOutput(){}; - void SetupEquiSpacedField(void); + virtual std::string GetModuleDescription() + { + return "Interpolating fields to equispaced"; + } + virtual ModulePriority GetModulePriority() + { + return eConvertExpToPts; + } + +protected: void SetHomogeneousConnectivity(void); void GenOrthoModes(int n, diff --git a/library/FieldUtils/ProcessModules/ProcessGrad.cpp b/library/FieldUtils/ProcessModules/ProcessGrad.cpp index c7b8849d52b2e978c60c0371a611a1cbc8d0fb4d..231589d0e86acabb91d3e1da8a590aba015ae223 100644 --- a/library/FieldUtils/ProcessModules/ProcessGrad.cpp +++ b/library/FieldUtils/ProcessModules/ProcessGrad.cpp @@ -64,19 +64,36 @@ ProcessGrad::~ProcessGrad() void 
ProcessGrad::Process(po::variables_map &vm) { - if (m_f->m_verbose) + int i, j; + int expdim = m_f->m_graph->GetMeshDimension(); + int spacedim = m_f->m_numHomogeneousDir + expdim; + int nfields = m_f->m_variables.size(); + int addfields = nfields * spacedim; + + for (i = 0; i < nfields; ++i) { - if (m_f->m_comm->TreatAsRankZero()) + if (spacedim == 1) { - cout << "ProcessGrad: Calculating gradients..." << endl; + m_f->m_variables.push_back(m_f->m_variables[i] + "_x"); + } + else if (spacedim == 2) + { + m_f->m_variables.push_back(m_f->m_variables[i] + "_x"); + m_f->m_variables.push_back(m_f->m_variables[i] + "_y"); + } + else if (spacedim == 3) + { + m_f->m_variables.push_back(m_f->m_variables[i] + "_x"); + m_f->m_variables.push_back(m_f->m_variables[i] + "_y"); + m_f->m_variables.push_back(m_f->m_variables[i] + "_z"); } } - int i, j; - int expdim = m_f->m_graph->GetMeshDimension(); - int spacedim = m_f->m_fielddef[0]->m_numHomogeneousDir + expdim; - int nfields = m_f->m_fielddef[0]->m_fields.size(); - int addfields = nfields * spacedim; + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) + { + return; + } int npoints = m_f->m_exp[0]->GetNpoints(); Array > grad(addfields); @@ -173,56 +190,12 @@ void ProcessGrad::Process(po::variables_map &vm) for (i = 0; i < addfields; ++i) { m_f->m_exp[nfields + i] = - m_f->AppendExpList(m_f->m_fielddef[0]->m_numHomogeneousDir); + m_f->AppendExpList(m_f->m_numHomogeneousDir); Vmath::Vcopy(npoints, grad[i], 1, m_f->m_exp[nfields + i]->UpdatePhys(), 1); m_f->m_exp[nfields + i]->FwdTrans_IterPerExp( grad[i], m_f->m_exp[nfields + i]->UpdateCoeffs()); } - - vector outname; - for (i = 0; i < nfields; ++i) - { - if (spacedim == 1) - { - outname.push_back(m_f->m_fielddef[0]->m_fields[i] + "_x"); - } - else if (spacedim == 2) - { - outname.push_back(m_f->m_fielddef[0]->m_fields[i] + "_x"); - outname.push_back(m_f->m_fielddef[0]->m_fields[i] + "_y"); - } - else if (spacedim == 3) - { - 
outname.push_back(m_f->m_fielddef[0]->m_fields[i] + "_x"); - outname.push_back(m_f->m_fielddef[0]->m_fields[i] + "_y"); - outname.push_back(m_f->m_fielddef[0]->m_fields[i] + "_z"); - } - } - - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for (j = 0; j < nfields + addfields; ++j) - { - for (i = 0; i < FieldDef.size(); ++i) - { - if (j >= nfields) - { - FieldDef[i]->m_fields.push_back(outname[j - nfields]); - } - else - { - FieldDef[i]->m_fields.push_back( - m_f->m_fielddef[0]->m_fields[j]); - } - m_f->m_exp[j]->AppendFieldData(FieldDef[i], FieldData[i]); - } - } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/ProcessModules/ProcessGrad.h b/library/FieldUtils/ProcessModules/ProcessGrad.h index 0c80bc88f2d68f757ba8c0834809a6762a89e85a..861facc9946bf223957dd1fd77f128a107204cda 100644 --- a/library/FieldUtils/ProcessModules/ProcessGrad.h +++ b/library/FieldUtils/ProcessModules/ProcessGrad.h @@ -67,6 +67,17 @@ public: { return "ProcessGrad"; } + + virtual std::string GetModuleDescription() + { + return "Calculating gradients"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessHomogeneousPlane.cpp b/library/FieldUtils/ProcessModules/ProcessHomogeneousPlane.cpp index e0975ffc2752684a63c59168fe9ab996fd44030d..74aff788d76db2b09656d0150a44904adc311c01 100644 --- a/library/FieldUtils/ProcessModules/ProcessHomogeneousPlane.cpp +++ b/library/FieldUtils/ProcessModules/ProcessHomogeneousPlane.cpp @@ -68,34 +68,33 @@ ProcessHomogeneousPlane::~ProcessHomogeneousPlane() void ProcessHomogeneousPlane::Process(po::variables_map &vm) { - if (m_f->m_verbose) + if ((m_f->m_numHomogeneousDir) != 1) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessHomogeneousPlane: Extracting plane..." 
<< endl; - } + ASSERTL0(false, + "ProcessHomogeneousPlane only works for Homogeneous1D."); } + m_f->m_numHomogeneousDir = 0; - if ((m_f->m_fielddef[0]->m_numHomogeneousDir) != 1) + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - ASSERTL0(false, - "ProcessHomogeneousPlane only works for Homogeneous1D."); + return; } ASSERTL0(m_config["planeid"].m_beenSet, "Missing parameter planeid for ProcessHomogeneousPlane"); int planeid = m_config["planeid"].as(); - int nfields = m_f->m_fielddef[0]->m_fields.size(); + int nfields = m_f->m_variables.size(); int nstrips; m_f->m_session->LoadParameter("Strip_Z", nstrips, 1); // Look for correct plane (because of parallel case) int plane = -1; - for (int i = 0; i < m_f->m_fielddef[0]->m_homogeneousZIDs.size(); ++i) + for (int i = 0; i < m_f->m_exp[0]->GetZIDs().num_elements(); ++i) { - if (m_f->m_fielddef[0]->m_homogeneousZIDs[i] == planeid) + if (m_f->m_exp[0]->GetZIDs()[i] == planeid) { plane = i; } @@ -117,36 +116,46 @@ void ProcessHomogeneousPlane::Process(po::variables_map &vm) } else { - m_f->m_exp[n]->FwdTrans(m_f->m_exp[n]->GetPhys(), + m_f->m_exp[n]->FwdTrans_IterPerExp(m_f->m_exp[n]->GetPhys(), m_f->m_exp[n]->UpdateCoeffs()); } } } - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - for (int s = 0; s < nstrips; ++s) + // Create new SessionReader with RowComm. This is done because when + // using a module requiring m_f->m_declareExpansionAsContField and + // outputting to vtu/dat, a new ContField with equispaced points + // is created. 
Since creating ContFields require communication, we have + // to change m_session->m_comm to prevent mpi from hanging + // (RowComm will only be used when creating the new expansion, + // since in other places we use m_f->m_comm) + std::vector files; + for (int i = 0; i < m_f->m_inputfiles["xml"].size(); ++i) { - for (int j = 0; j < nfields; ++j) - { - for (int i = 0; i < FieldDef.size() / nstrips; ++i) - { - int n = s * FieldDef.size() / nstrips + i; - - FieldDef[n]->m_fields.push_back( - m_f->m_fielddef[0]->m_fields[j]); - m_f->m_exp[s * nfields + j]->AppendFieldData(FieldDef[n], - FieldData[n]); - } - } + files.push_back(m_f->m_inputfiles["xml"][i]); } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; + for (int j = 0; j < m_f->m_inputfiles["xml.gz"].size(); ++j) + { + files.push_back(m_f->m_inputfiles["xml.gz"][j]); + } + vector cmdArgs; + cmdArgs.push_back("FieldConvert"); + if (m_f->m_verbose) + { + cmdArgs.push_back("--verbose"); + } + int argc = cmdArgs.size(); + const char **argv = new const char *[argc]; + for (int i = 0; i < argc; ++i) + { + argv[i] = cmdArgs[i].c_str(); + } + m_f->m_session = LibUtilities::SessionReader::CreateInstance( + argc, (char **)argv, files, m_f->m_comm->GetRowComm()); } else { + // Create empty expansion for (int s = 0; s < nstrips; ++s) { for (int i = 0; i < nfields; ++i) @@ -156,9 +165,6 @@ void ProcessHomogeneousPlane::Process(po::variables_map &vm) MemoryManager::AllocateSharedPtr(); } } - m_f->m_fielddef = - std::vector(); - m_f->m_data = std::vector >(); } } } diff --git a/library/FieldUtils/ProcessModules/ProcessHomogeneousPlane.h b/library/FieldUtils/ProcessModules/ProcessHomogeneousPlane.h index c3e41659a0a62eda46b16b4197e53fed3657b10c..a66ba92b4436dc8ff5104d79de1294dea21b5dcf 100644 --- a/library/FieldUtils/ProcessModules/ProcessHomogeneousPlane.h +++ b/library/FieldUtils/ProcessModules/ProcessHomogeneousPlane.h @@ -65,8 +65,19 @@ public: virtual std::string GetModuleName() { - return "ProcessScalGrad"; + 
return "ProcessHomogeneousPlane"; } + + virtual std::string GetModuleDescription() + { + return "Extracting plane"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessHomogeneousStretch.cpp b/library/FieldUtils/ProcessModules/ProcessHomogeneousStretch.cpp index a618661bb24dd553c00345b386e7f7a9c86d5a12..87f503be72f739f9d0b407a4e2796c03de09472a 100644 --- a/library/FieldUtils/ProcessModules/ProcessHomogeneousStretch.cpp +++ b/library/FieldUtils/ProcessModules/ProcessHomogeneousStretch.cpp @@ -60,7 +60,8 @@ ModuleKey ProcessHomogeneousStretch::className = ProcessHomogeneousStretch::ProcessHomogeneousStretch(FieldSharedPtr f) : ProcessModule(f) { - m_config["factor"] = ConfigOption(false, "NotSet", "stretch factor"); + m_config["factor"] = + ConfigOption(false, "NotSet", "integer stretch factor"); } ProcessHomogeneousStretch::~ProcessHomogeneousStretch() @@ -69,16 +70,13 @@ ProcessHomogeneousStretch::~ProcessHomogeneousStretch() void ProcessHomogeneousStretch::Process(po::variables_map &vm) { - if (m_f->m_verbose) + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - if (m_f->m_comm->GetRank() == 0) - { - cout << "ProcessHomogeneousStretch: Stretching expansion..." 
- << endl; - } + return; } - if ((m_f->m_fielddef[0]->m_numHomogeneousDir) != 1) + if ((m_f->m_numHomogeneousDir) != 1) { ASSERTL0(false, "ProcessHomogeneousStretch only works for Homogeneous1D."); @@ -88,10 +86,10 @@ void ProcessHomogeneousStretch::Process(po::variables_map &vm) "Missing parameter factor for ProcessHomogeneousStretch"); int factor = m_config["factor"].as(); - int nfields = m_f->m_fielddef[0]->m_fields.size(); + int nfields = m_f->m_variables.size(); int nplanes = m_f->m_exp[0]->GetHomogeneousBasis()->GetZ().num_elements(); - ASSERTL0(factor > 1, "Parameter factor must be greater than 1."); + ASSERTL0(factor > 1, "Parameter factor must be an int greater than 1."); int nstrips; m_f->m_session->LoadParameter("Strip_Z", nstrips, 1); @@ -118,34 +116,9 @@ void ProcessHomogeneousStretch::Process(po::variables_map &vm) m_f->m_exp[n]->BwdTrans(m_f->m_exp[n]->GetCoeffs(), m_f->m_exp[n]->UpdatePhys()); + m_f->m_exp[n]->SetHomoLen(factor*m_f->m_exp[n]->GetHomoLen()); } } - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for (int s = 0; s < nstrips; ++s) - { - for (int j = 0; j < nfields; ++j) - { - for (int i = 0; i < FieldDef.size() / nstrips; ++i) - { - int n = s * FieldDef.size() / nstrips + i; - - FieldDef[n]->m_fields.push_back( - m_f->m_fielddef[0]->m_fields[j]); - m_f->m_exp[s * nfields + j]->AppendFieldData(FieldDef[n], - FieldData[n]); - } - } - } - for (int i = 0; i < FieldDef.size(); ++i) - { - FieldDef[i]->m_homogeneousLengths[0] = - factor * m_f->m_fielddef[i]->m_homogeneousLengths[0]; - } - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/ProcessModules/ProcessHomogeneousStretch.h b/library/FieldUtils/ProcessModules/ProcessHomogeneousStretch.h index 73abfcee6d22d03e36cb283b85303af00e9f039b..ec5a31c78ca457238313a0d75c0e93bb5ca590b1 100644 --- a/library/FieldUtils/ProcessModules/ProcessHomogeneousStretch.h +++ 
b/library/FieldUtils/ProcessModules/ProcessHomogeneousStretch.h @@ -67,6 +67,17 @@ public: { return "ProcessHomogeneousStretch"; } + + virtual std::string GetModuleDescription() + { + return "Stretching expansion"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessInnerProduct.cpp b/library/FieldUtils/ProcessModules/ProcessInnerProduct.cpp index 09fbcc4aa5df50f044d3215d83487b8a30a9883e..349e1e311454115388f8de26194526f0267c50ad 100644 --- a/library/FieldUtils/ProcessModules/ProcessInnerProduct.cpp +++ b/library/FieldUtils/ProcessModules/ProcessInnerProduct.cpp @@ -73,17 +73,12 @@ ProcessInnerProduct::~ProcessInnerProduct() void ProcessInnerProduct::Process(po::variables_map &vm) { - if (m_f->m_verbose) + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessInnerProduct: Evaluating inner product..." << endl; - } + return; } - ASSERTL0(m_f->m_exp.size() != 0, "input xml file needs to be specified"); - ASSERTL0(m_f->m_data.size() != 0, "No input data has been defined"); - string fromfld = m_config["fromfld"].as(); FieldSharedPtr fromField = boost::shared_ptr(new Field()); @@ -98,7 +93,7 @@ void ProcessInnerProduct::Process(po::variables_map &vm) ElementGIDs[i] = m_f->m_exp[0]->GetExp(i)->GetGeom()->GetGlobalID(); } - int nfields = m_f->m_fielddef[0]->m_fields.size(); + int nfields = m_f->m_variables.size(); int nphys = m_f->m_exp[0]->GetTotPoints(); NekDouble totiprod; string fields = m_config["fields"].as(); @@ -196,7 +191,7 @@ void ProcessInnerProduct::Process(po::variables_map &vm) m_f->m_exp[fid]->ExtractDataToCoeffs( allFromField[g]->m_fielddef[i], allFromField[g]->m_data[i], - allFromField[g]->m_fielddef[i]->m_fields[fid], + m_f->m_variables[fid], m_f->m_exp[fid]->UpdateCoeffs()); } @@ -236,7 +231,7 @@ NekDouble ProcessInnerProduct::IProduct( { m_f->m_exp[fid]->ExtractDataToCoeffs( 
fromField->m_fielddef[i], fromField->m_data[i], - fromField->m_fielddef[i]->m_fields[fid], + m_f->m_variables[fid], m_f->m_exp[fid]->UpdateCoeffs()); } diff --git a/library/FieldUtils/ProcessModules/ProcessInnerProduct.h b/library/FieldUtils/ProcessModules/ProcessInnerProduct.h index 3fb6f19d75bb14e227013eee2e6e810ae67cacd7..17dd9130ea0634c9f76304229da1ed61827d9516 100644 --- a/library/FieldUtils/ProcessModules/ProcessInnerProduct.h +++ b/library/FieldUtils/ProcessModules/ProcessInnerProduct.h @@ -68,6 +68,16 @@ public: return "ProcessInnerProduct"; } + virtual std::string GetModuleDescription() + { + return "Evaluating inner product"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + private: NekDouble IProduct(vector &processFields, FieldSharedPtr &fromField, diff --git a/library/FieldUtils/ProcessModules/ProcessInterpField.cpp b/library/FieldUtils/ProcessModules/ProcessInterpField.cpp index 5d3a3a4caf72a9ba33620d5c16402f58b1c0025b..6b73e8021dfaf2aefe6d8ebc78d2b53874844e14 100644 --- a/library/FieldUtils/ProcessModules/ProcessInterpField.cpp +++ b/library/FieldUtils/ProcessModules/ProcessInterpField.cpp @@ -36,6 +36,7 @@ #include using namespace std; +#include #include "ProcessInterpField.h" #include @@ -43,6 +44,10 @@ using namespace std; #include #include #include + +namespace bg = boost::geometry; +namespace bgi = boost::geometry::index; + namespace Nektar { namespace FieldUtils @@ -59,9 +64,9 @@ ProcessInterpField::ProcessInterpField(FieldSharedPtr f) : ProcessModule(f) { m_config["fromxml"] = ConfigOption( - false, "NotSet", "Xml file form which to interpolate field"); + false, "NotSet", "Xml file from which to interpolate field"); m_config["fromfld"] = ConfigOption( - false, "NotSet", "Fld file form which to interpolate field"); + false, "NotSet", "Fld file from which to interpolate field"); m_config["clamptolowervalue"] = ConfigOption(false, "-10000000", "Lower bound for interpolation value"); @@ -77,21 +82,14 @@ 
ProcessInterpField::~ProcessInterpField() void ProcessInterpField::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessInterpField: Interpolating field..." << endl; - } - } - - m_fromField = boost::shared_ptr(new Field()); + FieldSharedPtr fromField = boost::shared_ptr(new Field()); std::vector files; // set up session file for from field - ParseUtils::GenerateOrderedStringVector(m_config["fromxml"].as().c_str(), files); - m_fromField->m_session = + ParseUtils::GenerateOrderedStringVector( + m_config["fromxml"].as().c_str(), files); + fromField->m_session = LibUtilities::SessionReader::CreateInstance(0, 0, files); // Set up range based on min and max of local parallel partition @@ -131,16 +129,16 @@ void ProcessInterpField::Process(po::variables_map &vm) rng->m_xmax = Vmath::Vmax(npts, coords[0], 1); break; default: - ASSERTL0(false, "too many values specfied in range"); + ASSERTL0(false, "coordim should be <= 3"); } // setup rng parameters. 
- m_fromField->m_graph = - SpatialDomains::MeshGraph::Read(m_fromField->m_session, rng); + fromField->m_graph = + SpatialDomains::MeshGraph::Read(fromField->m_session, rng); // Read in local from field partitions const SpatialDomains::ExpansionMap &expansions = - m_fromField->m_graph->GetExpansions(); + fromField->m_graph->GetExpansions(); // check for case where no elements are specified on this // parallel partition @@ -160,59 +158,46 @@ void ProcessInterpField::Process(po::variables_map &vm) string fromfld = m_config["fromfld"].as(); m_f->FieldIOForFile(fromfld)->Import( - fromfld, m_fromField->m_fielddef, m_fromField->m_data, + fromfld, fromField->m_fielddef, fromField->m_data, LibUtilities::NullFieldMetaDataMap, ElementGIDs); - int NumHomogeneousDir = m_fromField->m_fielddef[0]->m_numHomogeneousDir; + int NumHomogeneousDir = fromField->m_fielddef[0]->m_numHomogeneousDir; //---------------------------------------------- // Set up Expansion information to use mode order from field - m_fromField->m_graph->SetExpansions(m_fromField->m_fielddef); + fromField->m_graph->SetExpansions(fromField->m_fielddef); - int nfields = m_fromField->m_fielddef[0]->m_fields.size(); + int nfields = fromField->m_fielddef[0]->m_fields.size(); - m_fromField->m_exp.resize(nfields); - m_fromField->m_exp[0] = - m_fromField->SetUpFirstExpList(NumHomogeneousDir, true); + fromField->m_exp.resize(nfields); + fromField->m_exp[0] = + fromField->SetUpFirstExpList(NumHomogeneousDir, true); m_f->m_exp.resize(nfields); // declare auxiliary fields. for (i = 1; i < nfields; ++i) { - m_f->m_exp[i] = m_f->AppendExpList(NumHomogeneousDir); - m_fromField->m_exp[i] = m_fromField->AppendExpList(NumHomogeneousDir); + m_f->m_exp[i] = m_f->AppendExpList(NumHomogeneousDir); + fromField->m_exp[i] = fromField->AppendExpList(NumHomogeneousDir); } // load field into expansion in fromfield. 
for (int j = 0; j < nfields; ++j) { - for (i = 0; i < m_fromField->m_fielddef.size(); i++) + for (i = 0; i < fromField->m_fielddef.size(); i++) { - m_fromField->m_exp[j]->ExtractDataToCoeffs( - m_fromField->m_fielddef[i], m_fromField->m_data[i], - m_fromField->m_fielddef[0]->m_fields[j], - m_fromField->m_exp[j]->UpdateCoeffs()); + fromField->m_exp[j]->ExtractDataToCoeffs( + fromField->m_fielddef[i], fromField->m_data[i], + fromField->m_fielddef[0]->m_fields[j], + fromField->m_exp[j]->UpdateCoeffs()); } - m_fromField->m_exp[j]->BwdTrans(m_fromField->m_exp[j]->GetCoeffs(), - m_fromField->m_exp[j]->UpdatePhys()); + fromField->m_exp[j]->BwdTrans(fromField->m_exp[j]->GetCoeffs(), + fromField->m_exp[j]->UpdatePhys()); } int nq1 = m_f->m_exp[0]->GetTotPoints(); - Array x1(nq1); - Array y1(nq1); - Array z1(nq1); - - if (coordim == 2) - { - m_f->m_exp[0]->GetCoords(x1, y1); - } - else if (coordim == 3) - { - m_f->m_exp[0]->GetCoords(x1, y1, z1); - } - NekDouble clamp_low = m_config["clamptolowervalue"].as(); NekDouble clamp_up = m_config["clamptouppervalue"].as(); NekDouble def_value = m_config["defaultvalue"].as(); @@ -226,12 +211,12 @@ void ProcessInterpField::Process(po::variables_map &vm) } Interpolator interp; - if (m_f->m_comm->GetRank() == 0) + if (m_f->m_verbose && m_f->m_comm->TreatAsRankZero()) { interp.SetProgressCallback(&ProcessInterpField::PrintProgressbar, this); } - interp.Interpolate(m_fromField->m_exp, m_f->m_exp); - if (m_f->m_comm->GetRank() == 0) + interp.Interpolate(fromField->m_exp, m_f->m_exp); + if (m_f->m_verbose && m_f->m_comm->TreatAsRankZero()) { cout << endl; } @@ -249,27 +234,11 @@ void ProcessInterpField::Process(po::variables_map &vm) m_f->m_exp[i]->UpdatePhys()[j] = clamp_low; } } + m_f->m_exp[i]->FwdTrans_IterPerExp( + m_f->m_exp[i]->GetPhys(), m_f->m_exp[i]->UpdateCoeffs()); } - - // put field into field data for output - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for 
(int j = 0; j < nfields; ++j) - { - m_f->m_exp[j]->FwdTrans(m_f->m_exp[j]->GetPhys(), - m_f->m_exp[j]->UpdateCoeffs()); - for (i = 0; i < FieldDef.size(); ++i) - { - FieldDef[i]->m_fields.push_back( - m_fromField->m_fielddef[0]->m_fields[j]); - m_f->m_exp[j]->AppendFieldData(FieldDef[i], FieldData[i]); - } - } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; + // save field names + m_f->m_variables = fromField->m_fielddef[0]->m_fields; } void ProcessInterpField::PrintProgressbar(const int position, diff --git a/library/FieldUtils/ProcessModules/ProcessInterpField.h b/library/FieldUtils/ProcessModules/ProcessInterpField.h index 5c21555d8c8b0e398427ff72c822abdd3dde75c7..62d3a6f29c6d355bd66d00e3d3eb5f5b3cda5367 100644 --- a/library/FieldUtils/ProcessModules/ProcessInterpField.h +++ b/library/FieldUtils/ProcessModules/ProcessInterpField.h @@ -69,8 +69,16 @@ public: return "ProcessInterpField"; } -private: - FieldSharedPtr m_fromField; + virtual std::string GetModuleDescription() + { + return "Interpolating field"; + } + + virtual ModulePriority GetModulePriority() + { + return eFillExp; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessInterpPointDataToFld.cpp b/library/FieldUtils/ProcessModules/ProcessInterpPointDataToFld.cpp index 9a62be07d6e03c3561a251921e3b73a6ccc4b49c..4d5ef7b1c662a196abadcf7b0c99f3f2d96d78da 100644 --- a/library/FieldUtils/ProcessModules/ProcessInterpPointDataToFld.cpp +++ b/library/FieldUtils/ProcessModules/ProcessInterpPointDataToFld.cpp @@ -38,6 +38,7 @@ using namespace std; +#include #include "ProcessInterpPointDataToFld.h" #include @@ -45,6 +46,10 @@ using namespace std; #include #include #include + +namespace bg = boost::geometry; +namespace bgi = boost::geometry::index; + namespace Nektar { namespace FieldUtils @@ -60,6 +65,8 @@ ModuleKey ProcessInterpPointDataToFld::className = ProcessInterpPointDataToFld::ProcessInterpPointDataToFld(FieldSharedPtr f) : ProcessModule(f) { + m_config["frompts"] = ConfigOption( + 
false, "NotSet", "Pts file from which to interpolate field"); m_config["interpcoord"] = ConfigOption(false, "-1", "coordinate id to use for interpolation"); @@ -71,30 +78,29 @@ ProcessInterpPointDataToFld::~ProcessInterpPointDataToFld() void ProcessInterpPointDataToFld::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout - << "ProcessInterpPointDataToFld: interpolating data to field..." - << endl; - } - } - int i, j; - - // Check for command line point specification if no .pts file specified - ASSERTL0(m_f->m_fieldPts != LibUtilities::NullPtsField, - "No input points found"); - - int nFields = m_f->m_fieldPts->GetNFields(); + LibUtilities::PtsFieldSharedPtr fieldPts; + // Load pts file + ASSERTL0( m_config["frompts"].as().compare("NotSet") != 0, + "ProcessInterpPointDataToFld requires frompts parameter"); + string inFile = m_config["frompts"].as().c_str(); + LibUtilities::CommSharedPtr c = + LibUtilities::GetCommFactory().CreateInstance("Serial", 0, 0); + LibUtilities::PtsIOSharedPtr ptsIO = + MemoryManager::AllocateSharedPtr(c); + ptsIO->Import(inFile, fieldPts); + + int nFields = fieldPts->GetNFields(); ASSERTL0(nFields > 0, "No field values provided in input"); - // assume one field is already defined from input file. - m_f->m_exp.resize(nFields + 1); + // Define new expansions. 
+ ASSERTL0(m_f->m_numHomogeneousDir == 0, + "ProcessInterpPointDataToFld does not support homogeneous expansion"); + + m_f->m_exp.resize(nFields); for (i = 1; i < nFields; ++i) { - m_f->m_exp[i] = m_f->AppendExpList(0); + m_f->m_exp[i] = m_f->AppendExpList(m_f->m_numHomogeneousDir); } int totpoints = m_f->m_exp[0]->GetTotPoints(); @@ -108,18 +114,18 @@ void ProcessInterpPointDataToFld::Process(po::variables_map &vm) MemoryManager::AllocateSharedPtr(3, intFields); int coord_id = m_config["interpcoord"].as(); - ASSERTL0(coord_id <= m_f->m_fieldPts->GetDim() - 1, + ASSERTL0(coord_id <= fieldPts->GetDim() - 1, "interpcoord is bigger than the Pts files dimension"); Interpolator interp(eNoMethod, coord_id); - if (m_f->m_comm->GetRank() == 0) + if (m_f->m_verbose && m_f->m_comm->TreatAsRankZero()) { interp.SetProgressCallback( &ProcessInterpPointDataToFld::PrintProgressbar, this); } - interp.Interpolate(m_f->m_fieldPts, outPts); - if (m_f->m_comm->GetRank() == 0) + interp.Interpolate(fieldPts, outPts); + if (m_f->m_verbose && m_f->m_comm->TreatAsRankZero()) { cout << endl; } @@ -128,7 +134,7 @@ void ProcessInterpPointDataToFld::Process(po::variables_map &vm) { for (j = 0; j < nFields; ++j) { - m_f->m_exp[j]->SetPhys(i, outPts->GetPointVal(j, i)); + m_f->m_exp[j]->SetPhys(i, outPts->GetPointVal(3 + j, i)); } } @@ -139,23 +145,11 @@ void ProcessInterpPointDataToFld::Process(po::variables_map &vm) m_f->m_exp[i]->UpdateCoeffs()); } - // set up output fld file. 
- std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for (j = 0; j < nFields; ++j) + // save field names + for (int j = 0; j < fieldPts->GetNFields(); ++j) { - for (i = 0; i < FieldDef.size(); ++i) - { - FieldDef[i]->m_fields.push_back(m_f->m_fieldPts->GetFieldName(j)); - - m_f->m_exp[j]->AppendFieldData(FieldDef[i], FieldData[i]); - } + m_f->m_variables.push_back(fieldPts->GetFieldName(j)); } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/ProcessModules/ProcessInterpPointDataToFld.h b/library/FieldUtils/ProcessModules/ProcessInterpPointDataToFld.h index 2562e068efbc7fc5a6e0d0fab71e071fc59e0f32..f9c4c0a27f79efa69361ac63c7fd5b77930bdce6 100644 --- a/library/FieldUtils/ProcessModules/ProcessInterpPointDataToFld.h +++ b/library/FieldUtils/ProcessModules/ProcessInterpPointDataToFld.h @@ -69,6 +69,16 @@ public: return "ProcessInterpPointDataToFld"; } + virtual std::string GetModuleDescription() + { + return "Interpolating data to field"; + } + + virtual ModulePriority GetModulePriority() + { + return eFillExp; + } + void PrintProgressbar(const int position, const int goal) const { LibUtilities::PrintProgressbar(position, goal, "Interpolating"); diff --git a/library/FieldUtils/ProcessModules/ProcessInterpPoints.cpp b/library/FieldUtils/ProcessModules/ProcessInterpPoints.cpp index ade14c05d0e64465d536b42965c78f324bc1fdeb..254c55786489bf4098644bc9f6a5dca9aa9b62e0 100644 --- a/library/FieldUtils/ProcessModules/ProcessInterpPoints.cpp +++ b/library/FieldUtils/ProcessModules/ProcessInterpPoints.cpp @@ -37,15 +37,20 @@ #include using namespace std; +#include #include "ProcessInterpPoints.h" #include + #include #include #include #include #include +namespace bg = boost::geometry; +namespace bgi = boost::geometry::index; + namespace Nektar { namespace FieldUtils @@ -55,24 +60,29 @@ ModuleKey ProcessInterpPoints::className = GetModuleFactory().RegisterCreatorFunction( 
ModuleKey(eProcessModule, "interppoints"), ProcessInterpPoints::create, - "Interpolates a set of points to another, requires fromfld and " - "fromxml to be defined, a line, plane or block of points can be " - "defined"); + "Interpolates a field to a set of points. Requires fromfld, fromxml " + "to be defined, and a topts, line, plane or block of target points "); ProcessInterpPoints::ProcessInterpPoints(FieldSharedPtr f) : ProcessModule(f) { - m_config["fromxml"] = ConfigOption( false, "NotSet", "Xml file from which to interpolate field"); - ASSERTL0(m_config["fromxml"].as().compare("NotSet") != 0, - "Need to specify fromxml=file.xml"); - m_config["fromfld"] = ConfigOption( false, "NotSet", "Fld file from which to interpolate field"); - ASSERTL0(m_config["fromfld"].as().compare("NotSet") != 0, - "Need to specify fromfld=file.fld "); + m_config["topts"] = ConfigOption( + false, "NotSet", "Pts file to which interpolate field"); + m_config["line"] = ConfigOption( + false, "NotSet", "Specify a line of N points using " + "line=N,x0,y0,z0,x1,y1,z1"); + m_config["plane"] = ConfigOption( + false, "NotSet", "Specify a plane of N1 x N2 points using " + "plane=N1,N2,x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3"); + m_config["box"] = ConfigOption( + false, "NotSet", "Specify a rectangular box of N1 x N2 x N3 points " + "using a box of points limited by box=" + "N1,N2,N3,xmin,xmax,ymin,ymax,zmin,zmax"); m_config["clamptolowervalue"] = ConfigOption(false, "-10000000", "Lower bound for interpolation value"); @@ -80,22 +90,10 @@ ProcessInterpPoints::ProcessInterpPoints(FieldSharedPtr f) : ProcessModule(f) ConfigOption(false, "10000000", "Upper bound for interpolation value"); m_config["defaultvalue"] = ConfigOption(false, "0", "Default value if point is outside domain"); - m_config["line"] = - ConfigOption(false, "NotSet", "Specify a line of N points using " - "line=N,x0,y0,z0,z1,y1,z1"); - m_config["plane"] = ConfigOption( - false, "NotSet", "Specify a plane of N1 x N2 points using " - 
"plane=N1,N2,x0,y0,z0,z1,y1,z1,x2,y2,z2,x3," - "y3,z3"); - m_config["box"] = ConfigOption( - false, "NotSet", "Specify a rectangular box of N1 x N2 x N3 points " - "using a box of points limited by box=" - "N1,N2,N3,xmin,xmax,ymin,ymax,zmin,zmax"); m_config["cp"] = ConfigOption(false, "NotSet", - "Parameters p0 and q to determine pressure coefficients " - "(box only currently)"); + "Parameters p0 and q to determine pressure coefficients"); } ProcessInterpPoints::~ProcessInterpPoints() @@ -104,366 +102,27 @@ ProcessInterpPoints::~ProcessInterpPoints() void ProcessInterpPoints::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessInterpPoints: interpolating to points..." << endl; - } - } - - int rank = m_f->m_comm->GetRank(); - int nprocs = m_f->m_comm->GetSize(); - - // Check for command line point specification if no .pts file - // specified - if (m_f->m_fieldPts == LibUtilities::NullPtsField) - { - if (m_config["line"].as().compare("NotSet") != 0) - { - string help = m_config["line"].as(); - vector values; - ASSERTL0(ParseUtils::GenerateUnOrderedVector( - m_config["line"].as().c_str(), values), - "Failed to interpret line string"); - - ASSERTL0(values.size() > 2, - "line string should contain 2 Dim+1 values " - "N,x0,y0,z0,x1,y1,z1"); - - double tmp; - ASSERTL0(std::modf(values[0], &tmp) == 0.0, "N is not an integer"); - ASSERTL0(values[0] > 1, "N is not a valid number"); - - int dim = (values.size() - 1) / 2; - int npts = values[0]; - Array > pts(dim); - - for (int i = 0; i < dim; ++i) - { - pts[i] = Array(npts); - } - - for (int i = 0; i < npts; ++i) - { - pts[0][i] = - values[1] + - i / ((NekDouble)(npts - 1)) * (values[dim + 1] - values[1]); - if (dim > 1) - { - pts[1][i] = values[2] + - i / ((NekDouble)(npts - 1)) * - (values[dim + 2] - values[2]); - - if (dim > 2) - { - pts[2][i] = values[3] + - i / ((NekDouble)(npts - 1)) * - (values[dim + 3] - values[3]); - } - } - } - - vector ppe; - 
ppe.push_back(npts); - m_f->m_fieldPts = - MemoryManager::AllocateSharedPtr(dim, - pts); - m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsLine); - m_f->m_fieldPts->SetPointsPerEdge(ppe); - } - else if (m_config["plane"].as().compare("NotSet") != 0) - { - string help = m_config["plane"].as(); - vector values; - ASSERTL0(ParseUtils::GenerateUnOrderedVector( - m_config["plane"].as().c_str(), values), - "Failed to interpret plane string"); - - ASSERTL0(values.size() > 9, - "plane string should contain 4 Dim+2 values " - "N1,N2,x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3"); - - double tmp; - ASSERTL0(std::modf(values[0], &tmp) == 0.0, "N1 is not an integer"); - ASSERTL0(std::modf(values[1], &tmp) == 0.0, "N2 is not an integer"); - - ASSERTL0(values[0] > 1, "N1 is not a valid number"); - ASSERTL0(values[1] > 1, "N2 is not a valid number"); - - int dim = (values.size() - 2) / 4; - - int npts1 = values[0]; - int npts2 = values[1]; - - Array > pts(dim); - - int totpts = npts1 * npts2; - int nlocpts = totpts / nprocs; - - if (rank < nprocs - 1) - { - for (int i = 0; i < dim; ++i) - { - pts[i] = Array(nlocpts); - } - - int cnt = 0; - int cntloc = 0; - - for (int j = 0; j < npts2; ++j) - { - for (int i = 0; i < npts1; ++i) - { - - if ((cnt >= rank * nlocpts) && - (cnt < (rank + 1) * nlocpts)) - { - pts[0][cntloc] = - (values[2] + - i / ((NekDouble)(npts1 - 1)) * - (values[dim + 2] - values[2])) * - (1.0 - j / ((NekDouble)(npts2 - 1))) + - (values[3 * dim + 2] + - i / ((NekDouble)(npts1 - 1)) * - (values[2 * dim + 2] - - values[3 * dim + 2])) * - (j / ((NekDouble)(npts2 - 1))); - - pts[1][cntloc] = - (values[3] + - i / ((NekDouble)(npts1 - 1)) * - (values[dim + 3] - values[3])) * - (1.0 - j / ((NekDouble)(npts2 - 1))) + - (values[3 * dim + 3] + - i / ((NekDouble)(npts1 - 1)) * - (values[2 * dim + 3] - - values[3 * dim + 3])) * - (j / ((NekDouble)(npts2 - 1))); - - if (dim > 2) - { - pts[2][cntloc] = - (values[4] + - i / ((NekDouble)(npts1 - 1)) * - (values[dim + 4] - values[4])) * - 
(1.0 - j / ((NekDouble)(npts2 - 1))) + - (values[3 * dim + 4] + - i / ((NekDouble)(npts1 - 1)) * - (values[2 * dim + 4] - - values[3 * dim + 4])) * - (j / ((NekDouble)(npts2 - 1))); - } - cntloc++; - } - cnt++; - } - } - } - else - { - totpts = totpts - rank * nlocpts; - - for (int i = 0; i < dim; ++i) - { - pts[i] = Array(totpts); - } - - int cnt = 0; - int cntloc = 0; - - for (int j = 0; j < npts2; ++j) - { - for (int i = 0; i < npts1; ++i) - { - - if (cnt >= rank * nlocpts) - { - pts[0][cntloc] = - (values[2] + - i / ((NekDouble)(npts1 - 1)) * - (values[dim + 2] - values[2])) * - (1.0 - j / ((NekDouble)(npts2 - 1))) + - (values[3 * dim + 2] + - i / ((NekDouble)(npts1 - 1)) * - (values[2 * dim + 2] - - values[3 * dim + 2])) * - (j / ((NekDouble)(npts2 - 1))); - - pts[1][cntloc] = - (values[3] + - i / ((NekDouble)(npts1 - 1)) * - (values[dim + 3] - values[3])) * - (1.0 - j / ((NekDouble)(npts2 - 1))) + - (values[3 * dim + 3] + - i / ((NekDouble)(npts1 - 1)) * - (values[2 * dim + 3] - - values[3 * dim + 3])) * - (j / ((NekDouble)(npts2 - 1))); - - if (dim > 2) - { - pts[2][cntloc] = - (values[4] + - i / ((NekDouble)(npts1 - 1)) * - (values[dim + 4] - values[4])) * - (1.0 - j / ((NekDouble)(npts2 - 1))) + - (values[3 * dim + 4] + - i / ((NekDouble)(npts1 - 1)) * - (values[2 * dim + 4] - - values[3 * dim + 4])) * - (j / ((NekDouble)(npts2 - 1))); - } - cntloc++; - } - cnt++; - } - } - } - - vector ppe; - ppe.push_back(npts1); - ppe.push_back(npts2); - m_f->m_fieldPts = - MemoryManager::AllocateSharedPtr(dim, - pts); - m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsPlane); - m_f->m_fieldPts->SetPointsPerEdge(ppe); - } - else if (m_config["box"].as().compare("NotSet") != 0) - { - string help = m_config["box"].as(); - vector values; - ASSERTL0(ParseUtils::GenerateUnOrderedVector( - m_config["box"].as().c_str(), values), - "Failed to interpret box string"); - - ASSERTL0(values.size() == 9, - "box string should contain 9 values " - 
"N1,N2,N3,xmin,xmax,ymin,ymax,zmin,zmax"); - - int dim = 3; - - int npts1 = values[0]; - int npts2 = values[1]; - int npts3 = values[2]; - - Array > pts(dim); - - int totpts = npts1 * npts2 * npts3; - int nlocpts = totpts / nprocs; - - if (rank < nprocs - 1) // for rank 0 to nproc-1 - { - totpts = nlocpts; - - for (int i = 0; i < dim; ++i) - { - pts[i] = Array(totpts); - } - - int cnt = 0; - int cntloc = 0; - - for (int k = 0; k < npts3; ++k) - { - for (int j = 0; j < npts2; ++j) - { - for (int i = 0; i < npts1; ++i) - { - if ((cnt >= rank * nlocpts) && - (cnt < (rank + 1) * nlocpts)) - { - pts[0][cntloc] = values[3] + - i / ((NekDouble)(npts1 - 1)) * - (values[4] - values[3]); - pts[1][cntloc] = values[5] + - j / ((NekDouble)(npts2 - 1)) * - (values[6] - values[5]); - pts[2][cntloc] = values[7] + - k / ((NekDouble)(npts3 - 1)) * - (values[8] - values[7]); - cntloc++; - } - cnt++; - } - } - } - } - else // give last rank all remaining points - { - totpts = totpts - rank * nlocpts; - - for (int i = 0; i < dim; ++i) - { - pts[i] = Array(totpts); - } - - int cnt = 0; - int cntloc = 0; - - for (int k = 0; k < npts3; ++k) - { - for (int j = 0; j < npts2; ++j) - { - for (int i = 0; i < npts1; ++i) - { - if (cnt >= rank * nlocpts) - { - pts[0][cntloc] = values[3] + - i / ((NekDouble)(npts1 - 1)) * - (values[4] - values[3]); - pts[1][cntloc] = values[5] + - j / ((NekDouble)(npts2 - 1)) * - (values[6] - values[5]); - pts[2][cntloc] = values[7] + - k / ((NekDouble)(npts3 - 1)) * - (values[8] - values[7]); - cntloc++; - } - cnt++; - } - } - } - } - - vector ppe; - ppe.push_back(npts1); - ppe.push_back(npts2); - ppe.push_back(npts3); - m_f->m_fieldPts = - MemoryManager::AllocateSharedPtr(dim, - pts); - m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsBox); - m_f->m_fieldPts->SetPointsPerEdge(ppe); - vector boxdim; - boxdim.assign(&values[3], &values[3] + 6); - m_f->m_fieldPts->SetBoxSize(boxdim); - } - } + CreateFieldPts(vm); FieldSharedPtr fromField = boost::shared_ptr(new 
Field()); - std::vector files; - ParseUtils::GenerateOrderedStringVector(m_config["fromxml"].as().c_str(), files); + ParseUtils::GenerateOrderedStringVector( + m_config["fromxml"].as().c_str(), files); // set up session file for from field fromField->m_session = LibUtilities::SessionReader::CreateInstance(0, 0, files); - // Set up range based on min and max of local parallel partition SpatialDomains::DomainRangeShPtr rng = MemoryManager::AllocateSharedPtr(); - int coordim = m_f->m_fieldPts->GetDim(); int npts = m_f->m_fieldPts->GetNpoints(); Array > pts; m_f->m_fieldPts->GetPts(pts); - rng->m_checkShape = false; rng->m_zmin = -1; - rng->m_zmax = 1; + rng->m_zmax = 1; rng->m_ymin = -1; - rng->m_ymax = 1; + rng->m_ymax = 1; switch (coordim) { case 3: @@ -487,53 +146,40 @@ void ProcessInterpPoints::Process(po::variables_map &vm) default: ASSERTL0(false, "too many values specified in range"); } - // setup rng parameters. fromField->m_graph = SpatialDomains::MeshGraph::Read(fromField->m_session, rng); - // Read in local from field partitions const SpatialDomains::ExpansionMap &expansions = fromField->m_graph->GetExpansions(); - Array ElementGIDs(expansions.size()); SpatialDomains::ExpansionMap::const_iterator expIt; - int i = 0; for (expIt = expansions.begin(); expIt != expansions.end(); ++expIt) { ElementGIDs[i++] = expIt->second->m_geomShPtr->GetGlobalID(); } - // check to see that we do have some element in the domain since // possibly all points could be outside of the domain ASSERTL0(i > 0, "No elements are set. 
Are the interpolated points " "within the domain given by the xml files?"); - string fromfld = m_config["fromfld"].as(); m_f->FieldIOForFile(fromfld)->Import( fromfld, fromField->m_fielddef, fromField->m_data, LibUtilities::NullFieldMetaDataMap, ElementGIDs); - int NumHomogeneousDir = fromField->m_fielddef[0]->m_numHomogeneousDir; - //---------------------------------------------- // Set up Expansion information to use mode order from field fromField->m_graph->SetExpansions(fromField->m_fielddef); - int nfields = fromField->m_fielddef[0]->m_fields.size(); - fromField->m_exp.resize(nfields); fromField->m_exp[0] = fromField->SetUpFirstExpList(NumHomogeneousDir, true); - m_f->m_exp.resize(nfields); - // declare auxiliary fields. for (i = 1; i < nfields; ++i) { fromField->m_exp[i] = fromField->AppendExpList(NumHomogeneousDir); } - // load field into expansion in fromfield. for (int j = 0; j < nfields; ++j) { @@ -546,10 +192,10 @@ void ProcessInterpPoints::Process(po::variables_map &vm) } fromField->m_exp[j]->BwdTrans(fromField->m_exp[j]->GetCoeffs(), fromField->m_exp[j]->UpdatePhys()); - Array newPts(m_f->m_fieldPts->GetNpoints()); m_f->m_fieldPts->AddField(newPts, fromField->m_fielddef[0]->m_fields[j]); + m_f->m_variables.push_back(fromField->m_fielddef[0]->m_fields[j]); } NekDouble clamp_low = m_config["clamptolowervalue"].as(); @@ -565,6 +211,220 @@ void ProcessInterpPoints::Process(po::variables_map &vm) } } +void ProcessInterpPoints::CreateFieldPts(po::variables_map &vm) +{ + int rank = m_f->m_comm->GetRank(); + int nprocs = m_f->m_comm->GetSize(); + // Check for command line point specification + if (m_config["topts"].as().compare("NotSet") != 0) + { + string inFile = m_config["topts"].as(); + + LibUtilities::PtsIOSharedPtr ptsIO = + MemoryManager::AllocateSharedPtr(m_f->m_comm); + + ptsIO->Import(inFile, m_f->m_fieldPts); + } + else if (m_config["line"].as().compare("NotSet") != 0) + { + vector values; + ASSERTL0(ParseUtils::GenerateUnOrderedVector( + 
m_config["line"].as().c_str(), values), + "Failed to interpret line string"); + + ASSERTL0(values.size() > 2, + "line string should contain 2*Dim+1 values " + "N,x0,y0,z0,x1,y1,z1"); + + double tmp; + ASSERTL0(std::modf(values[0], &tmp) == 0.0, "N is not an integer"); + ASSERTL0(values[0] > 1, "N is not a valid number"); + + int dim = (values.size() - 1) / 2; + int npts = values[0]; + + // Information for partitioning + int ptsPerProc = npts / nprocs; + int extraPts = (rank < nprocs - 1) ? 0: npts % nprocs; + int locPts = ptsPerProc + extraPts; + int start = rank * ptsPerProc; + int end = start + locPts; + + Array > pts(dim); + Array delta(dim); + for (int i = 0; i < dim; ++i) + { + pts[i] = Array(locPts); + delta[i] = (values[dim + i + 1] - values[ i + 1]) / (npts - 1); + } + + + + for (int i = 0, cntLoc = 0; i < npts; ++i) + { + if (i >= start && i < end) + { + for (int n = 0; n < dim; ++n) + { + pts[n][cntLoc] = values[n+1] + i * delta[n]; + } + ++cntLoc; + } + } + + vector ppe; + ppe.push_back(npts); + m_f->m_fieldPts = + MemoryManager::AllocateSharedPtr(dim, + pts); + m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsLine); + m_f->m_fieldPts->SetPointsPerEdge(ppe); + } + else if (m_config["plane"].as().compare("NotSet") != 0) + { + vector values; + ASSERTL0(ParseUtils::GenerateUnOrderedVector( + m_config["plane"].as().c_str(), values), + "Failed to interpret plane string"); + + ASSERTL0(values.size() > 9, + "plane string should contain 4 Dim+2 values " + "N1,N2,x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3"); + + double tmp; + ASSERTL0(std::modf(values[0], &tmp) == 0.0, "N1 is not an integer"); + ASSERTL0(std::modf(values[1], &tmp) == 0.0, "N2 is not an integer"); + + ASSERTL0(values[0] > 1, "N1 is not a valid number"); + ASSERTL0(values[1] > 1, "N2 is not a valid number"); + + int dim = (values.size() - 2) / 4; + + Array npts(2); + npts[0] = values[0]; + npts[1] = values[1]; + + int totpts = npts[0] * npts[1]; + + // Information for partitioning + int ptsPerProc = totpts / 
nprocs; + int extraPts = (rank < nprocs - 1) ? 0: totpts % nprocs; + int locPts = ptsPerProc + extraPts; + int start = rank * ptsPerProc; + int end = start + locPts; + + Array > pts(dim); + Array delta1(dim); + Array delta2(dim); + for (int i = 0; i < dim; ++i) + { + pts[i] = Array(locPts); + delta1[i] = (values[2+1*dim + i] - values[2+0*dim + i])/(npts[0]-1); + delta2[i] = (values[2+2*dim + i] - values[2+3*dim + i])/(npts[0]-1); + } + + for (int j = 0, cnt = 0, cntLoc = 0; j < npts[1]; ++j) + { + for (int i = 0; i < npts[0]; ++i, ++cnt) + { + if (cnt >= start && cnt < end) + { + for (int n = 0; n < dim; ++n) + { + pts[n][cntLoc] = + (values[2+n] + i * delta1[n]) * + (1.0 - j / ((NekDouble)(npts[1]-1))) + + (values[2 + 3*dim + n] + i * delta2[n]) * + ( j / ((NekDouble)(npts[1]-1))); + } + ++cntLoc; + } + } + } + + vector ppe; + ppe.push_back(npts[0]); + ppe.push_back(npts[1]); + m_f->m_fieldPts = + MemoryManager::AllocateSharedPtr(dim, + pts); + m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsPlane); + m_f->m_fieldPts->SetPointsPerEdge(ppe); + } + else if (m_config["box"].as().compare("NotSet") != 0) + { + vector values; + ASSERTL0(ParseUtils::GenerateUnOrderedVector( + m_config["box"].as().c_str(), values), + "Failed to interpret box string"); + + ASSERTL0(values.size() == 9, + "box string should contain 9 values " + "N1,N2,N3,xmin,xmax,ymin,ymax,zmin,zmax"); + + int dim = 3; + Array npts(3); + npts[0] = values[0]; + npts[1] = values[1]; + npts[2] = values[2]; + + int totpts = npts[0]*npts[1]*npts[2]; + + Array > pts(dim); + Array delta(dim); + + // Information for partitioning + int ptsPerProc = totpts / nprocs; + int extraPts = (rank < nprocs - 1) ? 
0: totpts % nprocs; + int locPts = ptsPerProc + extraPts; + int start = rank * ptsPerProc; + int end = start + locPts; + + for (int i = 0; i < dim; ++i) + { + pts[i] = Array(locPts); + delta[i] = (values[4 + 2*i] - values[3 + 2*i]) / (npts[i] - 1); + } + + + + for (int k = 0, cnt = 0, cntLoc = 0; k < npts[2]; ++k) + { + for (int j = 0; j < npts[1]; ++j) + { + for (int i = 0; i < npts[0]; ++i, ++cnt) + { + if (cnt >= start && cnt < end) + { + pts[0][cntLoc] = values[3] + i * delta[0]; + pts[1][cntLoc] = values[5] + j * delta[1]; + pts[2][cntLoc] = values[7] + k * delta[2]; + ++cntLoc; + } + } + } + } + + vector ppe; + ppe.push_back(npts[0]); + ppe.push_back(npts[1]); + ppe.push_back(npts[2]); + m_f->m_fieldPts = + MemoryManager::AllocateSharedPtr(dim, + pts); + m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsBox); + m_f->m_fieldPts->SetPointsPerEdge(ppe); + vector boxdim; + boxdim.assign(&values[3], &values[3] + 6); + m_f->m_fieldPts->SetBoxSize(boxdim); + } + else + { + ASSERTL0(false, + "Missing target points for ProcessInterpPoints."); + } +} + void ProcessInterpPoints::InterpolateFieldToPts( vector &field0, LibUtilities::PtsFieldSharedPtr &pts, @@ -683,9 +543,11 @@ void ProcessInterpPoints::calcCp0() if(pfield != -1) { pts->AddField(data[0], "Cp"); + m_f->m_variables.push_back("Cp"); if(velid.size()) { pts->AddField(data[1], "Cp0"); + m_f->m_variables.push_back("Cp0"); } } } diff --git a/library/FieldUtils/ProcessModules/ProcessInterpPoints.h b/library/FieldUtils/ProcessModules/ProcessInterpPoints.h index f0f72057d4aff48fdf20645b374fd6f7d2435853..987fe76264430fb47be4a1e3feffd7fbfcd544e3 100644 --- a/library/FieldUtils/ProcessModules/ProcessInterpPoints.h +++ b/library/FieldUtils/ProcessModules/ProcessInterpPoints.h @@ -71,7 +71,19 @@ public: return "ProcessInterpPoints"; } + virtual std::string GetModuleDescription() + { + return "Interpolating to points"; + } + + virtual ModulePriority GetModulePriority() + { + return eCreatePts; + } + private: + void 
CreateFieldPts(po::variables_map &vm); + void InterpolateFieldToPts(vector &field0, LibUtilities::PtsFieldSharedPtr &pts, NekDouble clamp_low, diff --git a/library/FieldUtils/ProcessModules/ProcessInterpPtsToPts.cpp b/library/FieldUtils/ProcessModules/ProcessInterpPtsToPts.cpp new file mode 100644 index 0000000000000000000000000000000000000000..69a1efc969413caad1099268ba43283871b1de40 --- /dev/null +++ b/library/FieldUtils/ProcessModules/ProcessInterpPtsToPts.cpp @@ -0,0 +1,475 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// File: ProcessInterpPtsToPts.cpp +// +// For more information, please see: http://www.nektar.info/ +// +// The MIT License +// +// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA), +// Department of Aeronautics, Imperial College London (UK), and Scientific +// Computing and Imaging Institute, University of Utah (USA). +// +// License for the specific language governing rights and limitations under +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// Description: Interpolate field to a series of specified points. +// +//////////////////////////////////////////////////////////////////////////////// + +#include +#include +using namespace std; + +#include "ProcessInterpPtsToPts.h" + +#include +#include +#include +#include +#include +#include + +namespace Nektar +{ +namespace FieldUtils +{ + +ModuleKey ProcessInterpPtsToPts::className = + GetModuleFactory().RegisterCreatorFunction( + ModuleKey(eProcessModule, "interpptstopts"), + ProcessInterpPtsToPts::create, + "Interpolates a set of points to another, requires fromfld and " + "fromxml to be defined, a line, plane or block of points can be " + "defined"); + +ProcessInterpPtsToPts::ProcessInterpPtsToPts(FieldSharedPtr f) : ProcessModule(f) +{ + m_config["topts"] = ConfigOption( + false, "NotSet", "Pts file to which interpolate field"); + m_config["line"] = ConfigOption( + false, "NotSet", "Specify a line of N points using " + "line=N,x0,y0,z0,z1,y1,z1"); + m_config["plane"] = ConfigOption( + false, "NotSet", "Specify a plane of N1 x N2 points using " + "plane=N1,N2,x0,y0,z0,z1,y1,z1,x2,y2,z2,x3,y3,z3"); + m_config["box"] = ConfigOption( + false, "NotSet", "Specify a rectangular box of N1 x N2 x N3 points " + "using a box of points limited by box=" + "N1,N2,N3,xmin,xmax,ymin,ymax,zmin,zmax"); + + m_config["clamptolowervalue"] = + ConfigOption(false, "-10000000", "Lower bound for interpolation value"); + m_config["clamptouppervalue"] = + ConfigOption(false, "10000000", "Upper bound for interpolation value"); + m_config["defaultvalue"] = + ConfigOption(false, "0", "Default value if point is outside domain"); + + m_config["cp"] = + ConfigOption(false, "NotSet", + "Parameters p0 and q to 
determine pressure coefficients"); +} + +ProcessInterpPtsToPts::~ProcessInterpPtsToPts() +{ +} + +void ProcessInterpPtsToPts::Process(po::variables_map &vm) +{ + ASSERTL0(m_f->m_fieldPts != LibUtilities::NullPtsField, + "Should have a PtsField for ProcessInterpPtsToPts."); + ASSERTL0(m_f->m_comm->GetSize() == 1, + "ProcessInterpPtsToPts not implemented in parallel."); + + // Move m_f->m_fieldPts + LibUtilities::PtsFieldSharedPtr oldPts = m_f->m_fieldPts; + m_f->m_fieldPts = LibUtilities::NullPtsField; + + // Create new fieldPts + CreateFieldPts(vm); + + int nfields = m_f->m_variables.size(); + for (int j = 0; j < nfields; ++j) + { + Array newPts(m_f->m_fieldPts->GetNpoints()); + m_f->m_fieldPts->AddField(newPts, m_f->m_variables[j]); + } + + NekDouble clamp_low = m_config["clamptolowervalue"].as(); + NekDouble clamp_up = m_config["clamptouppervalue"].as(); + NekDouble def_value = m_config["defaultvalue"].as(); + + InterpolatePtsToPts(oldPts, m_f->m_fieldPts, clamp_low, + clamp_up, def_value); + + if (!boost::iequals(m_config["cp"].as(), "NotSet")) + { + calcCp0(); + } +} + +void ProcessInterpPtsToPts::CreateFieldPts(po::variables_map &vm) +{ + int rank = m_f->m_comm->GetRank(); + int nprocs = m_f->m_comm->GetSize(); + // Check for command line point specification + if (m_config["topts"].as().compare("NotSet") != 0) + { + string inFile = m_config["topts"].as(); + + LibUtilities::PtsIOSharedPtr ptsIO = + MemoryManager::AllocateSharedPtr(m_f->m_comm); + + ptsIO->Import(inFile, m_f->m_fieldPts); + } + else if (m_config["line"].as().compare("NotSet") != 0) + { + vector values; + ASSERTL0(ParseUtils::GenerateUnOrderedVector( + m_config["line"].as().c_str(), values), + "Failed to interpret line string"); + + ASSERTL0(values.size() > 2, + "line string should contain 2*Dim+1 values " + "N,x0,y0,z0,x1,y1,z1"); + + double tmp; + ASSERTL0(std::modf(values[0], &tmp) == 0.0, "N is not an integer"); + ASSERTL0(values[0] > 1, "N is not a valid number"); + + int dim = 
(values.size() - 1) / 2; + int npts = values[0]; + + // Information for partitioning + int ptsPerProc = npts / nprocs; + int extraPts = (rank < nprocs - 1) ? 0: npts % nprocs; + int locPts = ptsPerProc + extraPts; + int start = rank * ptsPerProc; + int end = start + locPts; + + Array > pts(dim); + Array delta(dim); + for (int i = 0; i < dim; ++i) + { + pts[i] = Array(locPts); + delta[i] = (values[dim + i + 1] - values[ i + 1]) / (npts - 1); + } + + + + for (int i = 0, cntLoc = 0; i < npts; ++i) + { + if (i >= start && i < end) + { + for (int n = 0; n < dim; ++n) + { + pts[n][cntLoc] = values[n+1] + i * delta[n]; + } + ++cntLoc; + } + } + + vector ppe; + ppe.push_back(npts); + m_f->m_fieldPts = + MemoryManager::AllocateSharedPtr(dim, + pts); + m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsLine); + m_f->m_fieldPts->SetPointsPerEdge(ppe); + } + else if (m_config["plane"].as().compare("NotSet") != 0) + { + vector values; + ASSERTL0(ParseUtils::GenerateUnOrderedVector( + m_config["plane"].as().c_str(), values), + "Failed to interpret plane string"); + + ASSERTL0(values.size() > 9, + "plane string should contain 4 Dim+2 values " + "N1,N2,x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3"); + + double tmp; + ASSERTL0(std::modf(values[0], &tmp) == 0.0, "N1 is not an integer"); + ASSERTL0(std::modf(values[1], &tmp) == 0.0, "N2 is not an integer"); + + ASSERTL0(values[0] > 1, "N1 is not a valid number"); + ASSERTL0(values[1] > 1, "N2 is not a valid number"); + + int dim = (values.size() - 2) / 4; + + Array npts(2); + npts[0] = values[0]; + npts[1] = values[1]; + + int totpts = npts[0] * npts[1]; + + // Information for partitioning + int ptsPerProc = totpts / nprocs; + int extraPts = (rank < nprocs - 1) ? 
0: totpts % nprocs; + int locPts = ptsPerProc + extraPts; + int start = rank * ptsPerProc; + int end = start + locPts; + + Array > pts(dim); + Array delta1(dim); + Array delta2(dim); + for (int i = 0; i < dim; ++i) + { + pts[i] = Array(locPts); + delta1[i] = (values[2+1*dim + i] - values[2+0*dim + i])/(npts[0]-1); + delta2[i] = (values[2+2*dim + i] - values[2+3*dim + i])/(npts[0]-1); + } + + for (int j = 0, cnt = 0, cntLoc = 0; j < npts[1]; ++j) + { + for (int i = 0; i < npts[0]; ++i, ++cnt) + { + if (cnt >= start && cnt < end) + { + for (int n = 0; n < dim; ++n) + { + pts[n][cntLoc] = + (values[2+n] + i * delta1[n]) * + (1.0 - j / ((NekDouble)(npts[1]-1))) + + (values[2 + 3*dim + n] + i * delta2[n]) * + ( j / ((NekDouble)(npts[1]-1))); + } + ++cntLoc; + } + } + } + + vector ppe; + ppe.push_back(npts[0]); + ppe.push_back(npts[1]); + m_f->m_fieldPts = + MemoryManager::AllocateSharedPtr(dim, + pts); + m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsPlane); + m_f->m_fieldPts->SetPointsPerEdge(ppe); + } + else if (m_config["box"].as().compare("NotSet") != 0) + { + vector values; + ASSERTL0(ParseUtils::GenerateUnOrderedVector( + m_config["box"].as().c_str(), values), + "Failed to interpret box string"); + + ASSERTL0(values.size() == 9, + "box string should contain 9 values " + "N1,N2,N3,xmin,xmax,ymin,ymax,zmin,zmax"); + + int dim = 3; + Array npts(3); + npts[0] = values[0]; + npts[1] = values[1]; + npts[2] = values[2]; + + int totpts = npts[0]*npts[1]*npts[2]; + + Array > pts(dim); + Array delta(dim); + + // Information for partitioning + int ptsPerProc = totpts / nprocs; + int extraPts = (rank < nprocs - 1) ? 
0: totpts % nprocs; + int locPts = ptsPerProc + extraPts; + int start = rank * ptsPerProc; + int end = start + locPts; + + for (int i = 0; i < dim; ++i) + { + pts[i] = Array(locPts); + delta[i] = (values[4 + 2*i] - values[3 + 2*i]) / (npts[i] - 1); + } + + + + for (int k = 0, cnt = 0, cntLoc = 0; k < npts[2]; ++k) + { + for (int j = 0; j < npts[1]; ++j) + { + for (int i = 0; i < npts[0]; ++i, ++cnt) + { + if (cnt >= start && cnt < end) + { + pts[0][cntLoc] = values[3] + i * delta[0]; + pts[1][cntLoc] = values[5] + j * delta[1]; + pts[2][cntLoc] = values[7] + k * delta[2]; + ++cntLoc; + } + } + } + } + + vector ppe; + ppe.push_back(npts[0]); + ppe.push_back(npts[1]); + ppe.push_back(npts[2]); + m_f->m_fieldPts = + MemoryManager::AllocateSharedPtr(dim, + pts); + m_f->m_fieldPts->SetPtsType(LibUtilities::ePtsBox); + m_f->m_fieldPts->SetPointsPerEdge(ppe); + vector boxdim; + boxdim.assign(&values[3], &values[3] + 6); + m_f->m_fieldPts->SetBoxSize(boxdim); + } + else + { + ASSERTL0(false, + "ProcessInterpPtsToPts requires line, plane or box option."); + } +} + +void ProcessInterpPtsToPts::InterpolatePtsToPts( + LibUtilities::PtsFieldSharedPtr &fromPts, + LibUtilities::PtsFieldSharedPtr &toPts, + NekDouble clamp_low, + NekDouble clamp_up, + NekDouble def_value) +{ + ASSERTL0(toPts->GetNFields() >= fromPts->GetNFields(), + "ptField has too few fields"); + + int nfields = fromPts->GetNFields(); + + Interpolator interp; + if (m_f->m_comm->GetRank() == 0) + { + interp.SetProgressCallback(&ProcessInterpPtsToPts::PrintProgressbar, + this); + } + interp.Interpolate(fromPts, toPts); + if (m_f->m_comm->GetRank() == 0) + { + cout << endl; + } + + for (int f = 0; f < nfields; ++f) + { + for (int i = 0; i < toPts->GetNpoints(); ++i) + { + if (toPts->GetPointVal(f, i) > clamp_up) + { + toPts->SetPointVal(f, i, clamp_up); + } + else if (toPts->GetPointVal(f, i) < clamp_low) + { + toPts->SetPointVal(f, i, clamp_low); + } + } + } +} + +void ProcessInterpPtsToPts::calcCp0() +{ + 
LibUtilities::PtsFieldSharedPtr pts = m_f->m_fieldPts; + int dim = pts->GetDim(); + int nq1 = pts->GetNpoints(); + int r, f; + int pfield = -1; + NekDouble p0,qinv; + vector velid; + + vector values; + ASSERTL0(ParseUtils::GenerateUnOrderedVector( + m_config["cp"].as().c_str(),values), + "Failed to interpret cp string"); + + ASSERTL0(values.size() == 2, + "cp string should contain 2 values " + "p0 and q (=1/2 rho u^2)"); + + p0 = values[0]; + qinv = 1.0/values[1]; + + for(int i = 0; i < pts->GetNFields(); ++i) + { + if(boost::iequals(pts->GetFieldName(i),"p")) + { + pfield = i; + } + + if(boost::iequals(pts->GetFieldName(i),"u")|| + boost::iequals(pts->GetFieldName(i),"v")|| + boost::iequals(pts->GetFieldName(i),"w")) + { + velid.push_back(i); + } + } + + if(pfield != -1) + { + if(!velid.size()) + { + WARNINGL0(false,"Did not find velocity components for Cp0"); + } + } + else + { + WARNINGL0(false,"Failed to find 'p' field to determine cp0"); + } + + // Allocate data storage + Array > data(2); + + for (f = 0; f < 2; ++f) + { + data[f] = Array< OneD, NekDouble>(nq1, 0.0); + } + + for (r = 0; r < nq1; r++) + { + if(pfield != -1) // calculate cp + { + data[0][r] = qinv*(pts->GetPointVal(dim + pfield, r) - p0); + + if(velid.size()) // calculate cp0 + { + NekDouble q = 0; + for(int i = 0; i < velid.size(); ++i) + { + q += 0.5*pts->GetPointVal(dim + velid[i], r)* + pts->GetPointVal(dim + velid[i], r); + } + data[1][r] = qinv*(pts->GetPointVal(dim + pfield, r)+q - p0); + } + } + } + + if(pfield != -1) + { + pts->AddField(data[0], "Cp"); + m_f->m_variables.push_back("Cp"); + if(velid.size()) + { + pts->AddField(data[1], "Cp0"); + m_f->m_variables.push_back("Cp0"); + } + } +} + +void ProcessInterpPtsToPts::PrintProgressbar(const int position, + const int goal) const +{ + LibUtilities::PrintProgressbar(position, goal, "Interpolating"); +} +} +} diff --git a/library/FieldUtils/ProcessModules/ProcessInterpPtsToPts.h b/library/FieldUtils/ProcessModules/ProcessInterpPtsToPts.h 
new file mode 100644 index 0000000000000000000000000000000000000000..6a79ce105a760b8ded53dab0286af45cf005e886 --- /dev/null +++ b/library/FieldUtils/ProcessModules/ProcessInterpPtsToPts.h @@ -0,0 +1,98 @@ +//////////////////////////////////////////////////////////////////////////////// +// +// File: ProcessInterpPtsToPts.h +// +// For more information, please see: http://www.nektar.info/ +// +// The MIT License +// +// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA), +// Department of Aeronautics, Imperial College London (UK), and Scientific +// Computing and Imaging Institute, University of Utah (USA). +// +// License for the specific language governing rights and limitations under +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// +// Description: Interp point data. 
+// +//////////////////////////////////////////////////////////////////////////////// + +#ifndef FIELDUTILS_PROCESSINTERPPOINTS +#define FIELDUTILS_PROCESSINTERPPOINTS + +#include "../Module.h" + +#include + +namespace Nektar +{ +namespace FieldUtils +{ + +/** + * @brief This processing module interpolates one field to another + */ +class ProcessInterpPtsToPts : public ProcessModule +{ +public: + /// Creates an instance of this class + static boost::shared_ptr create(FieldSharedPtr f) + { + return MemoryManager::AllocateSharedPtr(f); + } + static ModuleKey className; + + ProcessInterpPtsToPts(FieldSharedPtr f); + virtual ~ProcessInterpPtsToPts(); + + /// Write mesh to output file. + virtual void Process(po::variables_map &vm); + + void PrintProgressbar(const int position, const int goal) const; + + virtual std::string GetModuleName() + { + return "ProcessInterpPtsToPts"; + } + + virtual std::string GetModuleDescription() + { + return "Interpolating to points"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyPts; + } + +private: + void CreateFieldPts(po::variables_map &vm); + + void InterpolatePtsToPts(LibUtilities::PtsFieldSharedPtr &fromPts, + LibUtilities::PtsFieldSharedPtr &toPts, + NekDouble clamp_low, + NekDouble clamp_up, + NekDouble def_value); + + void calcCp0(); +}; +} +} + +#endif diff --git a/library/FieldUtils/ProcessModules/ProcessIsoContour.cpp b/library/FieldUtils/ProcessModules/ProcessIsoContour.cpp index a33277d93d6bfa0dd7e5b8e8c253b3bc17db3cfd..4433ad707e9de154edb8c852c37a88fbd64fdb69 100644 --- a/library/FieldUtils/ProcessModules/ProcessIsoContour.cpp +++ b/library/FieldUtils/ProcessModules/ProcessIsoContour.cpp @@ -35,18 +35,19 @@ #include #include +#include +#include +#include +#include + #include "ProcessIsoContour.h" #include +#include #include #include #include -#include -#include -#include -#include - namespace bg = boost::geometry; namespace bgi = boost::geometry::index; @@ -67,7 +68,7 @@ ModuleKey 
ProcessIsoContour::className = "smoothing"); ProcessIsoContour::ProcessIsoContour(FieldSharedPtr f) : - ProcessEquiSpacedOutput(f) + ProcessModule(f) { m_config["fieldstr"] = ConfigOption(false, "NotSet", @@ -115,38 +116,35 @@ ProcessIsoContour::~ProcessIsoContour(void) void ProcessIsoContour::Process(po::variables_map &vm) { - Timer timer; - int rank = m_f->m_comm->GetRank(); - - if(m_f->m_verbose) - { - if(rank == 0) - { - cout << "Process Contour extraction..." << endl; - timer.Start(); - } - } + bool verbose = (m_f->m_verbose && m_f->m_comm->TreatAsRankZero()); vector iso; - if(m_f->m_fieldPts.get()) // assume we have read .dat file to directly input dat file. + ASSERTL0(m_f->m_fieldPts.get(), + "Should have m_fieldPts for IsoContour."); + + if(m_f->m_fieldPts->GetPtsType() == LibUtilities::ePtsTriBlock) { - if(rank == 0) + // assume we have read .dat file to directly input dat file. + if(verbose) { - cout << "Process read iso from Field Pts" << endl; + cout << "\t Process read iso from Field Pts" << endl; } SetupIsoFromFieldPts(iso); } - else // extract isocontour from field + else if(m_f->m_fieldPts->GetPtsType() == LibUtilities::ePtsTetBlock) { - if(m_f->m_exp.size() == 0) + if(m_config["fieldstr"].m_beenSet) { - return; + string fieldName = m_config["fieldname"].as(); + m_f->m_variables.push_back(fieldName); } - // extract all fields to equi-spaced - SetupEquiSpacedField(); + if(m_f->m_fieldPts->GetNpoints() == 0) + { + return; + } int fieldid; NekDouble value; @@ -193,7 +191,10 @@ void ProcessIsoContour::Process(po::variables_map &vm) value = m_config["fieldvalue"].as(); iso = ExtractContour(fieldid,value); - + } + else + { + ASSERTL0(false, "PtsType not supported for isocontour."); } // Process isocontour @@ -201,14 +202,14 @@ void ProcessIsoContour::Process(po::variables_map &vm) bool globalcondense = m_config["globalcondense"].m_beenSet; if(globalcondense) { - if(rank == 0) + if(verbose) { - cout << "Process global condense ..." 
<< endl; + cout << "\t Process global condense ..." << endl; } int nfields = m_f->m_fieldPts->GetNFields() + m_f->m_fieldPts->GetDim(); IsoSharedPtr g_iso = MemoryManager::AllocateSharedPtr(nfields-3); - g_iso->GlobalCondense(iso,m_f->m_verbose); + g_iso->GlobalCondense(iso,verbose); iso.clear(); @@ -217,15 +218,12 @@ void ProcessIsoContour::Process(po::variables_map &vm) if(smoothing) { - Timer timersm; + LibUtilities::Timer timersm; - if(m_f->m_verbose) + if(verbose) { - if(rank == 0) - { - cout << "Process Contour smoothing ..." << endl; - timersm.Start(); - } + cout << "\t Process Contour smoothing ..." << endl; + timersm.Start(); } int niter = m_config["smoothiter"].as(); @@ -236,19 +234,16 @@ void ProcessIsoContour::Process(po::variables_map &vm) iso[i]->Smooth(niter,lambda,-mu); } - if(m_f->m_verbose) + if(verbose) { - if(rank == 0) - { - timersm.Stop(); - NekDouble cpuTime = timersm.TimePerTest(1); - - stringstream ss; - ss << cpuTime << "s"; - cout << "Process smooth CPU Time: " << setw(8) << left - << ss.str() << endl; - cpuTime = 0.0; - } + timersm.Stop(); + NekDouble cpuTime = timersm.TimePerTest(1); + + stringstream ss; + ss << cpuTime << "s"; + cout << "\t Process smooth CPU Time: " << setw(8) << left + << ss.str() << endl; + cpuTime = 0.0; } } @@ -257,16 +252,16 @@ void ProcessIsoContour::Process(po::variables_map &vm) { vector new_iso; - if(rank == 0) + if(verbose) { - cout << "Identifying separate regions [." << flush ; + cout << "\t Identifying separate regions [." 
<< flush ; } for(int i =0 ; i < iso.size(); ++i) { iso[i]->SeparateRegions(new_iso,mincontour,m_f->m_verbose); } - if(rank == 0) + if(verbose) { cout << "]" << endl << flush ; } @@ -276,22 +271,6 @@ void ProcessIsoContour::Process(po::variables_map &vm) } ResetFieldPts(iso); - - - if(m_f->m_verbose) - { - if(rank == 0) - { - timer.Stop(); - NekDouble cpuTime = timer.TimePerTest(1); - - stringstream ss; - ss << cpuTime << "s"; - cout << "Process Isocontour CPU Time: " << setw(8) << left - << ss.str() << endl; - cpuTime = 0.0; - } - } } @@ -853,14 +832,14 @@ void Iso::GlobalCondense(vector &iso, bool verbose) if(verbose) { - cout << "Process building tree ..." << endl; + cout << "\t Process building tree ..." << endl; } //Build tree bgi::rtree > rtree; rtree.insert(inPoints.begin(), inPoints.end()); - //Find neipghbours + //Find neighbours int unique_index = 0; int prog=0; for(i = 0; i < m_nvert; ++i) @@ -881,7 +860,6 @@ void Iso::GlobalCondense(vector &iso, bool verbose) } else { - // find nearest 10 points within the distance box std::vector result; rtree.query(bgi::nearest(queryPoint, 10), std::back_inserter(result)); diff --git a/library/FieldUtils/ProcessModules/ProcessIsoContour.h b/library/FieldUtils/ProcessModules/ProcessIsoContour.h index 84c228478feaf6200c56039a621916d3042586f5..7e00f6c92344708428cacea7037519efb64f2877 100644 --- a/library/FieldUtils/ProcessModules/ProcessIsoContour.h +++ b/library/FieldUtils/ProcessModules/ProcessIsoContour.h @@ -39,15 +39,6 @@ #include "../Module.h" #include "ProcessEquiSpacedOutput.h" -#include -#include -#include -#include - -namespace bg = boost::geometry; -namespace bgi = boost::geometry::index; - - namespace Nektar { namespace FieldUtils @@ -235,7 +226,7 @@ class IsoVertex /** * @brief This processing module extracts an isocontour */ -class ProcessIsoContour : public ProcessEquiSpacedOutput +class ProcessIsoContour : public ProcessModule { public: /// Creates an instance of this class @@ -251,6 +242,21 @@ class 
ProcessIsoContour : public ProcessEquiSpacedOutput /// Write mesh to output file. virtual void Process(po::variables_map &vm); + virtual std::string GetModuleName() + { + return "ProcessIsoContour"; + } + + virtual std::string GetModuleDescription() + { + return "Extracting contour"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyPts; + } + protected: ProcessIsoContour(){}; void ResetFieldPts(vector &iso); diff --git a/library/FieldUtils/ProcessModules/ProcessJacobianEnergy.cpp b/library/FieldUtils/ProcessModules/ProcessJacobianEnergy.cpp index 8d1d76f6a601edffc1ebf069408473c40ba9e5a8..657adb4ac62095d08326c183677ca55fb9314fed 100644 --- a/library/FieldUtils/ProcessModules/ProcessJacobianEnergy.cpp +++ b/library/FieldUtils/ProcessModules/ProcessJacobianEnergy.cpp @@ -66,27 +66,40 @@ ProcessJacobianEnergy::~ProcessJacobianEnergy() void ProcessJacobianEnergy::Process(po::variables_map &vm) { - if (m_f->m_verbose) + int nfields = m_f->m_variables.size(); + m_f->m_variables.push_back("jacenergy"); + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessJacobianEnergy: Processing Jacobian..." 
<< endl; - } + return; + } + + int NumHomogeneousDir = m_f->m_numHomogeneousDir; + MultiRegions::ExpListSharedPtr exp; + + if (nfields) + { + m_f->m_exp.resize(nfields + 1); + exp = m_f->AppendExpList(NumHomogeneousDir); + + m_f->m_exp[nfields] = exp; + } + else + { + exp = m_f->m_exp[0]; } - Array phys = m_f->m_exp[0]->UpdatePhys(); - Array coeffs = m_f->m_exp[0]->UpdateCoeffs(); - Array tmp, tmp1; + Array phys = exp->UpdatePhys(); + Array coeffs = exp->UpdateCoeffs(); + Array tmp; - for (int i = 0; i < m_f->m_exp[0]->GetExpSize(); ++i) + for (int i = 0; i < exp->GetExpSize(); ++i) { // copy Jacobian into field - StdRegions::StdExpansionSharedPtr Elmt = m_f->m_exp[0]->GetExp(i); + StdRegions::StdExpansionSharedPtr Elmt = exp->GetExp(i); - int ncoeffs = Elmt->GetNcoeffs(); int nquad = Elmt->GetTotPoints(); - int coeffoffset = m_f->m_exp[0]->GetCoeff_Offset(i); - Array coeffs1(ncoeffs); + int coeffoffset = exp->GetCoeff_Offset(i); Array Jac = Elmt->GetMetricInfo()->GetJac(Elmt->GetPointsKeys()); if (Elmt->GetMetricInfo()->GetGtype() == SpatialDomains::eRegular) @@ -113,19 +126,7 @@ void ProcessJacobianEnergy::Process(po::variables_map &vm) Elmt->FwdTrans(phys, tmp = coeffs + coeffoffset); } - - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for (int i = 0; i < FieldDef.size(); ++i) - { - FieldDef[i]->m_fields.push_back("JacobianEnergy"); - m_f->m_exp[0]->AppendFieldData(FieldDef[i], FieldData[i]); - } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; + exp->BwdTrans(coeffs, phys); } } } diff --git a/library/FieldUtils/ProcessModules/ProcessJacobianEnergy.h b/library/FieldUtils/ProcessModules/ProcessJacobianEnergy.h index 0e9e0372a6cad9bdd750d2673001ded0f8c31eac..fcdc98fac8970612bd1d135ccf0dd6e6cd78ed5b 100644 --- a/library/FieldUtils/ProcessModules/ProcessJacobianEnergy.h +++ b/library/FieldUtils/ProcessModules/ProcessJacobianEnergy.h @@ -65,6 +65,16 @@ public: return "ProcessJacobianEnergy"; } 
+ virtual std::string GetModuleDescription() + { + return "Processing Jacobian"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + private: }; } diff --git a/library/FieldUtils/ProcessModules/ProcessMapping.cpp b/library/FieldUtils/ProcessModules/ProcessMapping.cpp index 2b7deef56852990b3b30df825d458204fff1aaa6..dcb49eba34deb45d85759a1a0969d5adc575720d 100644 --- a/library/FieldUtils/ProcessModules/ProcessMapping.cpp +++ b/library/FieldUtils/ProcessModules/ProcessMapping.cpp @@ -62,25 +62,29 @@ ProcessMapping::~ProcessMapping() void ProcessMapping::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessMapping: Applying mapping to field..." << endl; - } - } - // Determine dimensions of mesh, solution, etc... int npoints = m_f->m_exp[0]->GetNpoints(); int expdim = m_f->m_graph->GetMeshDimension(); int spacedim = expdim; - if ((m_f->m_fielddef[0]->m_numHomogeneousDir) == 1 || - (m_f->m_fielddef[0]->m_numHomogeneousDir) == 2) + if ((m_f->m_numHomogeneousDir) == 1 || (m_f->m_numHomogeneousDir) == 2) { spacedim = 3; } - int nfields = m_f->m_fielddef[0]->m_fields.size(); + int nfields = m_f->m_variables.size(); int addfields = expdim; + + string fieldNames[3] = {"xCoord", "yCoord", "zCoord"}; + for (int i = 0; i < addfields; ++i) + { + m_f->m_variables.push_back(fieldNames[i]); + } + + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) + { + return; + } + m_f->m_exp.resize(nfields + addfields); // Load mapping @@ -135,41 +139,15 @@ void ProcessMapping::Process(po::variables_map &vm) mapping->GetCartesianCoordinates(coords[0], coords[1], coords[2]); // Add new information to m_f - string fieldNames[3] = {"xCoord", "yCoord", "zCoord"}; - vector outname; for (int i = 0; i < addfields; ++i) { m_f->m_exp[nfields + i] = - m_f->AppendExpList(m_f->m_fielddef[0]->m_numHomogeneousDir); - m_f->m_exp[nfields + i]->UpdatePhys() = coords[i]; + 
m_f->AppendExpList(m_f->m_numHomogeneousDir); + Vmath::Vcopy(npoints, coords[i], 1, + m_f->m_exp[nfields + i]->UpdatePhys(), 1); m_f->m_exp[nfields + i]->FwdTrans_IterPerExp( coords[i], m_f->m_exp[nfields + i]->UpdateCoeffs()); - outname.push_back(fieldNames[i]); } - - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for (int j = 0; j < nfields + addfields; ++j) - { - for (int i = 0; i < FieldDef.size(); ++i) - { - if (j >= nfields) - { - FieldDef[i]->m_fields.push_back(outname[j - nfields]); - } - else - { - FieldDef[i]->m_fields.push_back( - m_f->m_fielddef[0]->m_fields[j]); - } - m_f->m_exp[j]->AppendFieldData(FieldDef[i], FieldData[i]); - } - } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } GlobalMapping::MappingSharedPtr ProcessMapping::GetMapping(FieldSharedPtr f) @@ -195,12 +173,7 @@ GlobalMapping::MappingSharedPtr ProcessMapping::GetMapping(FieldSharedPtr f) // Get field information int npoints = f->m_exp[0]->GetNpoints(); int expdim = f->m_graph->GetMeshDimension(); - int spacedim = expdim; - if ((f->m_fielddef[0]->m_numHomogeneousDir) == 1 || - (f->m_fielddef[0]->m_numHomogeneousDir) == 2) - { - spacedim = 3; - } + int spacedim = expdim + f->m_numHomogeneousDir; // Declare coordinates storage Array > coords_new(3); diff --git a/library/FieldUtils/ProcessModules/ProcessMapping.h b/library/FieldUtils/ProcessModules/ProcessMapping.h index e44d35c4f84c45347ddc60ba3d0486b8dafb8c01..718aed10ee3e4951d09cb41be2162b8e631489ca 100644 --- a/library/FieldUtils/ProcessModules/ProcessMapping.h +++ b/library/FieldUtils/ProcessModules/ProcessMapping.h @@ -69,10 +69,18 @@ public: return "ProcessMapping"; } + virtual std::string GetModuleDescription() + { + return "Applying mapping to field"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + static GlobalMapping::MappingSharedPtr GetMapping(FieldSharedPtr f); -private: - FieldSharedPtr m_fromField; }; } } diff --git 
a/library/FieldUtils/ProcessModules/ProcessMeanMode.cpp b/library/FieldUtils/ProcessModules/ProcessMeanMode.cpp index 704c5ad7042ed110f25c24d54e06424ac8ed7e23..25b443d93404045438e7e58fd91d3b7c18187eca 100644 --- a/library/FieldUtils/ProcessModules/ProcessMeanMode.cpp +++ b/library/FieldUtils/ProcessModules/ProcessMeanMode.cpp @@ -63,14 +63,6 @@ ProcessMeanMode::~ProcessMeanMode() void ProcessMeanMode::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessMeanMode: Extracting mean mode..." << endl; - } - } - // Set parameters for mean mode RegisterConfig("planeid", "0"); RegisterConfig("wavespace", "1"); diff --git a/library/FieldUtils/ProcessModules/ProcessMeanMode.h b/library/FieldUtils/ProcessModules/ProcessMeanMode.h index a17a100d8a4005b4327574b3645137d0a1328db2..58b1e646ba2f53bee83b9ebb056c87c1602b3878 100644 --- a/library/FieldUtils/ProcessModules/ProcessMeanMode.h +++ b/library/FieldUtils/ProcessModules/ProcessMeanMode.h @@ -67,6 +67,17 @@ public: { return "ProcessMeanMode"; } + + virtual std::string GetModuleDescription() + { + return "Extracting mean mode"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessMultiShear.cpp b/library/FieldUtils/ProcessModules/ProcessMultiShear.cpp index 420b109ae430fcfb5c2aae4918c6c8908bde83ca..8e96163e4eece09b7dcf3093581ff7da737e0e09 100644 --- a/library/FieldUtils/ProcessModules/ProcessMultiShear.cpp +++ b/library/FieldUtils/ProcessModules/ProcessMultiShear.cpp @@ -61,11 +61,6 @@ ProcessMultiShear::ProcessMultiShear(FieldSharedPtr f) : ProcessModule(f) m_config["fromfld"] = ConfigOption( false, "NotSet", "First fld file. 
First underscore flags position of id in name."); - - ASSERTL0(m_config["fromfld"].as().compare("NotSet") != 0, - "Need to specify fromfld=file.fld "); - - m_f->m_fldToBnd = false; } ProcessMultiShear::~ProcessMultiShear() @@ -74,22 +69,22 @@ ProcessMultiShear::~ProcessMultiShear() void ProcessMultiShear::Process(po::variables_map &vm) { - if (m_f->m_verbose) + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessMultiShear: Calculating shear stress metrics..." - << endl; - } + return; } + ASSERTL0(m_config["fromfld"].as().compare("NotSet") != 0, + "Need to specify fromfld=file.fld "); + int nstart, i, j, nfields=0; bool wssg = false; NekDouble nfld = m_config["N"].as(); string fromfld, basename, endname, nstartStr; stringstream filename; vector infiles(nfld); - vector > m_fromField(nfld); + vector > fromField(nfld); // Set up list of input fld files. fromfld = m_config["fromfld"].as(); @@ -113,9 +108,9 @@ void ProcessMultiShear::Process(po::variables_map &vm) for (i = 0; i < nfld; ++i) { - m_fromField[i] = boost::shared_ptr(new Field()); - m_fromField[i]->m_session = m_f->m_session; - m_fromField[i]->m_graph = m_f->m_graph; + fromField[i] = boost::shared_ptr(new Field()); + fromField[i]->m_session = m_f->m_session; + fromField[i]->m_graph = m_f->m_graph; } // Import all fld files. 
@@ -131,19 +126,19 @@ void ProcessMultiShear::Process(po::variables_map &vm) m_f->m_exp[0]->GetExp(j)->GetGeom()->GetGlobalID(); } m_f->FieldIOForFile(infiles[i])->Import( - infiles[i], m_fromField[i]->m_fielddef, m_fromField[i]->m_data, + infiles[i], fromField[i]->m_fielddef, fromField[i]->m_data, LibUtilities::NullFieldMetaDataMap, ElementGIDs); } else { m_f->FieldIOForFile(infiles[i])->Import( - infiles[i], m_fromField[i]->m_fielddef, m_fromField[i]->m_data, + infiles[i], fromField[i]->m_fielddef, fromField[i]->m_data, LibUtilities::NullFieldMetaDataMap); } - nfields = m_fromField[i]->m_fielddef[0]->m_fields.size(); + nfields = fromField[i]->m_fielddef[0]->m_fields.size(); int NumHomogeneousDir = - m_fromField[i]->m_fielddef[0]->m_numHomogeneousDir; + fromField[i]->m_fielddef[0]->m_numHomogeneousDir; if (nfields == 5 || nfields == 7) { @@ -151,36 +146,36 @@ void ProcessMultiShear::Process(po::variables_map &vm) } // Set up Expansion information to use mode order from field - m_fromField[i]->m_graph->SetExpansions(m_fromField[i]->m_fielddef); + fromField[i]->m_graph->SetExpansions(fromField[i]->m_fielddef); // Set up expansions, and extract data. 
- m_fromField[i]->m_exp.resize(nfields); - m_fromField[i]->m_exp[0] = - m_fromField[i]->SetUpFirstExpList(NumHomogeneousDir, true); + fromField[i]->m_exp.resize(nfields); + fromField[i]->m_exp[0] = + fromField[i]->SetUpFirstExpList(NumHomogeneousDir, true); for (j = 1; j < nfields; ++j) { - m_fromField[i]->m_exp[j] = m_f->AppendExpList(NumHomogeneousDir); + fromField[i]->m_exp[j] = m_f->AppendExpList(NumHomogeneousDir); } for (j = 0; j < nfields; ++j) { - for (int k = 0; k < m_fromField[i]->m_data.size(); ++k) + for (int k = 0; k < fromField[i]->m_data.size(); ++k) { - m_fromField[i]->m_exp[j]->ExtractDataToCoeffs( - m_fromField[i]->m_fielddef[k], m_fromField[i]->m_data[k], - m_fromField[i]->m_fielddef[k]->m_fields[j], - m_fromField[i]->m_exp[j]->UpdateCoeffs()); + fromField[i]->m_exp[j]->ExtractDataToCoeffs( + fromField[i]->m_fielddef[k], fromField[i]->m_data[k], + fromField[i]->m_fielddef[k]->m_fields[j], + fromField[i]->m_exp[j]->UpdateCoeffs()); } - m_fromField[i]->m_exp[j]->BwdTrans( - m_fromField[i]->m_exp[j]->GetCoeffs(), - m_fromField[i]->m_exp[j]->UpdatePhys()); + fromField[i]->m_exp[j]->BwdTrans( + fromField[i]->m_exp[j]->GetCoeffs(), + fromField[i]->m_exp[j]->UpdatePhys()); } } int spacedim = m_f->m_graph->GetSpaceDimension(); - if ((m_fromField[0]->m_fielddef[0]->m_numHomogeneousDir) == 1 || - (m_fromField[0]->m_fielddef[0]->m_numHomogeneousDir) == 2) + if ((fromField[0]->m_fielddef[0]->m_numHomogeneousDir) == 1 || + (fromField[0]->m_fielddef[0]->m_numHomogeneousDir) == 2) { spacedim = 3; } @@ -191,7 +186,7 @@ void ProcessMultiShear::Process(po::variables_map &vm) nout = 6; } - int npoints = m_fromField[0]->m_exp[0]->GetNpoints(); + int npoints = fromField[0]->m_exp[0]->GetNpoints(); Array > normTemporalMeanVec(spacedim), normCrossDir(spacedim), outfield(nout), dtemp(spacedim); Array TemporalMeanMag(npoints, 0.0), @@ -218,7 +213,7 @@ void ProcessMultiShear::Process(po::variables_map &vm) { for (j = 0; j < spacedim; ++j) { - Vmath::Vadd(npoints, 
m_fromField[i]->m_exp[j]->GetPhys(), 1, + Vmath::Vadd(npoints, fromField[i]->m_exp[j]->GetPhys(), 1, normTemporalMeanVec[j], 1, normTemporalMeanVec[j], 1); } } @@ -243,21 +238,21 @@ void ProcessMultiShear::Process(po::variables_map &vm) if (wssg) // cross product with normals to obtain direction parallel to // temporal mean vector. { - Vmath::Vmul(npoints, m_fromField[0]->m_exp[nfields - 1]->GetPhys(), 1, + Vmath::Vmul(npoints, fromField[0]->m_exp[nfields - 1]->GetPhys(), 1, normTemporalMeanVec[1], 1, normCrossDir[0], 1); - Vmath::Vvtvm(npoints, m_fromField[0]->m_exp[nfields - 2]->GetPhys(), 1, + Vmath::Vvtvm(npoints, fromField[0]->m_exp[nfields - 2]->GetPhys(), 1, normTemporalMeanVec[2], 1, normCrossDir[0], 1, normCrossDir[0], 1); - Vmath::Vmul(npoints, m_fromField[0]->m_exp[nfields - 3]->GetPhys(), 1, + Vmath::Vmul(npoints, fromField[0]->m_exp[nfields - 3]->GetPhys(), 1, normTemporalMeanVec[2], 1, normCrossDir[1], 1); - Vmath::Vvtvm(npoints, m_fromField[0]->m_exp[nfields - 1]->GetPhys(), 1, + Vmath::Vvtvm(npoints, fromField[0]->m_exp[nfields - 1]->GetPhys(), 1, normTemporalMeanVec[0], 1, normCrossDir[1], 1, normCrossDir[1], 1); - Vmath::Vmul(npoints, m_fromField[0]->m_exp[nfields - 2]->GetPhys(), 1, + Vmath::Vmul(npoints, fromField[0]->m_exp[nfields - 2]->GetPhys(), 1, normTemporalMeanVec[0], 1, normCrossDir[2], 1); - Vmath::Vvtvm(npoints, m_fromField[0]->m_exp[nfields - 3]->GetPhys(), 1, + Vmath::Vvtvm(npoints, fromField[0]->m_exp[nfields - 3]->GetPhys(), 1, normTemporalMeanVec[1], 1, normCrossDir[2], 1, normCrossDir[2], 1); } @@ -267,19 +262,19 @@ void ProcessMultiShear::Process(po::variables_map &vm) { for (j = 0; j < spacedim; ++j) { - Vmath::Vvtvp(npoints, m_fromField[i]->m_exp[j]->GetPhys(), 1, + Vmath::Vvtvp(npoints, fromField[i]->m_exp[j]->GetPhys(), 1, normTemporalMeanVec[j], 1, DotProduct, 1, DotProduct, 1); } // TAWSS - Vmath::Vadd(npoints, m_fromField[i]->m_exp[spacedim]->GetPhys(), 1, + Vmath::Vadd(npoints, fromField[i]->m_exp[spacedim]->GetPhys(), 
1, outfield[0], 1, outfield[0], 1); // transWSS Vmath::Vmul(npoints, DotProduct, 1, DotProduct, 1, temp, 1); - Vmath::Vvtvm(npoints, m_fromField[i]->m_exp[spacedim]->GetPhys(), 1, - m_fromField[i]->m_exp[spacedim]->GetPhys(), 1, temp, 1, + Vmath::Vvtvm(npoints, fromField[i]->m_exp[spacedim]->GetPhys(), 1, + fromField[i]->m_exp[spacedim]->GetPhys(), 1, temp, 1, temp, 1); for (j = 0; j < npoints; ++j) @@ -292,7 +287,7 @@ void ProcessMultiShear::Process(po::variables_map &vm) // TAAFI Vmath::Vdiv(npoints, DotProduct, 1, - m_fromField[i]->m_exp[spacedim]->GetPhys(), 1, temp, 1); + fromField[i]->m_exp[spacedim]->GetPhys(), 1, temp, 1); Vmath::Vadd(npoints, temp, 1, outfield[3], 1, outfield[3], 1); // TACFI @@ -311,7 +306,7 @@ void ProcessMultiShear::Process(po::variables_map &vm) // parallel component: Vmath::Zero(npoints, temp, 1); - m_fromField[i]->m_exp[0]->PhysDeriv(DotProduct, dtemp[0], dtemp[1], + fromField[i]->m_exp[0]->PhysDeriv(DotProduct, dtemp[0], dtemp[1], dtemp[2]); for (j = 0; j < spacedim; j++) { @@ -325,10 +320,10 @@ void ProcessMultiShear::Process(po::variables_map &vm) for (j = 0; j < spacedim; ++j) { - Vmath::Vvtvp(npoints, m_fromField[i]->m_exp[j]->GetPhys(), 1, + Vmath::Vvtvp(npoints, fromField[i]->m_exp[j]->GetPhys(), 1, normCrossDir[j], 1, DotProduct, 1, DotProduct, 1); } - m_fromField[i]->m_exp[0]->PhysDeriv(DotProduct, dtemp[0], dtemp[1], + fromField[i]->m_exp[0]->PhysDeriv(DotProduct, dtemp[0], dtemp[1], dtemp[2]); Vmath::Zero(npoints, DotProduct, 1); @@ -368,7 +363,7 @@ void ProcessMultiShear::Process(po::variables_map &vm) */ m_f->m_exp.resize(nout); - m_f->m_fielddef = m_fromField[0]->m_fielddef; + m_f->m_fielddef = fromField[0]->m_fielddef; m_f->m_exp[0] = m_f->SetUpFirstExpList(m_f->m_fielddef[0]->m_numHomogeneousDir, true); @@ -391,28 +386,14 @@ void ProcessMultiShear::Process(po::variables_map &vm) Vmath::Smul(npoints, 1.0 / nfld, outfield[5], 1, outfield[5], 1); } + m_f->m_variables = m_f->m_fielddef[0]->m_fields; + for (i = 0; i < 
nout; ++i) { m_f->m_exp[i]->FwdTrans(outfield[i], m_f->m_exp[i]->UpdateCoeffs()); m_f->m_exp[i]->BwdTrans(m_f->m_exp[i]->GetCoeffs(), m_f->m_exp[i]->UpdatePhys()); } - - std::vector FieldDef = - m_fromField[0]->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for (i = 0; i < nout; ++i) - { - for (j = 0; j < FieldDef.size(); ++j) - { - FieldDef[j]->m_fields.push_back(m_f->m_fielddef[0]->m_fields[i]); - m_f->m_exp[i]->AppendFieldData(FieldDef[j], FieldData[j]); - } - } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/ProcessModules/ProcessMultiShear.h b/library/FieldUtils/ProcessModules/ProcessMultiShear.h index eea1228e3e3f0900934798166899551a4bbe3aa0..2d71c7ed1784dbecfa1e083ddd8ccf4020d612a3 100644 --- a/library/FieldUtils/ProcessModules/ProcessMultiShear.h +++ b/library/FieldUtils/ProcessModules/ProcessMultiShear.h @@ -67,6 +67,17 @@ public: { return "ProcessMultiShear"; } + + virtual std::string GetModuleDescription() + { + return "Calculating shear stress metrics"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessNumModes.cpp b/library/FieldUtils/ProcessModules/ProcessNumModes.cpp index b119cc9c5116b90b717b2459b7e232135449438f..c9c5e23af311e3836db9d451907854b6cf539224 100644 --- a/library/FieldUtils/ProcessModules/ProcessNumModes.cpp +++ b/library/FieldUtils/ProcessModules/ProcessNumModes.cpp @@ -64,33 +64,40 @@ ProcessNumModes::~ProcessNumModes() void ProcessNumModes::Process(po::variables_map &vm) { - if (m_f->m_verbose) + int i, s; + int expdim = m_f->m_graph->GetMeshDimension(); + int nfields = m_f->m_variables.size(); + int addfields = expdim; + + m_f->m_variables.push_back("P1"); + if (addfields >= 2) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessNumModes: Calculating number of modes..." 
<< endl; - } + m_f->m_variables.push_back("P2"); + } + if (addfields == 3) + { + m_f->m_variables.push_back("P3"); + } + + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) + { + return; } - int i, j, s; - int expdim = m_f->m_graph->GetMeshDimension(); - int nfields = m_f->m_fielddef[0]->m_fields.size(); - int addfields = expdim; int npoints = m_f->m_exp[0]->GetNpoints(); Array > outfield(addfields); - int nstrips; m_f->m_session->LoadParameter("Strip_Z", nstrips, 1); - m_f->m_exp.resize(nfields * nstrips); - for (i = 0; i < addfields; ++i) { outfield[i] = Array(npoints); } - vector Exp(nstrips * addfields); + vector::iterator it; + MultiRegions::ExpListSharedPtr Exp; int nExp, nq, offset; nExp = m_f->m_exp[0]->GetExpSize(); @@ -112,67 +119,15 @@ void ProcessNumModes::Process(po::variables_map &vm) { for (i = 0; i < addfields; ++i) { - int n = s * addfields + i; - Exp[n] = - m_f->AppendExpList(m_f->m_fielddef[0]->m_numHomogeneousDir); - Vmath::Vcopy(npoints, outfield[i], 1, Exp[n]->UpdatePhys(), 1); - Exp[n]->FwdTrans_IterPerExp(outfield[i], Exp[n]->UpdateCoeffs()); - } - } + Exp = m_f->AppendExpList(m_f->m_numHomogeneousDir); + Vmath::Vcopy(npoints, outfield[i], 1, Exp->UpdatePhys(), 1); + Exp->FwdTrans_IterPerExp(outfield[i], Exp->UpdateCoeffs()); - vector::iterator it; - for (s = 0; s < nstrips; ++s) - { - for (i = 0; i < addfields; ++i) - { it = m_f->m_exp.begin() + s * (nfields + addfields) + nfields + i; - m_f->m_exp.insert(it, Exp[s * addfields + i]); - } - } - - vector outname; - outname.push_back("P1"); - if (addfields >= 2) - { - outname.push_back("P2"); - } - - if (addfields == 3) - { - outname.push_back("P3"); - } - - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - // homogeneous strip variant - for (s = 0; s < nstrips; ++s) - { - for (j = 0; j < nfields + addfields; ++j) - { - for (i = 0; i < FieldDef.size() / nstrips; ++i) - { - int n = s * FieldDef.size() / 
nstrips + i; - - if (j >= nfields) - { - FieldDef[n]->m_fields.push_back(outname[j - nfields]); - } - else - { - FieldDef[n]->m_fields.push_back( - m_f->m_fielddef[0]->m_fields[j]); - } - - m_f->m_exp[s * (nfields + addfields) + j]->AppendFieldData( - FieldDef[n], FieldData[n]); - } + m_f->m_exp.insert(it, Exp); } } - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/ProcessModules/ProcessNumModes.h b/library/FieldUtils/ProcessModules/ProcessNumModes.h index 821fa82a8835e70c1620456fe381bef7e1f03659..5720bc5c7edfe8d706992c01b8d46ee34051d188 100644 --- a/library/FieldUtils/ProcessModules/ProcessNumModes.h +++ b/library/FieldUtils/ProcessModules/ProcessNumModes.h @@ -66,6 +66,17 @@ public: { return "ProcessNumModes"; } + + virtual std::string GetModuleDescription() + { + return "Calculating number of modes"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessPointDataToFld.cpp b/library/FieldUtils/ProcessModules/ProcessPointDataToFld.cpp index 3ffee2237cbd6df94ef2d3ff6787a1c0974e4b96..195d3baebd602ed5b1e09c8af3e5ace5287e3d03 100644 --- a/library/FieldUtils/ProcessModules/ProcessPointDataToFld.cpp +++ b/library/FieldUtils/ProcessModules/ProcessPointDataToFld.cpp @@ -54,41 +54,16 @@ ModuleKey ProcessPointDataToFld::className = ProcessPointDataToFld::create, "Given discrete data at quadrature points project them onto an " "expansion" - "basis and output fld file. Requires .pts .xml and .fld files."); + "basis and output fld file. 
Requires frompts and .xml and .fld files."); ProcessPointDataToFld::ProcessPointDataToFld(FieldSharedPtr f) : ProcessModule(f) { - m_requireEquiSpaced = true; - m_config["setnantovalue"] = ConfigOption( false, "NotSet", "reset any nan value to prescribed value"); - if ((f->m_inputfiles.count("pts") == 0)) - { - cout << endl - << "A pts input file must be specified for the boundary " - "extraction module" - << endl; - - cout - << "Usage: Fieldconvert -m pointdatatofld file.pts file.xml out.fld" - << endl; - exit(3); - } - - if ((f->m_inputfiles.count("xml") == 0) && - (f->m_inputfiles.count("xml.gz") == 0)) - { - cout << endl - << "An xml or xml.gz input file must be specified for the " - "boundary extraction module" - << endl; - cout - << "Usage: Fieldconvert -m pointdatatofld file.pts file.xml out.fld" - << endl; - exit(3); - } + m_config["frompts"] = ConfigOption( + false, "NotSet", "Pts file from which to interpolate field"); } ProcessPointDataToFld::~ProcessPointDataToFld() @@ -97,15 +72,6 @@ ProcessPointDataToFld::~ProcessPointDataToFld() void ProcessPointDataToFld::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessPointDataToFld: projecting data to expansion..." 
- << endl; - } - } - int i, j; bool setnantovalue = false; NekDouble defvalue=0.0; @@ -117,23 +83,33 @@ void ProcessPointDataToFld::Process(po::variables_map &vm) } // Check for command line point specification if no .pts file specified - ASSERTL0(m_f->m_fieldPts != LibUtilities::NullPtsField, - "No input points found"); - - int nFields = m_f->m_fieldPts->GetNFields(); + // Load pts file + LibUtilities::PtsFieldSharedPtr fieldPts; + ASSERTL0( m_config["frompts"].as().compare("NotSet") != 0, + "ProcessInterpPointDataToFld requires frompts parameter"); + string inFile = m_config["frompts"].as().c_str(); + LibUtilities::CommSharedPtr c = + LibUtilities::GetCommFactory().CreateInstance("Serial", 0, 0); + LibUtilities::PtsIOSharedPtr ptsIO = + MemoryManager::AllocateSharedPtr(c); + ptsIO->Import(inFile, fieldPts); + + int nFields = fieldPts->GetNFields(); ASSERTL0(nFields > 0, "No field values provided in input"); - int dim = m_f->m_fieldPts->GetDim(); + int dim = fieldPts->GetDim(); // assume one field is already defined from input file. 
+ ASSERTL0(m_f->m_numHomogeneousDir == 0, + "ProcessInterpPointDataToFld does not support homogeneous expansion"); + m_f->m_exp.resize(nFields); for (i = 1; i < nFields; ++i) { - m_f->m_exp[i] = m_f->AppendExpList(0); + m_f->m_exp[i] = m_f->AppendExpList(m_f->m_numHomogeneousDir); } - Array > pts; - m_f->m_fieldPts->GetPts(pts); + fieldPts->GetPts(pts); // set any nan values to default value if requested if (setnantovalue) @@ -150,7 +126,7 @@ void ProcessPointDataToFld::Process(po::variables_map &vm) } } - if (m_f->m_fieldPts->m_ptsInfo.count(LibUtilities::eIsEquiSpacedData) != 0) + if (fieldPts->m_ptsInfo.count(LibUtilities::eIsEquiSpacedData) != 0) { int totcoeffs = m_f->m_exp[0]->GetNcoeffs(); @@ -173,6 +149,8 @@ void ProcessPointDataToFld::Process(po::variables_map &vm) coeffs + offset, tmp = coeffs + offset); cnt += ncoeffs; } + m_f->m_exp[i]->BwdTrans(m_f->m_exp[i]->GetCoeffs(), + m_f->m_exp[i]->UpdatePhys()); } } else @@ -218,11 +196,6 @@ void ProcessPointDataToFld::Process(po::variables_map &vm) } } - if (m_f->m_session->GetComm()->GetRank() == 0) - { - cout << endl; - } - // forward transform fields for (i = 0; i < nFields; ++i) { @@ -231,23 +204,11 @@ void ProcessPointDataToFld::Process(po::variables_map &vm) } } - // set up output fld file. 
- std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for (j = 0; j < nFields; ++j) + // save field names + for (int j = 0; j < fieldPts->GetNFields(); ++j) { - for (i = 0; i < FieldDef.size(); ++i) - { - FieldDef[i]->m_fields.push_back(m_f->m_fieldPts->GetFieldName(j)); - - m_f->m_exp[j]->AppendFieldData(FieldDef[i], FieldData[i]); - } + m_f->m_variables.push_back(fieldPts->GetFieldName(j)); } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } } } diff --git a/library/FieldUtils/ProcessModules/ProcessPointDataToFld.h b/library/FieldUtils/ProcessModules/ProcessPointDataToFld.h index 84f3c3932ec35b8daddc7a7cafc9a4d34e6b0497..ab4ced911e22df7c283a33fe3392278c216391d6 100644 --- a/library/FieldUtils/ProcessModules/ProcessPointDataToFld.h +++ b/library/FieldUtils/ProcessModules/ProcessPointDataToFld.h @@ -69,6 +69,16 @@ public: return "ProcessPointDataToFld"; } + virtual std::string GetModuleDescription() + { + return "Projecting data to expansion"; + } + + virtual ModulePriority GetModulePriority() + { + return eFillExp; + } + private: }; } diff --git a/library/FieldUtils/ProcessModules/ProcessPrintFldNorms.cpp b/library/FieldUtils/ProcessModules/ProcessPrintFldNorms.cpp index e19d1954c4afc26688874df89f946a54a19a417c..0cff17706630eb3edb3779562255963402af7968 100644 --- a/library/FieldUtils/ProcessModules/ProcessPrintFldNorms.cpp +++ b/library/FieldUtils/ProcessModules/ProcessPrintFldNorms.cpp @@ -63,26 +63,25 @@ ProcessPrintFldNorms::~ProcessPrintFldNorms() void ProcessPrintFldNorms::Process(po::variables_map &vm) { - if (m_f->m_verbose) + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessPrintFldNorms: Printing norms..." 
<< endl; - } + return; } // Evaluate norms and print for (int j = 0; j < m_f->m_exp.size(); ++j) { - m_f->m_exp[j]->BwdTrans(m_f->m_exp[j]->GetCoeffs(), - m_f->m_exp[j]->UpdatePhys()); NekDouble L2 = m_f->m_exp[j]->L2(m_f->m_exp[j]->GetPhys()); NekDouble LInf = m_f->m_exp[j]->Linf(m_f->m_exp[j]->GetPhys()); - cout << "L 2 error (variable " << m_f->m_session->GetVariable(j) - << ") : " << L2 << endl; - cout << "L inf error (variable " << m_f->m_session->GetVariable(j) - << ") : " << LInf << endl; + if (m_f->m_comm->TreatAsRankZero()) + { + cout << "L 2 error (variable " << m_f->m_variables[j] + << ") : " << L2 << endl; + cout << "L inf error (variable " << m_f->m_variables[j] + << ") : " << LInf << endl; + } } } } diff --git a/library/FieldUtils/ProcessModules/ProcessPrintFldNorms.h b/library/FieldUtils/ProcessModules/ProcessPrintFldNorms.h index 408c36f49902efa4ee92591f9bd3950df0fb4abb..ee0a00e90761adb9d005ed71dd5d7f40ee01b2a4 100644 --- a/library/FieldUtils/ProcessModules/ProcessPrintFldNorms.h +++ b/library/FieldUtils/ProcessModules/ProcessPrintFldNorms.h @@ -66,6 +66,17 @@ public: { return "ProcessPrintFldNorms"; } + + virtual std::string GetModuleDescription() + { + return "Printing norms"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessQCriterion.cpp b/library/FieldUtils/ProcessModules/ProcessQCriterion.cpp index f7c260cedcef7311f7154412409fe4599ba1f7b4..b8957ce496c271ed5661e2678c7969542de54e23 100644 --- a/library/FieldUtils/ProcessModules/ProcessQCriterion.cpp +++ b/library/FieldUtils/ProcessModules/ProcessQCriterion.cpp @@ -63,214 +63,135 @@ ProcessQCriterion::~ProcessQCriterion() void ProcessQCriterion::Process(po::variables_map &vm) { - if (m_f->m_verbose) + int nfields = m_f->m_variables.size(); + m_f->m_variables.push_back("Q"); + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << 
"ProcessQCriterion: Calculating Q Criterion..." << endl; - } + return; } - int i, j, s; + int i, s; int expdim = m_f->m_graph->GetMeshDimension(); - int spacedim = expdim; - if ((m_f->m_fielddef[0]->m_numHomogeneousDir) == 1 || - (m_f->m_fielddef[0]->m_numHomogeneousDir) == 2) - { - spacedim = 3; - } - int nfields = m_f->m_fielddef[0]->m_fields.size(); - if (spacedim == 1 || spacedim == 2) - { - cerr << "\n Error: ProcessQCriterion must be computed for a 3D" - " (or quasi-3D) case. \n" - << endl; - } + int spacedim = expdim + (m_f->m_numHomogeneousDir); - // For calculating Q-Criterion only 1 field must be added - int addfields = 1; + ASSERTL0(spacedim == 3, + "ProcessQCriterion must be computed for a 3D (or quasi-3D) case."); int npoints = m_f->m_exp[0]->GetNpoints(); - Array > grad(nfields * nfields); + Array > grad(spacedim * spacedim); - Array > omega(nfields * nfields); - Array > S(nfields * nfields); + Array omega(npoints); + Array S(npoints); - Array > outfield(addfields); - Array > outfield1(addfields); - Array > outfield2(addfields); - Array > outfield3(addfields); + // Will store the Q-Criterion + Array outfield (npoints); + Array outfield1(npoints); + Array outfield2(npoints); + Array outfield3(npoints); int nstrips; m_f->m_session->LoadParameter("Strip_Z", nstrips, 1); - m_f->m_exp.resize(nfields * nstrips); - - for (i = 0; i < nfields * nfields; ++i) + for (i = 0; i < spacedim * spacedim; ++i) { grad[i] = Array(npoints); } - for (i = 0; i < addfields; ++i) - { - // Will store the Q-Criterion - outfield[i] = Array(npoints); - outfield1[i] = Array(npoints); - outfield2[i] = Array(npoints); - outfield3[i] = Array(npoints); - - omega[i] = Array(npoints); - S[i] = Array(npoints); - } - - vector Exp(nstrips * addfields); + vector::iterator it; + MultiRegions::ExpListSharedPtr Exp; for (s = 0; s < nstrips; ++s) // homogeneous strip varient { - for (i = 0; i < nfields; ++i) + for (i = 0; i < spacedim; ++i) { m_f->m_exp[s * nfields + i]->PhysDeriv( - 
m_f->m_exp[s * nfields + i]->GetPhys(), grad[i * nfields], - grad[i * nfields + 1], grad[i * nfields + 2]); + m_f->m_exp[s * nfields + i]->GetPhys(), grad[i * spacedim], + grad[i * spacedim + 1], grad[i * spacedim + 2]); } // W_x = Wy - Vz - Vmath::Vsub(npoints, grad[2 * nfields + 1], 1, grad[1 * nfields + 2], 1, - outfield1[0], 1); + Vmath::Vsub(npoints, grad[2 * spacedim + 1], 1, + grad[1 * spacedim + 2], 1, + outfield1, 1); // W_x^2 - Vmath::Vmul(npoints, outfield1[0], 1, outfield1[0], 1, outfield1[0], 1); + Vmath::Vmul(npoints, outfield1, 1, outfield1, 1, outfield1, 1); // W_y = Uz - Wx - Vmath::Vsub(npoints, grad[0 * nfields + 2], 1, grad[2 * nfields + 0], 1, - outfield2[0], 1); + Vmath::Vsub(npoints, grad[0 * spacedim + 2], 1, + grad[2 * spacedim + 0], 1, + outfield2, 1); // W_y^2 - Vmath::Vmul(npoints, outfield2[0], 1, outfield2[0], 1, outfield2[0], 1); + Vmath::Vmul(npoints, outfield2, 1, outfield2, 1, outfield2, 1); // W_z = Vx - Uy - Vmath::Vsub(npoints, grad[1 * nfields + 0], 1, grad[0 * nfields + 1], 1, - outfield3[0], 1); + Vmath::Vsub(npoints, grad[1 * spacedim + 0], 1, + grad[0 * spacedim + 1], 1, + outfield3, 1); // W_z^2 - Vmath::Vmul(npoints, outfield3[0], 1, outfield3[0], 1, outfield3[0], 1); - - // add fields omega = 0.5*(W_x^2 + W_y^2 + W_z^2) + Vmath::Vmul(npoints, outfield3, 1, outfield3, 1, outfield3, 1); + // Omega = 0.5*(W_x^2 + W_y^2 + W_z^2) NekDouble fac = 0.5; - Vmath::Vadd(npoints, &outfield1[0][0], 1, &outfield2[0][0], 1, - &omega[0][0], 1); - Vmath::Vadd(npoints, &omega[0][0], 1, &outfield3[0][0], 1, &omega[0][0], - 1); - - for (int k = 0; k < addfields; ++k) - { - Vmath::Smul(npoints, fac, &omega[k][0], 1, &omega[k][0], 1); - } - - Vmath::Zero(npoints, &outfield1[0][0], 1); - Vmath::Zero(npoints, &outfield2[0][0], 1); - Vmath::Zero(npoints, &outfield3[0][0], 1); - - Vmath::Vmul(npoints, grad[0 * nfields + 0], 1, grad[0 * nfields + 0], 1, - outfield1[0], 1); - Vmath::Vmul(npoints, grad[1 * nfields + 1], 1, grad[1 * nfields + 1], 1, 
- outfield2[0], 1); - Vmath::Vmul(npoints, grad[2 * nfields + 2], 1, grad[2 * nfields + 2], 1, - outfield3[0], 1); - - Vmath::Vadd(npoints, &outfield1[0][0], 1, &outfield2[0][0], 1, &S[0][0], - 1); - Vmath::Vadd(npoints, &S[0][0], 1, &outfield3[0][0], 1, &S[0][0], 1); - - // W_y + V_z - Vmath::Vadd(npoints, grad[2 * nfields + 1], 1, grad[1 * nfields + 2], 1, - outfield1[0], 1); - Vmath::Vmul(npoints, &outfield1[0][0], 1, &outfield1[0][0], 1, - &outfield1[0][0], 1); - - // U_z + W_x - Vmath::Vadd(npoints, grad[0 * nfields + 2], 1, grad[2 * nfields + 0], 1, - outfield2[0], 1); - Vmath::Vmul(npoints, &outfield2[0][0], 1, &outfield2[0][0], 1, - &outfield2[0][0], 1); - - // V_x + U_y - Vmath::Vadd(npoints, grad[1 * nfields + 0], 1, grad[0 * nfields + 1], 1, - outfield3[0], 1); - Vmath::Vmul(npoints, &outfield3[0][0], 1, &outfield3[0][0], 1, - &outfield3[0][0], 1); - - Vmath::Vadd(npoints, &outfield1[0][0], 1, &outfield2[0][0], 1, - &outfield2[0][0], 1); - Vmath::Vadd(npoints, &outfield2[0][0], 1, &outfield3[0][0], 1, - &outfield3[0][0], 1); - - for (int k = 0; k < addfields; ++k) - { - Vmath::Smul(npoints, fac, &outfield3[k][0], 1, &outfield3[k][0], 1); - } - - Vmath::Vadd(npoints, &outfield3[0][0], 1, &S[0][0], 1, &S[0][0], 1); - Vmath::Vsub(npoints, omega[0], 1, S[0], 1, outfield[0], 1); - - for (int k = 0; k < addfields; ++k) - { - Vmath::Smul(npoints, fac, &outfield[k][0], 1, &outfield[k][0], 1); - } - - for (i = 0; i < addfields; ++i) - { - int n = s * addfields + i; - Exp[n] = - m_f->AppendExpList(m_f->m_fielddef[0]->m_numHomogeneousDir); - Exp[n]->UpdatePhys() = outfield[i]; - Exp[n]->FwdTrans(outfield[i], Exp[n]->UpdateCoeffs()); - } - } - - vector::iterator it; - - for (s = 0; s < nstrips; ++s) - { - for (i = 0; i < addfields; ++i) - { - it = m_f->m_exp.begin() + s * (nfields + addfields) + nfields + i; - m_f->m_exp.insert(it, Exp[s * addfields + i]); - } + Vmath::Vadd(npoints, outfield1, 1, outfield2, 1, omega, 1); + Vmath::Vadd(npoints, omega, 1, outfield3, 
1, omega, 1); + Vmath::Smul(npoints, fac, omega, 1, omega, 1); + + // Ux^2 + Vmath::Vmul(npoints, grad[0 * spacedim + 0], 1, + grad[0 * spacedim + 0], 1, + outfield1, 1); + // Vy^2 + Vmath::Vmul(npoints, grad[1 * spacedim + 1], 1, + grad[1 * spacedim + 1], 1, + outfield2, 1); + // Wz^2 + Vmath::Vmul(npoints, grad[2 * spacedim + 2], 1, + grad[2 * spacedim + 2], 1, + outfield3, 1); + + // + Vmath::Vadd(npoints, outfield1, 1, outfield2, 1, S, 1); + Vmath::Vadd(npoints, S, 1, outfield3, 1, S, 1); + + // Wy + Vz + Vmath::Vadd(npoints, grad[2 * spacedim + 1], 1, + grad[1 * spacedim + 2], 1, + outfield1, 1); + Vmath::Vmul(npoints, outfield1, 1, outfield1, 1, outfield1, 1); + + // Uz + Wx + Vmath::Vadd(npoints, grad[0 * spacedim + 2], 1, + grad[2 * spacedim + 0], 1, + outfield2, 1); + Vmath::Vmul(npoints, outfield2, 1, outfield2, 1, outfield2, 1); + + // Vx + Uy + Vmath::Vadd(npoints, grad[1 * spacedim + 0], 1, + grad[0 * spacedim + 1], 1, + outfield3, 1); + Vmath::Vmul(npoints, outfield3, 1, outfield3, 1, outfield3, 1); + + Vmath::Vadd(npoints, outfield1, 1, outfield2, 1, outfield2, 1); + Vmath::Vadd(npoints, outfield2, 1, outfield3, 1, outfield3, 1); + + Vmath::Smul(npoints, fac, outfield3, 1, outfield3, 1); + + Vmath::Vadd(npoints, outfield3, 1, S, 1, S, 1); + Vmath::Vsub(npoints, omega, 1, S, 1, outfield, 1); + + Vmath::Smul(npoints, fac, outfield, 1, outfield, 1); + + Exp = m_f->AppendExpList(m_f->m_numHomogeneousDir); + Vmath::Vcopy(npoints, outfield, 1, Exp->UpdatePhys(), 1); + Exp->FwdTrans_IterPerExp(outfield, Exp->UpdateCoeffs()); + + it = m_f->m_exp.begin() + s * (nfields + 1) + nfields; + m_f->m_exp.insert(it, Exp); } - - vector outname; - outname.push_back("Q"); - - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for (s = 0; s < nstrips; ++s) // homogeneous strip varient - { - for (j = 0; j < nfields + addfields; ++j) - { - for (i = 0; i < FieldDef.size() / nstrips; ++i) - { - int n = s * 
FieldDef.size() / nstrips + i; - - if (j >= nfields) - { - FieldDef[n]->m_fields.push_back(outname[j - nfields]); - } - else - { - FieldDef[n]->m_fields.push_back( - m_f->m_fielddef[0]->m_fields[j]); - } - m_f->m_exp[s * (nfields + addfields) + j]->AppendFieldData( - FieldDef[n], FieldData[n]); - } - } - } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; } + } } diff --git a/library/FieldUtils/ProcessModules/ProcessQCriterion.h b/library/FieldUtils/ProcessModules/ProcessQCriterion.h index 6bc5fde8f7e79bdb26a7141a4ec1e490dca374d9..03209fa98a19cb9d40ea08650b1a7059dbc9a587 100644 --- a/library/FieldUtils/ProcessModules/ProcessQCriterion.h +++ b/library/FieldUtils/ProcessModules/ProcessQCriterion.h @@ -67,6 +67,17 @@ public: { return "ProcessQCriterion"; } + + virtual std::string GetModuleDescription() + { + return "Calculating Q Criterion"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessQualityMetric.cpp b/library/FieldUtils/ProcessModules/ProcessQualityMetric.cpp index a1aae9b2b7c079b2ca6dbc36783789930bf96c8f..cc80c4a49d469be90bdfc0e40df2d881c0057534 100644 --- a/library/FieldUtils/ProcessModules/ProcessQualityMetric.cpp +++ b/library/FieldUtils/ProcessModules/ProcessQualityMetric.cpp @@ -71,23 +71,37 @@ ProcessQualityMetric::~ProcessQualityMetric() void ProcessQualityMetric::Process(po::variables_map &vm) { - if (m_f->m_verbose) + int nfields = m_f->m_variables.size(); + m_f->m_variables.push_back("qualitymetric"); + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessQualityMetric: Adding quality metric to field" - << endl; - } + return; } - Array &phys = m_f->m_exp[0]->UpdatePhys(); - Array &coeffs = m_f->m_exp[0]->UpdateCoeffs(); + int NumHomogeneousDir = m_f->m_numHomogeneousDir; + MultiRegions::ExpListSharedPtr exp; + + if (nfields) + { + m_f->m_exp.resize(nfields + 1); + 
exp = m_f->AppendExpList(NumHomogeneousDir); - for (int i = 0; i < m_f->m_exp[0]->GetExpSize(); ++i) + m_f->m_exp[nfields] = exp; + } + else + { + exp = m_f->m_exp[0]; + } + + Array &phys = exp->UpdatePhys(); + Array &coeffs = exp->UpdateCoeffs(); + + for (int i = 0; i < exp->GetExpSize(); ++i) { // copy Jacobian into field - LocalRegions::ExpansionSharedPtr Elmt = m_f->m_exp[0]->GetExp(i); - int offset = m_f->m_exp[0]->GetPhys_Offset(i); + LocalRegions::ExpansionSharedPtr Elmt = exp->GetExp(i); + int offset = exp->GetPhys_Offset(i); Array q = GetQ(Elmt,m_config["scaled"].m_beenSet); Array out = phys + offset; @@ -96,20 +110,7 @@ void ProcessQualityMetric::Process(po::variables_map &vm) Vmath::Vcopy(q.num_elements(), q, 1, out, 1); } - m_f->m_exp[0]->FwdTrans_IterPerExp(phys, coeffs); - - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for (int i = 0; i < FieldDef.size(); ++i) - { - FieldDef[i]->m_fields.push_back("QualityMetric"); - m_f->m_exp[0]->AppendFieldData(FieldDef[i], FieldData[i]); - } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; + exp->FwdTrans_IterPerExp(phys, coeffs); } inline vector MappingIdealToRef(SpatialDomains::GeometrySharedPtr geom, diff --git a/library/FieldUtils/ProcessModules/ProcessQualityMetric.h b/library/FieldUtils/ProcessModules/ProcessQualityMetric.h index 2554925af7358a18a3b2443b7c197e613b17a87a..d9655cee79d90b312d63725430325ef92f7b5f44 100644 --- a/library/FieldUtils/ProcessModules/ProcessQualityMetric.h +++ b/library/FieldUtils/ProcessModules/ProcessQualityMetric.h @@ -65,6 +65,16 @@ public: return "ProcessQualityMetric"; } + virtual std::string GetModuleDescription() + { + return "Adding quality metric to field"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + private: Array GetQ(LocalRegions::ExpansionSharedPtr e, bool s); }; diff --git a/library/FieldUtils/ProcessModules/ProcessScalGrad.cpp 
b/library/FieldUtils/ProcessModules/ProcessScalGrad.cpp index 951a47316c50609019648837f16175102db17662..8ffa4f6626fc1351259bd933995a4850052a5eb0 100644 --- a/library/FieldUtils/ProcessModules/ProcessScalGrad.cpp +++ b/library/FieldUtils/ProcessModules/ProcessScalGrad.cpp @@ -54,12 +54,8 @@ ModuleKey ProcessScalGrad::className = ProcessScalGrad::create, "Computes scalar gradient field."); -ProcessScalGrad::ProcessScalGrad(FieldSharedPtr f) : ProcessModule(f) +ProcessScalGrad::ProcessScalGrad(FieldSharedPtr f) : ProcessBoundaryExtract(f) { - m_config["bnd"] = ConfigOption(false, "All", "Boundary to be extracted"); - f->m_writeBndFld = true; - f->m_declareExpansionAsContField = true; - m_f->m_fldToBnd = false; } ProcessScalGrad::~ProcessScalGrad() @@ -68,47 +64,32 @@ ProcessScalGrad::~ProcessScalGrad() void ProcessScalGrad::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessScalGrad: Calculating scalar gradient..." << endl; - } - } + ProcessBoundaryExtract::Process(vm); int i, j, k; - // Set up Field options to output boundary fld - string bvalues = m_config["bnd"].as(); - - if (bvalues.compare("All") == 0) + int spacedim = m_f->m_graph->GetSpaceDimension(); + if ((m_f->m_numHomogeneousDir) == 1 || (m_f->m_numHomogeneousDir) == 2) { - Array BndExp = - m_f->m_exp[0]->GetBndCondExpansions(); - - for (i = 0; i < BndExp.num_elements(); ++i) - { - m_f->m_bndRegionsToWrite.push_back(i); - } + spacedim = 3; } - else + + int nfields = m_f->m_variables.size(); + + string var; + for (i = 0; i < nfields; i++) { - ASSERTL0(ParseUtils::GenerateOrderedVector(bvalues.c_str(), - m_f->m_bndRegionsToWrite), - "Failed to interpret range string"); + var = m_f->m_variables[i]; + stringstream filename; + filename << var << "_scalar_gradient"; + filename >> var; + m_f->m_variables[i] = var; } - - int spacedim = m_f->m_graph->GetSpaceDimension(); - if ((m_f->m_fielddef[0]->m_numHomogeneousDir) == 1 || - 
(m_f->m_fielddef[0]->m_numHomogeneousDir) == 2) + if (m_f->m_exp[0]->GetNumElmts() == 0) { - spacedim = 3; + return; } - int nfields = m_f->m_fielddef[0]->m_fields.size(); - // ASSERTL0(nfields == 1,"Implicit assumption that input is in ADR format of - // (u)"); - if (spacedim == 1) { ASSERTL0(false, "Error: scalar gradient for a 1D problem cannot " @@ -118,7 +99,6 @@ void ProcessScalGrad::Process(po::variables_map &vm) int ngrad = spacedim; int n, cnt, elmtid, nq, offset, boundary, nfq; int npoints = m_f->m_exp[0]->GetNpoints(); - string var; Array scalar; Array > grad(ngrad), fgrad(ngrad), outfield(nfields); @@ -132,12 +112,6 @@ void ProcessScalGrad::Process(po::variables_map &vm) for (i = 0; i < nfields; i++) { - var = m_f->m_fielddef[0]->m_fields[i]; - stringstream filename; - filename << var << "_scalar_gradient"; - filename >> var; - m_f->m_fielddef[0]->m_fields[i] = var; - BndExp[i] = m_f->m_exp[i]->GetBndCondExpansions(); outfield[i] = Array(npoints); } diff --git a/library/FieldUtils/ProcessModules/ProcessScalGrad.h b/library/FieldUtils/ProcessModules/ProcessScalGrad.h index 644a6795c41d54afe9b2348cba7e5381db24bc64..c768b446fec888aa9afa9efcf9a3212073508785 100644 --- a/library/FieldUtils/ProcessModules/ProcessScalGrad.h +++ b/library/FieldUtils/ProcessModules/ProcessScalGrad.h @@ -36,7 +36,7 @@ #ifndef FIELDUTILS_PROCESSSCALGRAD #define FIELDUTILS_PROCESSSCALGRAD -#include "../Module.h" +#include "ProcessBoundaryExtract.h" namespace Nektar { @@ -47,7 +47,7 @@ namespace FieldUtils * @brief This processing module calculates the scalar gradient field and * writes it to a surface output file. 
*/ -class ProcessScalGrad : public ProcessModule +class ProcessScalGrad : public ProcessBoundaryExtract { public: /// Creates an instance of this class @@ -67,6 +67,12 @@ public: { return "ProcessScalGrad"; } + + virtual std::string GetModuleDescription() + { + return "Calculating scalar gradient"; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessScaleInFld.cpp b/library/FieldUtils/ProcessModules/ProcessScaleInFld.cpp index 647f550dc3c7b625bddecff89279d34ff4b7ff69..c61078fbfb2be3f7556f4e4bf1dfbfe63467b912 100644 --- a/library/FieldUtils/ProcessModules/ProcessScaleInFld.cpp +++ b/library/FieldUtils/ProcessModules/ProcessScaleInFld.cpp @@ -55,19 +55,9 @@ ModuleKey ProcessScaleInFld::className = ProcessScaleInFld::ProcessScaleInFld(FieldSharedPtr f) : ProcessModule(f) { - if ((f->m_inputfiles.count("fld") == 0) && - (f->m_inputfiles.count("rst") == 0) && - (f->m_inputfiles.count("chk") == 0)) - { - cout << "A fld, chk or rst input file must be specified for the " - "scaleinputfld module" - << endl; - exit(3); - } - m_config["scale"] = ConfigOption(false, "NotSet", "scale factor"); ASSERTL0(m_config["scale"].as().compare("NotSet") != 0, - "scaleinputfld: Need to specify a sacle factor"); + "scaleinputfld: Need to specify a scale factor"); } ProcessScaleInFld::~ProcessScaleInFld() @@ -76,20 +66,9 @@ ProcessScaleInFld::~ProcessScaleInFld() void ProcessScaleInFld::Process(po::variables_map &vm) { - ASSERTL0(m_f->m_data.size() != 0, "No input data defined"); - string scalestr = m_config["scale"].as(); NekDouble scale = boost::lexical_cast(scalestr); - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessScaleInFld: Rescaling input fld by factor" << scale - << "..." 
<< endl; - } - } - for (int i = 0; i < m_f->m_data.size(); ++i) { int datalen = m_f->m_data[i].size(); @@ -97,23 +76,6 @@ void ProcessScaleInFld::Process(po::variables_map &vm) Vmath::Smul(datalen, scale, &(m_f->m_data[i][0]), 1, &(m_f->m_data[i][0]), 1); } - - if (m_f->m_exp.size()) // expansiosn are defined reload field - { - int nfields = m_f->m_fielddef[0]->m_fields.size(); - - // import basic field again in case of rescaling - for (int j = 0; j < nfields; ++j) - { - for (int i = 0; i < m_f->m_data.size(); ++i) - { - m_f->m_exp[j]->ExtractDataToCoeffs( - m_f->m_fielddef[i], m_f->m_data[i], - m_f->m_fielddef[i]->m_fields[j], - m_f->m_exp[j]->UpdateCoeffs()); - } - } - } } } } diff --git a/library/FieldUtils/ProcessModules/ProcessScaleInFld.h b/library/FieldUtils/ProcessModules/ProcessScaleInFld.h index 0632d67dad9570f86722e9c534e5c7c743394896..dbe8b0e2486fd6a55bca1bdc10197c27de07a027 100644 --- a/library/FieldUtils/ProcessModules/ProcessScaleInFld.h +++ b/library/FieldUtils/ProcessModules/ProcessScaleInFld.h @@ -68,6 +68,16 @@ public: return "ProcessScaleInFld"; } + virtual std::string GetModuleDescription() + { + return "Rescaling input fld"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyFieldData; + } + private: }; } diff --git a/library/FieldUtils/ProcessModules/ProcessSurfDistance.cpp b/library/FieldUtils/ProcessModules/ProcessSurfDistance.cpp index 964c00add7ca5d7458256fe3b0eadea2f40170c1..99abe6c1925174795c431e25555947a8954adcb7 100644 --- a/library/FieldUtils/ProcessModules/ProcessSurfDistance.cpp +++ b/library/FieldUtils/ProcessModules/ProcessSurfDistance.cpp @@ -50,13 +50,9 @@ ModuleKey ProcessSurfDistance::className = ProcessSurfDistance::create, "Computes height of element connected to a surface."); -ProcessSurfDistance::ProcessSurfDistance(FieldSharedPtr f) : ProcessModule(f) +ProcessSurfDistance::ProcessSurfDistance(FieldSharedPtr f) + : ProcessBoundaryExtract(f) { - m_config["bnd"] = - ConfigOption(false, "-1", "Boundary 
region to calculate height"); - f->m_writeBndFld = true; - f->m_declareExpansionAsContField = true; - m_f->m_fldToBnd = false; } ProcessSurfDistance::~ProcessSurfDistance() @@ -65,14 +61,9 @@ ProcessSurfDistance::~ProcessSurfDistance() void ProcessSurfDistance::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessSurfDistance: Calculating distance to surface..." - << endl; - } - } + ProcessBoundaryExtract::Process(vm); + ASSERTL0( !boost::iequals(m_config["bnd"].as(), "All"), + "ProcessSurfDistance needs bnd parameter with a single id."); int i, j, k, cnt; int surf = m_config["bnd"].as(); @@ -80,33 +71,38 @@ void ProcessSurfDistance::Process(po::variables_map &vm) ASSERTL0(surf >= 0, "Invalid surface " + boost::lexical_cast(surf)); - // Add this boundary region to the list that we will output. - m_f->m_bndRegionsToWrite.push_back(surf); - - // Remove existing fields. - m_f->m_exp.resize(1); + int nfields = m_f->m_variables.size(); + m_f->m_variables.push_back("dist"); - // Grab boundary expansions. - Array BndExp = - m_f->m_exp[0]->GetBndCondExpansions(); - - // Get map that takes us from boundary element to element. - Array BoundarytoElmtID, BoundarytoTraceID; - m_f->m_exp[0]->GetBoundaryToElmtMap(BoundarytoElmtID, BoundarytoTraceID); + if (m_f->m_exp[0]->GetNumElmts() == 0) + { + return; + } - if (m_f->m_fielddef.size() == 0) + int NumHomogeneousDir = m_f->m_numHomogeneousDir; + MultiRegions::ExpListSharedPtr exp; + if (nfields) { - m_f->m_fielddef = m_f->m_exp[0]->GetFieldDefinitions(); - m_f->m_fielddef[0]->m_fields.push_back("dist"); + m_f->m_exp.resize(nfields + 1); + exp = m_f->AppendExpList(NumHomogeneousDir); + + m_f->m_exp[nfields] = exp; } else { - // Override field variable - m_f->m_fielddef[0]->m_fields[0] = "dist"; + exp = m_f->m_exp[0]; } - ASSERTL0(!(m_f->m_fielddef[0]->m_numHomogeneousDir), - "Homogeneous expansions not supported"); + // Grab boundary expansions. 
+ Array BndExp = + exp->GetBndCondExpansions(); + + // Get map that takes us from boundary element to element. + Array BoundarytoElmtID, BoundarytoTraceID; + exp->GetBoundaryToElmtMap(BoundarytoElmtID, BoundarytoTraceID); + + ASSERTL0(!(m_f->m_numHomogeneousDir), + "Homogeneous expansions not supported"); for (i = cnt = 0; i < BndExp.num_elements(); ++i) { @@ -125,7 +121,7 @@ void ProcessSurfDistance::Process(po::variables_map &vm) // Get boundary and element expansions. LocalRegions::ExpansionSharedPtr bndElmt = BndExp[i]->GetExp(j); LocalRegions::ExpansionSharedPtr elmt = - m_f->m_exp[0]->GetExp(elmtNum); + exp->GetExp(elmtNum); // Determine which face is opposite to the surface switch (elmt->DetShapeType()) @@ -239,7 +235,8 @@ void ProcessSurfDistance::Process(po::variables_map &vm) Vmath::Vsqrt(nqBnd, dist, 1, dist, 1); } - BndExp[i]->FwdTrans(BndExp[i]->GetPhys(), BndExp[i]->UpdateCoeffs()); + BndExp[i]->FwdTrans_IterPerExp(BndExp[i]->GetPhys(), + BndExp[i]->UpdateCoeffs()); } } } diff --git a/library/FieldUtils/ProcessModules/ProcessSurfDistance.h b/library/FieldUtils/ProcessModules/ProcessSurfDistance.h index c89a517db0e2f3181346beb702582f7834be6617..58269a35c99a67efa91256da320ee50e584ebd46 100644 --- a/library/FieldUtils/ProcessModules/ProcessSurfDistance.h +++ b/library/FieldUtils/ProcessModules/ProcessSurfDistance.h @@ -36,7 +36,7 @@ #ifndef FIELDUTILS_PROCESSSURFDISTANCE #define FIELDUTILS_PROCESSSURFDISTANCE -#include "../Module.h" +#include "ProcessBoundaryExtract.h" namespace Nektar { @@ -47,7 +47,7 @@ namespace FieldUtils * @brief This processing module calculates the height of an element connected * to a surface and adds it as an extra-field to the output file. 
*/ -class ProcessSurfDistance : public ProcessModule +class ProcessSurfDistance : public ProcessBoundaryExtract { public: /// Creates an instance of this class @@ -67,6 +67,12 @@ public: { return "ProcessSurfDistance"; } + + virtual std::string GetModuleDescription() + { + return "Calculating distance to surface"; + } + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessVorticity.cpp b/library/FieldUtils/ProcessModules/ProcessVorticity.cpp index 4349afa416244304a50bc0cd8991abebacad1fdd..a7cc71dde6029b8439499b338c0e5872551abe31 100644 --- a/library/FieldUtils/ProcessModules/ProcessVorticity.cpp +++ b/library/FieldUtils/ProcessModules/ProcessVorticity.cpp @@ -65,41 +65,47 @@ ProcessVorticity::~ProcessVorticity() void ProcessVorticity::Process(po::variables_map &vm) { - if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessVorticity: Calculating vorticity..." << endl; - } - } - - int i, j, s; + int i, s; int expdim = m_f->m_graph->GetMeshDimension(); - int spacedim = expdim; - if ((m_f->m_fielddef[0]->m_numHomogeneousDir) == 1 || - (m_f->m_fielddef[0]->m_numHomogeneousDir) == 2) + m_spacedim = expdim; + if ((m_f->m_numHomogeneousDir) == 1 || (m_f->m_numHomogeneousDir) == 2) { - spacedim = 3; + m_spacedim = 3; } - int nfields = m_f->m_fielddef[0]->m_fields.size(); - if (spacedim == 1) + int nfields = m_f->m_variables.size(); + if (m_spacedim == 1) { ASSERTL0(false, "Error: Vorticity for a 1D problem cannot " "be computed") } - int addfields = (spacedim == 2) ? 1 : 3; + int addfields = (m_spacedim == 2) ? 
1 : 3; + // Append field names + if (addfields == 1) + { + m_f->m_variables.push_back("W_z"); + } + else + { + m_f->m_variables.push_back("W_x"); + m_f->m_variables.push_back("W_y"); + m_f->m_variables.push_back("W_z"); + } + + // Skip in case of empty partition + if (m_f->m_exp[0]->GetNumElmts() == 0) + { + return; + } int npoints = m_f->m_exp[0]->GetNpoints(); - Array > grad(spacedim * spacedim); + Array > grad(m_spacedim * m_spacedim); Array > outfield(addfields); int nstrips; m_f->m_session->LoadParameter("Strip_Z", nstrips, 1); - m_f->m_exp.resize(nfields * nstrips); - - for (i = 0; i < spacedim * spacedim; ++i) + for (i = 0; i < m_spacedim * m_spacedim; ++i) { grad[i] = Array(npoints); } @@ -109,8 +115,8 @@ void ProcessVorticity::Process(po::variables_map &vm) outfield[i] = Array(npoints); } - Array > tmp(spacedim); - for (int i = 0; i < spacedim; i++) + Array > tmp(m_spacedim); + for (int i = 0; i < m_spacedim; i++) { tmp[i] = Array(npoints); } @@ -124,25 +130,18 @@ void ProcessVorticity::Process(po::variables_map &vm) { // Get velocity and convert to Cartesian system, // if it is still in transformed system - Array > vel(spacedim); + Array > vel(m_spacedim); + GetVelocity(vel, s); if (m_f->m_fieldMetaDataMap.count("MappingCartesianVel")) { if (m_f->m_fieldMetaDataMap["MappingCartesianVel"] == "False") { // Initialize arrays and copy velocity - for (int i = 0; i < spacedim; ++i) + if (m_f->m_exp[0]->GetWaveSpace()) { - vel[i] = Array(npoints); - if (m_f->m_exp[0]->GetWaveSpace()) + for (int i = 0; i < m_spacedim; ++i) { - m_f->m_exp[0]->HomogeneousBwdTrans( - m_f->m_exp[s * nfields + i]->GetPhys(), vel[i]); - } - else - { - Vmath::Vcopy(npoints, - m_f->m_exp[s * nfields + i]->GetPhys(), 1, - vel[i], 1); + m_f->m_exp[0]->HomogeneousBwdTrans(vel[i], vel[i]); } } // Convert velocity to cartesian system @@ -150,78 +149,59 @@ void ProcessVorticity::Process(po::variables_map &vm) // Convert back to wavespace if necessary if (m_f->m_exp[0]->GetWaveSpace()) { - for 
(int i = 0; i < spacedim; ++i) + for (int i = 0; i < m_spacedim; ++i) { m_f->m_exp[0]->HomogeneousFwdTrans(vel[i], vel[i]); } } } - else - { - for (int i = 0; i < spacedim; ++i) - { - vel[i] = Array(npoints); - Vmath::Vcopy(npoints, - m_f->m_exp[s * nfields + i]->GetPhys(), 1, - vel[i], 1); - } - } - } - else - { - for (int i = 0; i < spacedim; ++i) - { - vel[i] = Array(npoints); - Vmath::Vcopy(npoints, m_f->m_exp[s * nfields + i]->GetPhys(), 1, - vel[i], 1); - } } // Calculate Gradient & Vorticity - if (spacedim == 2) + if (m_spacedim == 2) { - for (i = 0; i < spacedim; ++i) + for (i = 0; i < m_spacedim; ++i) { m_f->m_exp[s * nfields + i]->PhysDeriv(vel[i], tmp[0], tmp[1]); mapping->CovarToCartesian(tmp, tmp); - for (int j = 0; j < spacedim; j++) + for (int j = 0; j < m_spacedim; j++) { - Vmath::Vcopy(npoints, tmp[j], 1, grad[i * spacedim + j], 1); + Vmath::Vcopy(npoints, tmp[j], 1, grad[i * m_spacedim + j], 1); } } // W_z = Vx - Uy - Vmath::Vsub(npoints, grad[1 * spacedim + 0], 1, - grad[0 * spacedim + 1], 1, outfield[0], 1); + Vmath::Vsub(npoints, grad[1 * m_spacedim + 0], 1, + grad[0 * m_spacedim + 1], 1, outfield[0], 1); } else { - for (i = 0; i < spacedim; ++i) + for (i = 0; i < m_spacedim; ++i) { m_f->m_exp[s * nfields + i]->PhysDeriv(vel[i], tmp[0], tmp[1], tmp[2]); mapping->CovarToCartesian(tmp, tmp); - for (int j = 0; j < spacedim; j++) + for (int j = 0; j < m_spacedim; j++) { - Vmath::Vcopy(npoints, tmp[j], 1, grad[i * spacedim + j], 1); + Vmath::Vcopy(npoints, tmp[j], 1, grad[i * m_spacedim + j], 1); } } // W_x = Wy - Vz - Vmath::Vsub(npoints, grad[2 * spacedim + 1], 1, - grad[1 * spacedim + 2], 1, outfield[0], 1); + Vmath::Vsub(npoints, grad[2 * m_spacedim + 1], 1, + grad[1 * m_spacedim + 2], 1, outfield[0], 1); // W_y = Uz - Wx - Vmath::Vsub(npoints, grad[0 * spacedim + 2], 1, - grad[2 * spacedim + 0], 1, outfield[1], 1); + Vmath::Vsub(npoints, grad[0 * m_spacedim + 2], 1, + grad[2 * m_spacedim + 0], 1, outfield[1], 1); // W_z = Vx - Uy - 
Vmath::Vsub(npoints, grad[1 * spacedim + 0], 1, - grad[0 * spacedim + 1], 1, outfield[2], 1); + Vmath::Vsub(npoints, grad[1 * m_spacedim + 0], 1, + grad[0 * m_spacedim + 1], 1, outfield[2], 1); } for (i = 0; i < addfields; ++i) { int n = s * addfields + i; Exp[n] = - m_f->AppendExpList(m_f->m_fielddef[0]->m_numHomogeneousDir); + m_f->AppendExpList(m_f->m_numHomogeneousDir); Vmath::Vcopy(npoints, outfield[i], 1, Exp[n]->UpdatePhys(), 1); Exp[n]->FwdTrans_IterPerExp(outfield[i], Exp[n]->UpdateCoeffs()); } @@ -236,48 +216,43 @@ void ProcessVorticity::Process(po::variables_map &vm) m_f->m_exp.insert(it, Exp[s * addfields + i]); } } +} - vector outname; - if (addfields == 1) - { - outname.push_back("W_z"); - } - else +void ProcessVorticity::GetVelocity( Array > &vel, + int strip) +{ + int nfields = m_f->m_variables.size(); + int npoints = m_f->m_exp[0]->GetNpoints(); + if(boost::iequals(m_f->m_variables[0], "u")) { - outname.push_back("W_x"); - outname.push_back("W_y"); - outname.push_back("W_z"); + // IncNavierStokesSolver + for (int i = 0; i < m_spacedim; ++i) + { + vel[i] = Array(npoints); + Vmath::Vcopy(npoints, + m_f->m_exp[strip * nfields + i]->GetPhys(), 1, + vel[i], 1); + } } - - std::vector FieldDef = - m_f->m_exp[0]->GetFieldDefinitions(); - std::vector > FieldData(FieldDef.size()); - - for (s = 0; s < nstrips; ++s) // homogeneous strip varient + else if(boost::iequals(m_f->m_variables[0], "rho") && + boost::iequals(m_f->m_variables[1], "rhou")) { - for (j = 0; j < nfields + addfields; ++j) + // CompressibleFlowSolver + for (int i = 0; i < m_spacedim; ++i) { - for (i = 0; i < FieldDef.size() / nstrips; ++i) - { - int n = s * FieldDef.size() / nstrips + i; - - if (j >= nfields) - { - FieldDef[n]->m_fields.push_back(outname[j - nfields]); - } - else - { - FieldDef[n]->m_fields.push_back( - m_f->m_fielddef[0]->m_fields[j]); - } - m_f->m_exp[s * (nfields + addfields) + j]->AppendFieldData( - FieldDef[n], FieldData[n]); - } + vel[i] = Array(npoints); + 
Vmath::Vdiv(npoints, + m_f->m_exp[strip * nfields + i + 1]->GetPhys(), 1, + m_f->m_exp[strip * nfields + 0 ]->GetPhys(), 1, + vel[i], 1); } } - - m_f->m_fielddef = FieldDef; - m_f->m_data = FieldData; + else + { + // Unknown + ASSERTL0(false, "Could not identify velocity for ProcessVorticity"); + } } + } } diff --git a/library/FieldUtils/ProcessModules/ProcessVorticity.h b/library/FieldUtils/ProcessModules/ProcessVorticity.h index e425b94e79bf74eb7e37818cba103ed9f3cdbe51..23e0fe43e26a9ce7afbaed6ef9332a2ee3fab0aa 100644 --- a/library/FieldUtils/ProcessModules/ProcessVorticity.h +++ b/library/FieldUtils/ProcessModules/ProcessVorticity.h @@ -66,6 +66,23 @@ public: { return "ProcessVorticity"; } + + virtual std::string GetModuleDescription() + { + return "Calculating vorticity"; + } + + virtual ModulePriority GetModulePriority() + { + return eModifyExp; + } + +protected: + void GetVelocity( Array > &vel, int strip = 0); + +private: + int m_spacedim; + }; } } diff --git a/library/FieldUtils/ProcessModules/ProcessWSS.cpp b/library/FieldUtils/ProcessModules/ProcessWSS.cpp index 4531ebd9a815e07a01fd809570c9d2490d88a2d6..520a9e4828175030ed05f82d57d6e35b6de2e657 100644 --- a/library/FieldUtils/ProcessModules/ProcessWSS.cpp +++ b/library/FieldUtils/ProcessModules/ProcessWSS.cpp @@ -53,15 +53,8 @@ ModuleKey ProcessWSS::className = GetModuleFactory().RegisterCreatorFunction( ProcessWSS::create, "Computes wall shear stress field."); -ProcessWSS::ProcessWSS(FieldSharedPtr f) : ProcessModule(f) +ProcessWSS::ProcessWSS(FieldSharedPtr f) : ProcessBoundaryExtract(f) { - m_config["bnd"] = ConfigOption(false, "All", "Boundary to be extracted"); - m_config["addnormals"] = - ConfigOption(true, "NotSet", "Add normals to output"); - f->m_writeBndFld = true; - f->m_declareExpansionAsContField = true; - f->m_requireBoundaryExpansion = true; - m_f->m_fldToBnd = false; } ProcessWSS::~ProcessWSS() @@ -70,48 +63,37 @@ ProcessWSS::~ProcessWSS() void ProcessWSS::Process(po::variables_map &vm) { - 
if (m_f->m_verbose) - { - if (m_f->m_comm->TreatAsRankZero()) - { - cout << "ProcessWSS: Calculating wall shear stress..." << endl; - } - } + ProcessBoundaryExtract::Process(vm); - m_f->m_addNormals = m_config["addnormals"].m_beenSet; - - // Set up Field options to output boundary fld - string bvalues = m_config["bnd"].as(); + NekDouble kinvis = m_f->m_session->GetParameter("Kinvis"); - if (bvalues.compare("All") == 0) + int i, j; + int nfields = m_f->m_variables.size(); + int spacedim = m_f->m_graph->GetSpaceDimension(); + if ((m_f->m_numHomogeneousDir) == 1 || (m_f->m_numHomogeneousDir) == 2) { - Array BndExp = - m_f->m_exp[0]->GetBndCondExpansions(); + spacedim += m_f->m_numHomogeneousDir; + } - for (int i = 0; i < BndExp.num_elements(); ++i) - { - m_f->m_bndRegionsToWrite.push_back(i); - } + if (spacedim == 2) + { + m_f->m_variables.push_back("Shear_x"); + m_f->m_variables.push_back("Shear_y"); + m_f->m_variables.push_back("Shear_mag"); } else { - ASSERTL0(ParseUtils::GenerateOrderedVector(bvalues.c_str(), - m_f->m_bndRegionsToWrite), - "Failed to interpret range string"); + m_f->m_variables.push_back("Shear_x"); + m_f->m_variables.push_back("Shear_y"); + m_f->m_variables.push_back("Shear_z"); + m_f->m_variables.push_back("Shear_mag"); } - - NekDouble kinvis = m_f->m_session->GetParameter("Kinvis"); - - int i, j; - int spacedim = m_f->m_graph->GetSpaceDimension(); - if ((m_f->m_fielddef[0]->m_numHomogeneousDir) == 1 || - (m_f->m_fielddef[0]->m_numHomogeneousDir) == 2) + if (m_f->m_exp[0]->GetNumElmts() == 0) { - spacedim += m_f->m_fielddef[0]->m_numHomogeneousDir; + return; } - int nfields = m_f->m_fielddef[0]->m_fields.size(); - ASSERTL0(m_f->m_fielddef[0]->m_fields[0] == "u", + ASSERTL0(m_f->m_variables[0] == "u", "Implicit assumption that input is in incompressible format of " "(u,v,p) or (u,v,w,p)"); @@ -134,33 +116,14 @@ void ProcessWSS::Process(po::variables_map &vm) Array BndExp(newfields); Array BndElmtExp(spacedim); - // Extract original fields to 
boundary (for output) - for (int i = 0; i < m_f->m_exp.size(); ++i) - { - m_f->m_exp[i]->FillBndCondFromField(); - } - m_f->m_exp.resize(nfields + newfields); - string var = "u"; for (i = 0; i < newfields; ++i) { m_f->m_exp[nfields + i] = - m_f->AppendExpList(m_f->m_fielddef[0]->m_numHomogeneousDir, var); + m_f->AppendExpList(m_f->m_numHomogeneousDir); } - if (spacedim == 2) - { - m_f->m_fielddef[0]->m_fields.push_back("Shear_x"); - m_f->m_fielddef[0]->m_fields.push_back("Shear_y"); - m_f->m_fielddef[0]->m_fields.push_back("Shear_mag"); - } - else - { - m_f->m_fielddef[0]->m_fields.push_back("Shear_x"); - m_f->m_fielddef[0]->m_fields.push_back("Shear_y"); - m_f->m_fielddef[0]->m_fields.push_back("Shear_z"); - m_f->m_fielddef[0]->m_fields.push_back("Shear_mag"); - } + // Create map of boundary ids for partitioned domains SpatialDomains::BoundaryConditions bcs(m_f->m_session, @@ -303,7 +266,8 @@ void ProcessWSS::Process(po::variables_map &vm) { Vmath::Vvtvp(nqb, normals[i], 1, fshear[nshear - 1], 1, fshear[i], 1, fshear[i], 1); - BndExp[i]->FwdTrans(fshear[i], BndExp[i]->UpdateCoeffs()); + BndExp[i]->FwdTrans_IterPerExp(fshear[i], + BndExp[i]->UpdateCoeffs()); } // Tw @@ -314,7 +278,7 @@ void ProcessWSS::Process(po::variables_map &vm) fshear[nshear - 1], 1, fshear[nshear - 1], 1); } Vmath::Vsqrt(nqb, fshear[nshear - 1], 1, fshear[nshear - 1], 1); - BndExp[nshear - 1]->FwdTrans(fshear[nshear - 1], + BndExp[nshear - 1]->FwdTrans_IterPerExp(fshear[nshear - 1], BndExp[nshear - 1]->UpdateCoeffs()); } } diff --git a/library/FieldUtils/ProcessModules/ProcessWSS.h b/library/FieldUtils/ProcessModules/ProcessWSS.h index a2f56c42bd5b965207ae8e45e36d6bf754706163..900e385c202430d34751a26b6fb175227bbddbe4 100644 --- a/library/FieldUtils/ProcessModules/ProcessWSS.h +++ b/library/FieldUtils/ProcessModules/ProcessWSS.h @@ -36,7 +36,7 @@ #ifndef FIELDUTILS_PROCESSWSS #define FIELDUTILS_PROCESSWSS -#include "../Module.h" +#include "ProcessBoundaryExtract.h" namespace Nektar { @@ -47,7 
+47,7 @@ namespace FieldUtils * @brief This processing module calculates the wall shear stress and adds it * as an extra-field to the output file, and writes it to a surface output file. */ -class ProcessWSS : public ProcessModule +class ProcessWSS : public ProcessBoundaryExtract { public: /// Creates an instance of this class @@ -67,6 +67,12 @@ public: { return "ProcessWSS"; } + + virtual std::string GetModuleDescription() + { + return "Calculating wall shear stress"; + } + }; } } diff --git a/library/GlobalMapping/CMakeLists.txt b/library/GlobalMapping/CMakeLists.txt index b4b9fe98fc02597fc8ae07510749090bcdb4f5a3..689987442a06553e774ecaefa93624dae2e6b3b4 100644 --- a/library/GlobalMapping/CMakeLists.txt +++ b/library/GlobalMapping/CMakeLists.txt @@ -22,7 +22,9 @@ SET(GLOBAL_MAPPING_HEADERS ADD_DEFINITIONS(-DGLOBAL_MAPPING_EXPORTS) -ADD_NEKTAR_LIBRARY(GlobalMapping lib ${NEKTAR_LIBRARY_TYPE} ${GLOBAL_MAPPING_SOURCES} ${GLOBAL_MAPPING_HEADERS}) -TARGET_LINK_LIBRARIES(GlobalMapping MultiRegions) - -INSTALL(DIRECTORY ./ DESTINATION ${NEKTAR_INCLUDE_DIR}/GlobalMapping COMPONENT dev FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp") +ADD_NEKTAR_LIBRARY(GlobalMapping + SOURCES ${GLOBAL_MAPPING_SOURCES} + HEADERS ${GLOBAL_MAPPING_HEADERS} + DEPENDS MultiRegions + SUMMARY "Nektar++ GlobalMapping library" + DESCRIPTION "This library provides deformation routines for Nektar++ simulations.") diff --git a/library/GlobalMapping/Mapping.cpp b/library/GlobalMapping/Mapping.cpp index 183106bdb6540af9b4b698d05497d5cd5d1d4d9d..b5a5845b84dac90d8fd027d81768b0e060c8e719 100644 --- a/library/GlobalMapping/Mapping.cpp +++ b/library/GlobalMapping/Mapping.cpp @@ -48,10 +48,8 @@ bool Mapping::m_isDefined = false; MappingFactory& GetMappingFactory() { - typedef Loki::SingletonHolder Type; - return Type::Instance(); + static MappingFactory instance; + return instance; } Mapping::Mapping(const LibUtilities::SessionReaderSharedPtr& pSession, diff --git a/library/LibUtilities/BasicUtils/FieldIO.cpp 
b/library/LibUtilities/BasicUtils/FieldIO.cpp index cc8191d1f3be25869ba0a5b7ac05aa19a5915791..fab68d668d1586979ecd35a78c6d21d4016919e9 100644 --- a/library/LibUtilities/BasicUtils/FieldIO.cpp +++ b/library/LibUtilities/BasicUtils/FieldIO.cpp @@ -43,8 +43,6 @@ #include #include -#include - #include #include @@ -73,10 +71,8 @@ std::string fldCmdFormat = SessionReader::RegisterCmdLineArgument( */ FieldIOFactory &GetFieldIOFactory() { - typedef Loki:: - SingletonHolder Type; - return Type::Instance(); + static FieldIOFactory instance; + return instance; } /// Enumerator for auto-detection of FieldIO types. @@ -102,10 +98,10 @@ const std::string FieldIO::GetFileType(const std::string &filename, CommSharedPtr comm) { FieldIOType ioType = eXML; - int size = comm->GetSize(); - int rank = comm->GetRank(); + int size = comm->GetSize(); + bool root = comm->TreatAsRankZero(); - if (size == 1 || rank == 0) + if (size == 1 || root) { std::string datafilename; @@ -402,14 +398,14 @@ std::string FieldIO::SetUpOutput(const std::string outname, bool perRank, bool b ASSERTL0(!outname.empty(), "Empty path given to SetUpOutput()"); int nprocs = m_comm->GetSize(); - int rank = m_comm->GetRank(); + bool root = m_comm->TreatAsRankZero(); // Path to output: will be directory if parallel, normal file if // serial. fs::path specPath(outname), fulloutname; // in case we are rank 0 or not on a shared filesystem, check if the specPath already exists - if (backup && (rank == 0 || !m_sharedFilesystem) && fs::exists(specPath)) + if (backup && (root || !m_sharedFilesystem) && fs::exists(specPath)) { // rename. foo/bar_123.chk -> foo/bar_123.bak0.chk and in case // foo/bar_123.bak0.chk already exists, foo/bar_123.chk -> foo/bar_123.bak1.chk @@ -476,7 +472,7 @@ std::string FieldIO::SetUpOutput(const std::string outname, bool perRank, bool b m_comm->Block(); // Now get rank 0 processor to tidy everything else up. 
- if (rank == 0 || !m_sharedFilesystem) + if (root || !m_sharedFilesystem) { try { @@ -492,7 +488,7 @@ std::string FieldIO::SetUpOutput(const std::string outname, bool perRank, bool b m_comm->Block(); } - if (rank == 0) + if (root) { cout << "Writing: " << specPath; } @@ -508,7 +504,7 @@ std::string FieldIO::SetUpOutput(const std::string outname, bool perRank, bool b { try { - if (rank == 0 || !m_sharedFilesystem) + if (root || !m_sharedFilesystem) { fs::create_directory(specPath); } diff --git a/library/LibUtilities/BasicUtils/FieldIOHdf5.cpp b/library/LibUtilities/BasicUtils/FieldIOHdf5.cpp index 24b5ebfaf78bed7fdab3c97d205ab819edacf3a4..c7b29d074159d573735d222fd6c17e593dbf0196 100644 --- a/library/LibUtilities/BasicUtils/FieldIOHdf5.cpp +++ b/library/LibUtilities/BasicUtils/FieldIOHdf5.cpp @@ -864,6 +864,7 @@ void FieldIOHdf5::v_Import(const std::string &infilename, // Set properties for parallel file access (if we're in parallel) H5::PListSharedPtr parallelProps = H5::PList::Default(); H5::PListSharedPtr readPL = H5::PList::Default(); + H5::PListSharedPtr readPLInd = H5::PList::Default(); if (nRanks > 1) { @@ -873,6 +874,8 @@ void FieldIOHdf5::v_Import(const std::string &infilename, // Use collective IO readPL = H5::PList::DatasetXfer(); readPL->SetDxMpioCollective(); + readPLInd = H5::PList::DatasetXfer(); + readPLInd->SetDxMpioIndependent(); } DataSourceSharedPtr dataSource = H5DataSource::create( @@ -1012,8 +1015,9 @@ void FieldIOHdf5::v_Import(const std::string &infilename, map >::iterator gIt; for (gIt = groupsToDecomps.begin(); gIt != groupsToDecomps.end(); ++gIt) { - // Select region from dataset for this decomposition. set::iterator sIt; + + // Select region from dataset for this decomposition. 
for (sIt = gIt->second.begin(); sIt != gIt->second.end(); ++sIt) { std::stringstream fieldNameStream; @@ -1021,7 +1025,7 @@ void FieldIOHdf5::v_Import(const std::string &infilename, FieldDefinitionsSharedPtr fielddef = MemoryManager::AllocateSharedPtr(); - ImportFieldDef(readPL, root, decomps, *sIt, decompsToOffsets[*sIt], + ImportFieldDef(readPLInd, root, decomps, *sIt, decompsToOffsets[*sIt], fieldNameStream.str(), fielddef); fielddef->m_elementIDs = groupsToElmts[*sIt]; @@ -1031,7 +1035,7 @@ void FieldIOHdf5::v_Import(const std::string &infilename, { std::vector decompFieldData; ImportFieldData( - readPL, data_dset, data_fspace, + readPLInd, data_dset, data_fspace, decompsToOffsets[*sIt].data, decomps, *sIt, fielddef, decompFieldData); fielddata.push_back(decompFieldData); @@ -1276,7 +1280,6 @@ void FieldIOHdf5::ImportFieldData( data_fspace->SelectRange(data_i, nFieldVals); data_dset->Read(fielddata, data_fspace, readPL); - int datasize = CheckFieldDefinition(fielddef); ASSERTL0( fielddata.size() == datasize * fielddef->m_fields.size(), diff --git a/library/LibUtilities/BasicUtils/FieldIOXml.cpp b/library/LibUtilities/BasicUtils/FieldIOXml.cpp index b641e5d24f71b5eb0e1683b78a0e2dc8e9bd7e3d..bc1bf9bbee0d28c60b3fa669b12dd3fdf5f23d53 100644 --- a/library/LibUtilities/BasicUtils/FieldIOXml.cpp +++ b/library/LibUtilities/BasicUtils/FieldIOXml.cpp @@ -91,7 +91,7 @@ void FieldIOXml::v_Write(const std::string &outFile, const bool backup) { double tm0 = 0.0, tm1 = 0.0; - if (m_comm->GetRank() == 0) + if (m_comm->TreatAsRankZero()) { tm0 = m_comm->Wtime(); } @@ -358,7 +358,7 @@ void FieldIOXml::v_Write(const std::string &outFile, m_comm->Block(); // all data has been written - if (m_comm->GetRank() == 0) + if (m_comm->TreatAsRankZero()) { tm1 = m_comm->Wtime(); cout << " (" << tm1 - tm0 << "s, XML)" << endl; diff --git a/library/LibUtilities/BasicUtils/H5.h b/library/LibUtilities/BasicUtils/H5.h index 
8e5184a4eaf5e292442bc6521e7068ccce1d46be..575007ae4ddf8f04b8e9c8a67efe2eb668ebf51a 100644 --- a/library/LibUtilities/BasicUtils/H5.h +++ b/library/LibUtilities/BasicUtils/H5.h @@ -76,6 +76,7 @@ namespace H5 0); \ } + class Error : public std::exception { }; @@ -474,12 +475,9 @@ public: DataTypeSharedPtr mem_t = DataTypeTraits::GetType(); DataSpaceSharedPtr memspace = DataSpace::OneD(data.size()); - H5Dwrite(m_Id, - mem_t->GetId(), - memspace->GetId(), - filespace->GetId(), - dxpl->GetId(), - &data[0]); + H5_CALL(H5Dwrite, + (m_Id, mem_t->GetId(), memspace->GetId(), filespace->GetId(), + dxpl->GetId(), &data[0]) ); } template void Read(std::vector &data) { @@ -508,12 +506,8 @@ public: data.resize(len); DataSpaceSharedPtr memspace = DataSpace::OneD(len); - H5Dread(m_Id, - mem_t->GetId(), - memspace->GetId(), - filespace->GetId(), - dxpl->GetId(), - &data[0]); + H5_CALL(H5Dread, (m_Id, mem_t->GetId(), memspace->GetId(), + filespace->GetId(), dxpl->GetId(), &data[0])); } private: diff --git a/library/LibUtilities/BasicUtils/MeshPartition.cpp b/library/LibUtilities/BasicUtils/MeshPartition.cpp index 6605afe1a4715bcf952b56bbe19b47ec8e518c9f..b9df345e6a69fd3936325469010e0b3b8d101686 100644 --- a/library/LibUtilities/BasicUtils/MeshPartition.cpp +++ b/library/LibUtilities/BasicUtils/MeshPartition.cpp @@ -69,11 +69,8 @@ namespace Nektar { MeshPartitionFactory& GetMeshPartitionFactory() { - typedef Loki::SingletonHolder Type; - return Type::Instance(); + static MeshPartitionFactory instance; + return instance; } MeshPartition::MeshPartition(const LibUtilities::SessionReaderSharedPtr& pSession) : @@ -1558,6 +1555,9 @@ namespace Nektar id = vIt->second.list[j]; vVertices[id] = m_meshVertices[id]; } + // Compile list of edges (for curved information) + id = vIt->second.id; + vEdges[id] = m_meshEdges[id]; } } } @@ -1955,163 +1955,160 @@ namespace Nektar } } - if (m_dim >= 2) - { - std::map::const_iterator vItCurve; + std::map::const_iterator vItCurve; - if(m_isCompressed) + 
if(m_isCompressed) + { + std::vector edgeinfo; + std::vector faceinfo; + MeshCurvedPts curvedpts; + curvedpts.id = 0; // assume all points are going in here + int ptoffset = 0; + int newidx = 0; + std::map idxmap; + + for (vItCurve = m_meshCurved.begin(); + vItCurve != m_meshCurved.end(); + ++vItCurve) { - std::vector edgeinfo; - std::vector faceinfo; - MeshCurvedPts curvedpts; - curvedpts.id = 0; // assume all points are going in here - int ptoffset = 0; - int newidx = 0; - std::map idxmap; - - for (vItCurve = m_meshCurved.begin(); - vItCurve != m_meshCurved.end(); - ++vItCurve) - { - MeshCurved c = vItCurve->second; + MeshCurved c = vItCurve->second; - bool IsEdge = boost::iequals(c.entitytype,"E"); - bool IsFace = boost::iequals(c.entitytype,"F"); + bool IsEdge = boost::iequals(c.entitytype,"E"); + bool IsFace = boost::iequals(c.entitytype,"F"); - if((IsEdge&&vEdges.find(c.entityid) != vEdges.end())|| - (IsFace&&vFaces.find(c.entityid) != vFaces.end())) + if((IsEdge&&vEdges.find(c.entityid) != vEdges.end())|| + (IsFace&&vFaces.find(c.entityid) != vFaces.end())) + { + MeshCurvedInfo cinfo; + // add in + cinfo.id = c.id; + cinfo.entityid = c.entityid; + cinfo.npoints = c.npoints; + for(int i = 0; i < SIZE_PointsType; ++i) { - MeshCurvedInfo cinfo; - // add in - cinfo.id = c.id; - cinfo.entityid = c.entityid; - cinfo.npoints = c.npoints; - for(int i = 0; i < SIZE_PointsType; ++i) + if(c.type.compare(kPointsTypeStr[i]) == 0) { - if(c.type.compare(kPointsTypeStr[i]) == 0) - { - cinfo.ptype = (PointsType) i; - break; - } + cinfo.ptype = (PointsType) i; + break; } - cinfo.ptid = 0; // set to just one point set - cinfo.ptoffset = ptoffset; - ptoffset += c.npoints; + } + cinfo.ptid = 0; // set to just one point set + cinfo.ptoffset = ptoffset; + ptoffset += c.npoints; - if (IsEdge) + if (IsEdge) + { + edgeinfo.push_back(cinfo); + } + else + { + faceinfo.push_back(cinfo); + } + + // fill in points to list. 
+ for(int i =0; i < c.npoints; ++i) + { + // get index from full list; + int idx = m_meshCurvedPts[c.ptid] + .index[c.ptoffset+i]; + + // if index is not already in curved + // points add it or set index to location + if(idxmap.count(idx) == 0) { - edgeinfo.push_back(cinfo); + idxmap[idx] = newidx; + curvedpts.index.push_back(newidx); + curvedpts.pts.push_back( + m_meshCurvedPts[c.ptid].pts[idx]); + newidx++; } else { - faceinfo.push_back(cinfo); - } - - // fill in points to list. - for(int i =0; i < c.npoints; ++i) - { - // get index from full list; - int idx = m_meshCurvedPts[c.ptid] - .index[c.ptoffset+i]; - - // if index is not already in curved - // points add it or set index to location - if(idxmap.count(idx) == 0) - { - idxmap[idx] = newidx; - curvedpts.index.push_back(newidx); - curvedpts.pts.push_back( - m_meshCurvedPts[c.ptid].pts[idx]); - newidx++; - } - else - { - curvedpts.index.push_back(idxmap[idx]); - } + curvedpts.index.push_back(idxmap[idx]); } } } + } - // add xml information - if(edgeinfo.size()) - { - vCurved->SetAttribute("COMPRESSED", - CompressData::GetCompressString()); - vCurved->SetAttribute("BITSIZE", - CompressData::GetBitSizeStr()); - - x = new TiXmlElement("E"); - std::string dataStr; - CompressData::ZlibEncodeToBase64Str(edgeinfo,dataStr); - x->LinkEndChild(new TiXmlText(dataStr)); - vCurved->LinkEndChild(x); - } - - if(faceinfo.size()) - { - vCurved->SetAttribute("COMPRESSED", - CompressData::GetCompressString()); - vCurved->SetAttribute("BITSIZE", - CompressData::GetBitSizeStr()); - - x = new TiXmlElement("F"); - std::string dataStr; - CompressData::ZlibEncodeToBase64Str(faceinfo,dataStr); - x->LinkEndChild(new TiXmlText(dataStr)); - vCurved->LinkEndChild(x); - } + // add xml information + if(edgeinfo.size()) + { + vCurved->SetAttribute("COMPRESSED", + CompressData::GetCompressString()); + vCurved->SetAttribute("BITSIZE", + CompressData::GetBitSizeStr()); + + x = new TiXmlElement("E"); + std::string dataStr; + 
CompressData::ZlibEncodeToBase64Str(edgeinfo,dataStr); + x->LinkEndChild(new TiXmlText(dataStr)); + vCurved->LinkEndChild(x); + } - if(edgeinfo.size()||faceinfo.size()) - { - x = new TiXmlElement("DATAPOINTS"); - x->SetAttribute("ID", curvedpts.id); - - TiXmlElement *subx = new TiXmlElement("INDEX"); - std::string dataStr; - CompressData::ZlibEncodeToBase64Str(curvedpts.index, - dataStr); - subx->LinkEndChild(new TiXmlText(dataStr)); - x->LinkEndChild(subx); - - subx = new TiXmlElement("POINTS"); - CompressData::ZlibEncodeToBase64Str(curvedpts.pts, - dataStr); - subx->LinkEndChild(new TiXmlText(dataStr)); - x->LinkEndChild(subx); + if(faceinfo.size()) + { + vCurved->SetAttribute("COMPRESSED", + CompressData::GetCompressString()); + vCurved->SetAttribute("BITSIZE", + CompressData::GetBitSizeStr()); + + x = new TiXmlElement("F"); + std::string dataStr; + CompressData::ZlibEncodeToBase64Str(faceinfo,dataStr); + x->LinkEndChild(new TiXmlText(dataStr)); + vCurved->LinkEndChild(x); + } - vCurved->LinkEndChild(x); - } + if(edgeinfo.size()||faceinfo.size()) + { + x = new TiXmlElement("DATAPOINTS"); + x->SetAttribute("ID", curvedpts.id); + + TiXmlElement *subx = new TiXmlElement("INDEX"); + std::string dataStr; + CompressData::ZlibEncodeToBase64Str(curvedpts.index, + dataStr); + subx->LinkEndChild(new TiXmlText(dataStr)); + x->LinkEndChild(subx); + + subx = new TiXmlElement("POINTS"); + CompressData::ZlibEncodeToBase64Str(curvedpts.pts, + dataStr); + subx->LinkEndChild(new TiXmlText(dataStr)); + x->LinkEndChild(subx); + + vCurved->LinkEndChild(x); } - else + } + else + { + for (vItCurve = m_meshCurved.begin(); + vItCurve != m_meshCurved.end(); + ++vItCurve) { - for (vItCurve = m_meshCurved.begin(); - vItCurve != m_meshCurved.end(); - ++vItCurve) - { - MeshCurved c = vItCurve->second; + MeshCurved c = vItCurve->second; - bool IsEdge = boost::iequals(c.entitytype,"E"); - bool IsFace = boost::iequals(c.entitytype,"F"); + bool IsEdge = boost::iequals(c.entitytype,"E"); + bool 
IsFace = boost::iequals(c.entitytype,"F"); - if((IsEdge&&vEdges.find(c.entityid) != vEdges.end())|| - (IsFace&&vFaces.find(c.entityid) != vFaces.end())) + if((IsEdge&&vEdges.find(c.entityid) != vEdges.end())|| + (IsFace&&vFaces.find(c.entityid) != vFaces.end())) + { + x = new TiXmlElement(c.entitytype); + x->SetAttribute("ID", c.id); + if (IsEdge) { - x = new TiXmlElement(c.entitytype); - x->SetAttribute("ID", c.id); - if (IsEdge) - { - x->SetAttribute("EDGEID", c.entityid); - } - else - { - x->SetAttribute("FACEID", c.entityid); - } - x->SetAttribute("TYPE", c.type); - x->SetAttribute("NUMPOINTS", c.npoints); - y = new TiXmlText(c.data); - x->LinkEndChild(y); - vCurved->LinkEndChild(x); + x->SetAttribute("EDGEID", c.entityid); } + else + { + x->SetAttribute("FACEID", c.entityid); + } + x->SetAttribute("TYPE", c.type); + x->SetAttribute("NUMPOINTS", c.npoints); + y = new TiXmlText(c.data); + x->LinkEndChild(y); + vCurved->LinkEndChild(x); } } } @@ -2194,10 +2191,7 @@ namespace Nektar vElmtGeometry->LinkEndChild(vFace); } vElmtGeometry->LinkEndChild(vElement); - if (m_dim >= 2) - { - vElmtGeometry->LinkEndChild(vCurved); - } + vElmtGeometry->LinkEndChild(vCurved); vElmtGeometry->LinkEndChild(vComposite); vElmtGeometry->LinkEndChild(vDomain); diff --git a/library/LibUtilities/BasicUtils/NekFactory.hpp b/library/LibUtilities/BasicUtils/NekFactory.hpp index 65177a9ba5d83666805f210c3b0a2bf8ca385740..75393cb01e215aa8f4285ab1f7d3887db1876aad 100644 --- a/library/LibUtilities/BasicUtils/NekFactory.hpp +++ b/library/LibUtilities/BasicUtils/NekFactory.hpp @@ -33,415 +33,207 @@ // /////////////////////////////////////////////////////////////////////////////// -// Primary definition and generator for specialised object factories. 
-#ifndef BOOST_PP_IS_ITERATING +#ifndef NEKTAR_LIBUTILITIES_BASICUTILS_NEKFACTORY +#define NEKTAR_LIBUTILITIES_BASICUTILS_NEKFACTORY - #ifndef NEKTAR_LIB_UTILITIES_BASIC_UTILS_NEK_FACTORY_HPP - #define NEKTAR_LIB_UTILITIES_BASIC_UTILS_NEK_FACTORY_HPP +// Primary definition and generator for specialised object factories. +#include +#include - #include - #include - #include - #include - #include - #include +#include - #include +#include +#include +#include - #include - #include - #include +#include - #include +namespace Nektar +{ +namespace LibUtilities +{ - #ifndef MAX_PARAM - #define MAX_PARAM 6 // default maximum number of parameters to support - #endif +// Generate parameter typenames with default type of 'none' +typedef boost::unique_lock WriteLock; +typedef boost::shared_lock ReadLock; -namespace Nektar +/** + * @class NekFactory + * + * @brief Provides a generic Factory class. + * + * Implements a generic object factory. Class-types which use an arbitrary + * number of parameters may be used via C++ variadic templating. + * + * To allow a class to be instantiated by the factory, the following are + * required in each class definition (in the case of a single parameter): + * + * \code + * static [baseclass]* create([paramtype1] &P) { + * return new [derivedclass](P); + * } + * static std::string className; + * \endcode + * + * and outside the class definition in the implementation: + * + * \code + * std::string [derivedclass]::className + * = Factory:: + * RegisterCreatorFunction("[derivedclass]", + * [derivedclass]::create,"Description"); + * \endcode + * + * The assignment of the static variable className is done through the call to + * RegisterCreatorFunction, which registers the class with the factory prior to + * the start of the main() routine. 
+ * + * To create an instance of a derived class, for instance: + * \code + * [baseclass]* var_name = + * Factory + * ::CreateInstance("[derivedclass]",Param1); + * \endcode + */ +template +class NekFactory { - namespace LibUtilities +public: + /// Description datatype + typedef std::string tDescription; + /// Comparison predicator of key + typedef std::less tPredicator; + /// Shared pointer to an object of baseclass type. + typedef boost::shared_ptr tBaseSharedPtr; + /// CreatorFunction type which takes parameter and returns base class shared + /// pointer. + typedef tBaseSharedPtr (*CreatorFunction) (tParam...); + + /// Define a struct to hold the information about a module. + struct ModuleEntry { - // For unused template parameters. - struct none {}; - - // Generate parameter typenames with default type of 'none' - #define FACTORY_print(z, n, data) BOOST_PP_CAT(data, n) = none - typedef boost::unique_lock WriteLock; - typedef boost::shared_lock ReadLock; - - /** - * @class NekFactory - * \brief Provides a generic Factory class. - * - * Implements a generic object factory. Class-types which use a - * potentially arbitrary number of parameters may be used with - * specialised forms of the NekFactory. An upper - * limit on the number of parameters is imposed by the MAX_PARAM - * preprocessor definition in the NekFactory.hpp file. The - * specialisations are generated at compile type using Boost - * preprocessor by through repeated inclusion of the NekFactory.hpp - * file. 
- * - * To allow a class to be instantiated by the factory, the - * following are required in each class definition (in the case of - * a single parameter): - * \code - * static [baseclass]* create([paramtype1] &P) { - * return new [derivedclass](P); - * } - * static std::string className; - * \endcode - * and outside the class definition in the implementation: - * \code - * std::string [derivedclass]::className - * = Factory:: - * RegisterCreatorFunction("[derivedclass]", - * [derivedclass]::create,"Description"); - * \endcode - * The assignment of the static variable className is done through the - * call to RegisterCreatorFunction, which registers the class with the - * factory prior to the start of the main() routine. - * - * To create an instance of a derived class, for instance: - * \code - * [baseclass]* var_name = - * Factory - * ::CreateInstance("[derivedclass]",Param1); - * \endcode - */ - template - class NekFactory + ModuleEntry(CreatorFunction pFunc, const tDescription pDesc) + : m_func(pFunc), + m_desc(pDesc) { - public: - /// Description datatype - typedef std::string tDescription; - /// Comparison predicator of key - typedef std::less tPredicator; - /// Shared pointer to an object of baseclass type. - typedef boost::shared_ptr tBaseSharedPtr; - /// CreatorFunction type which takes parameter and returns base - /// class shared pointer. - typedef tBaseSharedPtr (*CreatorFunction) (BOOST_PP_ENUM_PARAMS(MAX_PARAM, tParam)); - - /// Define a struct to hold the information about a module. - struct ModuleEntry - { - ModuleEntry(CreatorFunction pFunc, const tDescription pDesc) - : m_func(pFunc), - m_desc(pDesc) - { - } - - /// Function used to create instance of class. - CreatorFunction m_func; - /// Description of class for use in listing available classes. - tDescription m_desc; - }; - - /// Factory map between key and module data. 
- typedef std::map TMapFactory; - /// Iterator for factory map - typedef typename TMapFactory::iterator TMapFactoryIterator; - - - public: - NekFactory() : m_mutex() {} - - /** - * @brief Create an instance of the class referred to by \c idKey. - * - * Searches the factory's map for the given key and returns a shared - * base class pointer to a new instance of the associated class. - * @param idKey Key of class to create. - * @param x Parameter to pass to class constructor. - * @returns Base class pointer to new instance. - */ - tBaseSharedPtr CreateInstance(tKey idKey BOOST_PP_COMMA_IF(MAX_PARAM) - BOOST_PP_ENUM_BINARY_PARAMS(MAX_PARAM, tParam, x)) - { - - ReadLock vReadLock(m_mutex); - - // Now try and find the key in the map. - TMapFactoryIterator it = getMapFactory()->find(idKey); - - // If successful, check the CreatorFunction is defined and - // create a new instance of the class. - if (it != getMapFactory()->end()) - { - ModuleEntry *tmp = &(it->second); - vReadLock.unlock(); - - if (tmp->m_func) - { - try - { - return tmp->m_func(BOOST_PP_ENUM_PARAMS(MAX_PARAM, x)); - } - catch (const std::string& s) - { - std::stringstream errstr; - errstr << "Unable to create module: " << idKey << "\n"; - errstr << s; - ASSERTL0(false, errstr.str()); - } - } - } - - // If we get this far, the key doesn't exist, so throw an error. - std::stringstream errstr; - errstr << "No such module: " << idKey << std::endl; - PrintAvailableClasses(errstr); - ASSERTL0(false, errstr.str()); - return tBaseSharedPtr(); - } - - - /** - * @brief Register a class with the factory. - * - * This function is called by each class in a static context (prior - * to the execution of main()) and creates an entry for the class - * in the factory's map. - * @param idKey Key used to reference the class. - * @param classCreator Function to call to create an instance - * of this class. - * @param pDesc Optional description of class. - * @returns The given key \c idKey. 
- */ - tKey RegisterCreatorFunction(tKey idKey, CreatorFunction classCreator, - tDescription pDesc = "") - { - WriteLock vWriteLock(m_mutex); - - ModuleEntry e(classCreator, pDesc); - getMapFactory()->insert(std::pair(idKey, e)); - return idKey; - } - - - /** - * @brief Checks if a particular module is available. - */ - bool ModuleExists(tKey idKey) - { - ReadLock vReadLock(m_mutex); - - // Now try and find the key in the map. - TMapFactoryIterator it = getMapFactory()->find(idKey); + } - if (it != getMapFactory()->end()) - { - return true; - } - return false; - } + /// Function used to create instance of class. + CreatorFunction m_func; + /// Description of class for use in listing available classes. + tDescription m_desc; + }; + /// Factory map between key and module data. + typedef std::map TMapFactory; + /// Iterator for factory map + typedef typename TMapFactory::iterator TMapFactoryIterator; + +public: + NekFactory() : m_mutex() {} + + /** + * @brief Create an instance of the class referred to by \c idKey. + * + * Searches the factory's map for the given key and returns a shared + * base class pointer to a new instance of the associated class. + * @param idKey Key of class to create. + * @param x Parameter to pass to class constructor. + * @returns Base class pointer to new instance. + */ + tBaseSharedPtr CreateInstance(tKey idKey, tParam... args) + { + ReadLock vReadLock(m_mutex); - /** - * @brief Prints the available classes to stdout. 
- */ - void PrintAvailableClasses(std::ostream& pOut = std::cout) - { - ReadLock vReadLock(m_mutex); - - pOut << std::endl << "Available classes: " << std::endl; - TMapFactoryIterator it; - for (it = getMapFactory()->begin(); it != getMapFactory()->end(); ++it) - { - pOut << std::endl << "Available classes: " << std::endl; - TMapFactoryIterator it; - for (it = getMapFactory()->begin(); it != getMapFactory()->end(); ++it) - { - pOut << " " << it->first; - if (it->second.m_desc != "") - { - pOut << ":" << std::endl << " " - << it->second.m_desc << std::endl; - } - else - { - pOut << std::endl; - } - } - } - } + // Now try and find the key in the map. + TMapFactoryIterator it = getMapFactory()->find(idKey); + // If successful, check the CreatorFunction is defined and + // create a new instance of the class. + if (it != getMapFactory()->end()) + { + ModuleEntry *tmp = &(it->second); + vReadLock.unlock(); - /** - * @brief Retrieves a key, given a description - */ - tKey GetKey(tDescription pDesc) + if (tmp->m_func) + { + try { - ReadLock vReadLock(m_mutex); - - TMapFactoryIterator it; - for (it = getMapFactory()->begin(); it != getMapFactory()->end(); ++it) - { - if (it->second.m_desc == pDesc) - { - return it->first; - } - } - std::string errstr = "Module '" - + boost::lexical_cast(pDesc) - + "' is not known."; - ASSERTL0(false, errstr); + return tmp->m_func(args...); } - - - /** - * @brief Returns the description of a class - */ - std::string GetClassDescription(tKey idKey) + catch (const std::string& s) { - ReadLock vReadLock(m_mutex); - - // Now try and find the key in the map. - TMapFactoryIterator it = getMapFactory()->find(idKey); - std::stringstream errstr; - errstr << "No such module: " << idKey << std::endl; - ASSERTL0 (it != getMapFactory()->end(), errstr.str()); - return it->second.m_desc; - } - - protected: - /** - * @brief Ensure the factory's map is created. - * @returns The factory's map. 
- */ - TMapFactory* getMapFactory() - { - return &mMapFactory; + errstr << "Unable to create module: " << idKey << "\n"; + errstr << s; + ASSERTL0(false, errstr.str()); } + } + } - private: - NekFactory(const NekFactory& rhs); - NekFactory& operator=(const NekFactory& rhs); - - TMapFactory mMapFactory; - - boost::shared_mutex m_mutex; - - }; - - #undef FACTORY_print - - #define BOOST_PP_ITERATION_LIMITS (0, MAX_PARAM-1) - #define BOOST_PP_FILENAME_1 "LibUtilities/BasicUtils/NekFactory.hpp" - #include BOOST_PP_ITERATE() - + // If we get this far, the key doesn't exist, so throw an error. + std::stringstream errstr; + errstr << "No such module: " << idKey << std::endl; + PrintAvailableClasses(errstr); + ASSERTL0(false, errstr.str()); + return tBaseSharedPtr(); } -} - #endif // end NEKTAR_LIB_UTILITIES_BASIC_UTILS_NEK_FACTORY_HPP - #undef MAX_PARAM -// Specialisations for the different numbers of parameters. -#else - // Define the number of parameters - #define n BOOST_PP_ITERATION() - // Define macro for printing the non-required template parameters - #define FACTORY_print(z, n, data) data - #include - #include - -typedef boost::unique_lock WriteLock; -typedef boost::shared_lock ReadLock; - - template < typename tKey, - typename tBase BOOST_PP_COMMA_IF(n) - BOOST_PP_ENUM_PARAMS(n, typename tParam) > - class NekFactory< tKey, tBase, - BOOST_PP_ENUM_PARAMS(n, tParam)BOOST_PP_COMMA_IF(n) - BOOST_PP_ENUM(BOOST_PP_SUB(MAX_PARAM,n), FACTORY_print, none) > + /** + * @brief Register a class with the factory. + * + * This function is called by each class in a static context (prior + * to the execution of main()) and creates an entry for the class + * in the factory's map. + * @param idKey Key used to reference the class. + * @param classCreator Function to call to create an instance + * of this class. + * @param pDesc Optional description of class. + * @returns The given key \c idKey. 
+ */ + tKey RegisterCreatorFunction(tKey idKey, CreatorFunction classCreator, + tDescription pDesc = "") { - public: - typedef std::string tDescription; - typedef std::less tPredicator; - typedef boost::shared_ptr tBaseSharedPtr; - typedef tBaseSharedPtr (*CreatorFunction) (BOOST_PP_ENUM_PARAMS(n, tParam)); - - struct ModuleEntry - { - ModuleEntry(CreatorFunction pFunc, const tDescription pDesc) - : m_func(pFunc), - m_desc(pDesc) - { - } - CreatorFunction m_func; - tDescription m_desc; - }; - typedef std::map TMapFactory; - typedef typename TMapFactory::iterator TMapFactoryIterator; - - NekFactory() : m_mutex() {} - - tBaseSharedPtr CreateInstance(tKey idKey BOOST_PP_COMMA_IF(n) - BOOST_PP_ENUM_BINARY_PARAMS(n, tParam, x)) - { - ReadLock vReadLock(m_mutex); + WriteLock vWriteLock(m_mutex); - TMapFactoryIterator it = getMapFactory()->find(idKey); - if (it != getMapFactory()->end()) - { - ModuleEntry *tmp = &(it->second); - vReadLock.unlock(); + ModuleEntry e(classCreator, pDesc); + getMapFactory()->insert(std::pair(idKey, e)); + return idKey; + } - if (tmp->m_func) - { - try - { - return tmp->m_func(BOOST_PP_ENUM_PARAMS(n, x)); - } - catch (const std::string& s) - { - std::stringstream errstr; - errstr << "Unable to create module: " << idKey << "\n"; - errstr << s; - ASSERTL0(false, errstr.str()); - } - } - } - std::stringstream errstr; - errstr << "No such module: " << idKey << std::endl; - PrintAvailableClasses(errstr); - ASSERTL0(false, errstr.str()); - return tBaseSharedPtr(); - } - tKey RegisterCreatorFunction(tKey idKey, - CreatorFunction classCreator, - tDescription pDesc = "") { - WriteLock vWriteLock(m_mutex); + /** + * @brief Checks if a particular module is available. + */ + bool ModuleExists(tKey idKey) + { + ReadLock vReadLock(m_mutex); - ModuleEntry e(classCreator, pDesc); - getMapFactory()->insert(std::pair(idKey, e)); - return idKey; - } + // Now try and find the key in the map. 
+ TMapFactoryIterator it = getMapFactory()->find(idKey); - bool ModuleExists(tKey idKey) + if (it != getMapFactory()->end()) { - ReadLock vReadLock(m_mutex); + return true; + } + return false; + } - // Now try and find the key in the map. - TMapFactoryIterator it = getMapFactory()->find(idKey); - if (it != getMapFactory()->end()) - { - return true; - } - return false; - } + /** + * @brief Prints the available classes to stdout. + */ + void PrintAvailableClasses(std::ostream& pOut = std::cout) + { + ReadLock vReadLock(m_mutex); - void PrintAvailableClasses(std::ostream& pOut = std::cout) + pOut << std::endl << "Available classes: " << std::endl; + TMapFactoryIterator it; + for (it = getMapFactory()->begin(); it != getMapFactory()->end(); ++it) { - ReadLock vReadLock(m_mutex); - pOut << std::endl << "Available classes: " << std::endl; TMapFactoryIterator it; for (it = getMapFactory()->begin(); it != getMapFactory()->end(); ++it) @@ -450,7 +242,7 @@ typedef boost::shared_lock ReadLock; if (it->second.m_desc != "") { pOut << ":" << std::endl << " " - << it->second.m_desc << std::endl; + << it->second.m_desc << std::endl; } else { @@ -458,53 +250,68 @@ typedef boost::shared_lock ReadLock; } } } + } - tKey GetKey(tDescription pDesc) - { - ReadLock vReadLock(m_mutex); - TMapFactoryIterator it; - for (it = getMapFactory()->begin(); it != getMapFactory()->end(); ++it) + /** + * @brief Retrieves a key, given a description + */ + tKey GetKey(tDescription pDesc) + { + ReadLock vReadLock(m_mutex); + + TMapFactoryIterator it; + for (it = getMapFactory()->begin(); it != getMapFactory()->end(); ++it) + { + if (it->second.m_desc == pDesc) { - if (it->second.m_desc == pDesc) - { - return it->first; - } + return it->first; } - std::string errstr = "Module '" - + boost::lexical_cast(pDesc) - + "' is not known."; - ASSERTL0(false, errstr); } + std::string errstr = "Module '" + + boost::lexical_cast(pDesc) + + "' is not known."; + ASSERTL0(false, errstr); + } - std::string 
GetClassDescription(tKey idKey) - { - ReadLock vReadLock(m_mutex); - // Now try and find the key in the map. - TMapFactoryIterator it = getMapFactory()->find(idKey); + /** + * @brief Returns the description of a class + */ + std::string GetClassDescription(tKey idKey) + { + ReadLock vReadLock(m_mutex); + + // Now try and find the key in the map. + TMapFactoryIterator it = getMapFactory()->find(idKey); - std::stringstream errstr; - errstr << "No such module: " << idKey << std::endl; - ASSERTL0 (it != getMapFactory()->end(), errstr.str()); - return it->second.m_desc; - } + std::stringstream errstr; + errstr << "No such module: " << idKey << std::endl; + ASSERTL0 (it != getMapFactory()->end(), errstr.str()); + return it->second.m_desc; + } - protected: - TMapFactory * getMapFactory() { - return &mMapFactory; - } +protected: + /** + * @brief Ensure the factory's map is created. + * @returns The factory's map. + */ + TMapFactory* getMapFactory() + { + return &mMapFactory; + } - private: - NekFactory(const NekFactory& rhs); - NekFactory& operator=(const NekFactory& rhs); +private: + NekFactory(const NekFactory& rhs); + NekFactory& operator=(const NekFactory& rhs); - TMapFactory mMapFactory; - boost::shared_mutex m_mutex; + TMapFactory mMapFactory; - }; - #undef n - #undef FACTORY_print + boost::shared_mutex m_mutex; -#endif +}; + +} +} +#endif diff --git a/library/LibUtilities/BasicUtils/ParseUtils.hpp b/library/LibUtilities/BasicUtils/ParseUtils.hpp index 0577b3bc7287bb7c92ab89aeac5b606e66fe2eeb..a11c55381f5d51855dfa5fd5b82862b81d5bb333 100644 --- a/library/LibUtilities/BasicUtils/ParseUtils.hpp +++ b/library/LibUtilities/BasicUtils/ParseUtils.hpp @@ -64,7 +64,7 @@ namespace Nektar { SymbolFunctor symbolFunctor(&symbol); ValueFunctor valueFunctor(&value); - + return parse(str, // Begin grammar ( @@ -93,7 +93,7 @@ namespace Nektar space_p).full; } - + static bool GenerateOrderedVector(const char *const str, std::vector &vec) { // Functors used to parse the sequence. 
@@ -114,7 +114,7 @@ namespace Nektar { // Functors used to parse the sequence. fctor4 functor4(&vec); - + return parse(str, // Begin grammar ( @@ -124,12 +124,12 @@ namespace Nektar // End grammar space_p).full; } - + static bool GenerateUnOrderedVector(const char *const str, std::vector &vec) { // Functors used to parse the sequence. fctor5 functor5(&vec); - + return parse(str, // Begin grammar ( @@ -139,7 +139,22 @@ namespace Nektar // End grammar space_p).full; } - + + static bool GenerateUnOrderedVector(const char *const str, std::vector &vec) + { + // Functors used to parse the sequence. + fctor6 functor6(&vec); + + return parse(str, + // Begin grammar + ( + uint_p[functor6] >> *(',' >> uint_p[functor6]) + ) + , + // End grammar + space_p).full; + } + static bool GenerateOrderedStringVector(const char *const str, std::vector &vec) { // Functors used to parse the sequence. @@ -226,7 +241,7 @@ namespace Nektar m_value(value) { } - + void operator()(NekDouble val) const { *m_value = val; @@ -245,7 +260,7 @@ namespace Nektar void operator()(unsigned int n) const { -#ifdef NOTREQUIRED //SJS: I do not think we need this check +#ifdef NOTREQUIRED //SJS: I do not think we need this check if (!m_vector->empty()) { unsigned int prevElem = m_vector->back(); @@ -304,7 +319,7 @@ namespace Nektar std::vector *m_vector; }; - // Probably should template fctor1 if that is possible? + // Probably should template fctor1 if that is possible? 
struct fctor4 { fctor4(std::vector *vec): @@ -333,24 +348,41 @@ namespace Nektar std::vector *m_vector; fctor4(); }; - + struct fctor5 { fctor5(std::vector *vec): m_vector(vec) { } - + void operator()(NekDouble n) const { m_vector->push_back(n); } - + private: std::vector *m_vector; fctor5(); }; + struct fctor6 + { + fctor6(std::vector *vec): + m_vector(vec) + { + } + + void operator()(unsigned int n) const + { + m_vector->push_back(n); + } + + private: + std::vector *m_vector; + fctor6(); + }; + }; } diff --git a/library/LibUtilities/BasicUtils/SessionReader.cpp b/library/LibUtilities/BasicUtils/SessionReader.cpp index b2096c1271c274034a115112ef28e864a13a5caf..4d40ad1357d6e5e1924a49879c6af73cfe7e66de 100644 --- a/library/LibUtilities/BasicUtils/SessionReader.cpp +++ b/library/LibUtilities/BasicUtils/SessionReader.cpp @@ -719,7 +719,7 @@ namespace Nektar ParameterMap::const_iterator paramIter = m_parameters.find(vName); ASSERTL0(paramIter != m_parameters.end(), "Required parameter '" + pName + "' not specified in session."); - pVar = (int)floor(paramIter->second); + pVar = (int)round(paramIter->second); } @@ -733,7 +733,7 @@ namespace Nektar ParameterMap::const_iterator paramIter = m_parameters.find(vName); if(paramIter != m_parameters.end()) { - pVar = (int)floor(paramIter->second); + pVar = (int)round(paramIter->second); } else { diff --git a/library/LibUtilities/BasicUtils/ShapeType.hpp b/library/LibUtilities/BasicUtils/ShapeType.hpp index e62add3bbfafc71cd8c03f0157a77e19bf7ab426..58cd68fb24bc56156a627a87f8b3a9470af08855 100644 --- a/library/LibUtilities/BasicUtils/ShapeType.hpp +++ b/library/LibUtilities/BasicUtils/ShapeType.hpp @@ -43,6 +43,8 @@ #undef min #endif +using namespace std; + namespace Nektar { namespace LibUtilities @@ -240,26 +242,19 @@ namespace Nektar "than order in 'c' direction."); // Count number of coefficients explicitly. 
- const int Pi = Na - 2, Qi = Nb - 2, Ri = Nc - 2; - int nCoeff = - 5 + // vertices - Pi * 2 + Qi * 2 + Ri * 4 + // base edges - Pi * Qi + // base quad - Pi * (2*Ri - Pi - 1) + // p-r triangles; - Qi * (2*Ri - Qi - 1); // q-r triangles; + int nCoeff = 0; // Count number of interior tet modes - for (int a = 0; a < Pi - 1; ++a) + for (int a = 0; a < Na; ++a) { - for (int b = 0; b < Qi - a - 1; ++b) + for (int b = 0; b < Nb; ++b) { - for (int c = 0; c < Ri - a - b -1; ++c) + for (int c = 0; c < Nc - max(a,b); ++c) { ++nCoeff; } } } - return nCoeff; } diff --git a/library/LibUtilities/BasicUtils/Thread.cpp b/library/LibUtilities/BasicUtils/Thread.cpp index d1d979c713eab5b1d856229f812b073296e26657..46cde8e8906fbcdd29c459b7bac64cd6ea3eeb88 100644 --- a/library/LibUtilities/BasicUtils/Thread.cpp +++ b/library/LibUtilities/BasicUtils/Thread.cpp @@ -48,11 +48,8 @@ namespace Thread */ ThreadManagerFactory& GetThreadManagerFactory() { - typedef Loki::SingletonHolder Type; - return Type::Instance(); + static ThreadManagerFactory instance; + return instance; } /** @@ -142,11 +139,8 @@ ThreadMaster::~ThreadMaster() */ ThreadMaster& GetThreadMaster() { - typedef Loki::SingletonHolder Type; - return Type::Instance(); + static ThreadMaster instance; + return instance; } diff --git a/library/LibUtilities/BasicUtils/Thread.h b/library/LibUtilities/BasicUtils/Thread.h index 9c83019142459590de5045c86dd41c93df006753..d1cc713e91902ce4f726a4e1997a03708fada369 100644 --- a/library/LibUtilities/BasicUtils/Thread.h +++ b/library/LibUtilities/BasicUtils/Thread.h @@ -45,7 +45,6 @@ #include #include -#include namespace Nektar { diff --git a/library/LibUtilities/BasicUtils/Timer.cpp b/library/LibUtilities/BasicUtils/Timer.cpp index 5aa52f5af1e324172752fbf30ce4bc449339387d..221ea41041a55dfce59f25aa8e0fc2f10f1f8c40 100644 --- a/library/LibUtilities/BasicUtils/Timer.cpp +++ b/library/LibUtilities/BasicUtils/Timer.cpp @@ -37,91 +37,28 @@ namespace Nektar { - Timer::Timer() : - m_start(), - m_end(), 
- m_resolution() - { - } - - Timer::~Timer() - { - } - - void Timer::Start() - { - #ifdef _WIN32 - QueryPerformanceCounter(&m_start); - #elif defined(__APPLE__) - gettimeofday(&m_start, 0); - #else - clock_gettime(CLOCK_REALTIME, &m_start); - #endif - } - - void Timer::Stop() - { - #ifdef _WIN32 - QueryPerformanceCounter(&m_end); - #elif defined(__APPLE__) - gettimeofday(&m_end, 0); - #else - clock_gettime(CLOCK_REALTIME, &m_end); - #endif - } +namespace LibUtilities +{ - Timer::CounterType Timer::Elapsed() - { - #ifdef _WIN32 - CounterType result; - result.QuadPart = m_end.QuadPart - m_start.QuadPart; - return result; - #elif defined(__APPLE__) - CounterType result = m_end; - - if( result.tv_usec < m_start.tv_usec) - { - result.tv_sec -= 1; - result.tv_usec += 1000000; - } - - result.tv_sec -= m_start.tv_sec; - result.tv_usec -= m_start.tv_usec; - - return result; - #else - CounterType result = m_end; +void Timer::Start() +{ + m_start = Clock::now(); +} - if( result.tv_nsec < m_start.tv_nsec) - { - result.tv_sec -= 1; - result.tv_nsec += 1000000000; - } - - result.tv_sec -= m_start.tv_sec; - result.tv_nsec -= m_start.tv_nsec; +void Timer::Stop() +{ + m_end = Clock::now(); +} - return result; - #endif - } +Timer::Seconds Timer::Elapsed() +{ + return std::chrono::duration_cast(m_end - m_start); +} - NekDouble Timer::TimePerTest(unsigned int n) - { - #ifdef _WIN32 - CounterType frequency; - QueryPerformanceFrequency(&frequency); - return Elapsed().QuadPart/static_cast(n) * 1.0/frequency.QuadPart; - #elif defined(__APPLE__) - CounterType elapsed = Elapsed(); - NekDouble result = elapsed.tv_sec/static_cast(n) + - ( elapsed.tv_usec/static_cast(n) * 1.0e-6); - return result; - #else - CounterType elapsed = Elapsed(); - NekDouble result = elapsed.tv_sec/static_cast(n) + - ( elapsed.tv_nsec/static_cast(n) * 1.0e-9); - return result; - #endif - } +NekDouble Timer::TimePerTest(unsigned int n) +{ + return Elapsed().count() / static_cast(n); +} -} +} +} diff --git 
a/library/LibUtilities/BasicUtils/Timer.h b/library/LibUtilities/BasicUtils/Timer.h index a6fadb21c111be4555c3eb094e63e714ee8abceb..d7c5984b238051fe16265dac8a16ff04dbbb2f51 100644 --- a/library/LibUtilities/BasicUtils/Timer.h +++ b/library/LibUtilities/BasicUtils/Timer.h @@ -37,49 +37,44 @@ #ifndef NEKTAR_LIB_UTILITIES_BASIC_UTILS_TIMER_H #define NEKTAR_LIB_UTILITIES_BASIC_UTILS_TIMER_H -#ifdef _WIN32 -#include -#else -#include -#include -#endif +#include #include #include namespace Nektar { - class Timer - { - public: - #ifdef _WIN32 - typedef LARGE_INTEGER CounterType; - #elif defined(__APPLE__) - typedef timeval CounterType; - #else - typedef timespec CounterType; - #endif +namespace LibUtilities +{ + +class Timer +{ + public: + using Clock = std::chrono::steady_clock; + using CounterType = Clock::time_point; + using Seconds = std::chrono::duration; - public: - LIB_UTILITIES_EXPORT Timer(); - LIB_UTILITIES_EXPORT ~Timer(); + public: + LIB_UTILITIES_EXPORT Timer() = default; + LIB_UTILITIES_EXPORT ~Timer() = default; - LIB_UTILITIES_EXPORT void Start(); - LIB_UTILITIES_EXPORT void Stop(); - LIB_UTILITIES_EXPORT CounterType Elapsed(); + Timer(const Timer& rhs) = delete; + Timer& operator=(const Timer& rhs) = delete; - /// \brief Returns amount of seconds per iteration in - /// a test with n iterations. - LIB_UTILITIES_EXPORT NekDouble TimePerTest(unsigned int n); + LIB_UTILITIES_EXPORT void Start(); + LIB_UTILITIES_EXPORT void Stop(); + LIB_UTILITIES_EXPORT Seconds Elapsed(); - private: - Timer(const Timer& rhs); - Timer& operator=(const Timer& rhs); + /// \brief Returns amount of seconds per iteration in + /// a test with n iterations. 
+ LIB_UTILITIES_EXPORT NekDouble TimePerTest(unsigned int n); - CounterType m_start; - CounterType m_end; - CounterType m_resolution; - }; + private: + CounterType m_start; + CounterType m_end; +}; + +} } #endif //NEKTAR_LIB_UTILITIES_BASIC_UTILS_TIMER_H diff --git a/library/LibUtilities/BasicUtils/VtkUtil.hpp b/library/LibUtilities/BasicUtils/VtkUtil.hpp new file mode 100644 index 0000000000000000000000000000000000000000..c874882f5b9cc64389fccef5f7813dcd79968fb9 --- /dev/null +++ b/library/LibUtilities/BasicUtils/VtkUtil.hpp @@ -0,0 +1,5 @@ +#if NEKTAR_HAS_VTK_6_0_0 +#include +#undef VTK_HAS_ISNAN +#undef VTK_HAS_ISINF +#endif diff --git a/library/LibUtilities/BasicUtils/mojo.hpp b/library/LibUtilities/BasicUtils/mojo.hpp deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/library/LibUtilities/CMakeLists.txt b/library/LibUtilities/CMakeLists.txt index 81774a4f01b3096ece7c1ac3acf6f6e935fc1658..36fe0725d1973e1d62cfa8c099e919f0aad2879d 100644 --- a/library/LibUtilities/CMakeLists.txt +++ b/library/LibUtilities/CMakeLists.txt @@ -38,6 +38,7 @@ SET(BasicUtilsHeaders ./BasicUtils/Vmath.hpp ./BasicUtils/VmathArray.hpp ./BasicUtils/Metis.hpp + ./BasicUtils/VtkUtil.hpp ./BasicUtils/XmlUtil.h ) @@ -205,7 +206,6 @@ SET(LinearAlgebraHeaders ./LinearAlgebra/DgemmOverride.hpp ./LinearAlgebra/DgemvOverride.hpp ./LinearAlgebra/ExplicitInstantiation.h - ./LinearAlgebra/IsDgemmTraits.hpp ./LinearAlgebra/Lapack.hpp ./LinearAlgebra/MatrixBase.hpp ./LinearAlgebra/MatrixFuncs.h @@ -214,27 +214,19 @@ SET(LinearAlgebraHeaders ./LinearAlgebra/MatrixSize.hpp ./LinearAlgebra/MatrixStorageType.h ./LinearAlgebra/MatrixType.h - ./LinearAlgebra/MatrixVectorMultiplication.hpp ./LinearAlgebra/NekLinAlgAlgorithms.hpp ./LinearAlgebra/NekLinSys.hpp ./LinearAlgebra/NekMatrixFwd.hpp ./LinearAlgebra/NekMatrix.hpp - ./LinearAlgebra/NekMatrixMetadata.hpp ./LinearAlgebra/NekPoint.hpp ./LinearAlgebra/NekTypeDefs.hpp 
./LinearAlgebra/NekVectorCommon.hpp - ./LinearAlgebra/NekVectorConstantSized.hpp ./LinearAlgebra/NekVectorFwd.hpp ./LinearAlgebra/NekVector.hpp - ./LinearAlgebra/NekVectorMetadata.hpp - ./LinearAlgebra/NekVectorTypeTraits.hpp - ./LinearAlgebra/NekVectorVariableSized.hpp ./LinearAlgebra/PointerWrapper.h ./LinearAlgebra/ScaledMatrix.hpp - ./LinearAlgebra/Space.h ./LinearAlgebra/StandardMatrix.hpp ./LinearAlgebra/TransF77.hpp - ./LinearAlgebra/StorageSmvBsr.hpp ./LinearAlgebra/NistSparseDescriptors.hpp ./LinearAlgebra/SparseDiagBlkMatrix.hpp @@ -243,10 +235,6 @@ SET(LinearAlgebraHeaders ./LinearAlgebra/SparseUtils.hpp ) -IF(NEKTAR_USING_SMV) - SET(LinearAlgebraHeaders ${LinearAlgebraHeaders} ./LinearAlgebra/LibSMV.hpp) -ENDIF(NEKTAR_USING_SMV) - SET(LinearAlgebraSources ./LinearAlgebra/blas.cpp ./LinearAlgebra/BlockMatrix.cpp @@ -263,10 +251,6 @@ SET(LinearAlgebraSources ./LinearAlgebra/SparseMatrix.cpp ) -IF(NEKTAR_USING_SMV) - SET(LinearAlgebraSources ${LinearAlgebraSources} ./LinearAlgebra/LibSMV.cpp) -ENDIF(NEKTAR_USING_SMV) - SET(MemoryHeaders ./Memory/ThreadSpecificPool.hpp ./Memory/NekMemoryManager.hpp @@ -390,11 +374,14 @@ SET(LibUtilitySources ADD_DEFINITIONS(-DLIB_UTILITIES_EXPORTS) -ADD_NEKTAR_LIBRARY(LibUtilities lib ${NEKTAR_LIBRARY_TYPE} ${LibUtilitySources} ${LibUtilityHeaders}) +ADD_NEKTAR_LIBRARY(LibUtilities + SOURCES ${LibUtilitySources} + HEADERS ${LibUtilityHeaders} + SUMMARY "Nektar++ LibUtilities library" + DESCRIPTION "This library provides core routines including linear algebra and integration with ThirdParty libraries.") ADD_DEPENDENCIES(LibUtilities modmetis-5.1.0 boost tinyxml-2.6.2 zlib-1.2.7) - ### Libraries needed specifically by LibUtilities ### ### Static libraries do not need to be linked by # Scotch @@ -449,44 +436,46 @@ IF( NEKTAR_USE_MPI ) SET_TARGET_PROPERTIES(LibUtilities PROPERTIES COMPILE_FLAGS "${THE_COMPILE_FLAGS} ${MPI_CXX_COMPILE_FLAGS}") ENDIF() + ADD_DEPENDENCIES(LibUtilities gsmpi-1.2.1) + + IF (THIRDPARTY_BUILD_MPI) + 
ADD_DEPENDENCIES(LibUtilities openmpi-1.10.3) + ENDIF() ENDIF( NEKTAR_USE_MPI ) # Lapack and Blas -IF( NEKTAR_USE_BLAS_LAPACK ) - IF( NEKTAR_USE_MKL AND MKL_FOUND ) - TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${MKL} ) - SET_TARGET_PROPERTIES(LibUtilities - PROPERTIES COMPILE_FLAGS "${THE_COMPILE_FLAGS} -DMKL_ILP64") - ENDIF( NEKTAR_USE_MKL AND MKL_FOUND ) - - IF( NEKTAR_USE_ACML AND ACML_FOUND ) - TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${ACML_TARGET_LINK_LIBRARIES} ) - ENDIF( NEKTAR_USE_ACML AND ACML_FOUND ) - - IF( NEKTAR_USE_ACCELERATE_FRAMEWORK ) - TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${ACCELERATE_FRAMEWORK_LINK_FLAGS}) - ENDIF ( NEKTAR_USE_ACCELERATE_FRAMEWORK ) - - IF( NEKTAR_USE_WIN32_LAPACK ) - TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${WIN32_LAPACK} ${WIN32_BLAS}) - INSTALL(FILES ${WIN32_LAPACK_DLL} ${WIN32_BLAS_DLL} - DESTINATION ${NEKTAR_BIN_DIR}) - ENDIF( NEKTAR_USE_WIN32_LAPACK ) - - IF( NEKTAR_USE_OPENBLAS AND OPENBLAS_FOUND ) - TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${OPENBLAS}) - ENDIF( NEKTAR_USE_OPENBLAS AND OPENBLAS_FOUND ) - - IF( NEKTAR_USE_SMV AND SMV_FOUND ) - TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${SMV_LIBRARY}) - ENDIF( NEKTAR_USE_SMV AND SMV_FOUND ) - - IF( NEKTAR_USE_SYSTEM_BLAS_LAPACK ) - TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${NATIVE_LAPACK} ${NATIVE_BLAS}) - ENDIF( NEKTAR_USE_SYSTEM_BLAS_LAPACK ) - -ENDIF( NEKTAR_USE_BLAS_LAPACK ) +IF( NEKTAR_USE_MKL AND MKL_FOUND ) + TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${MKL} ) + SET_TARGET_PROPERTIES(LibUtilities + PROPERTIES COMPILE_FLAGS "${THE_COMPILE_FLAGS} -DMKL_ILP64") +ENDIF( NEKTAR_USE_MKL AND MKL_FOUND ) + +IF( NEKTAR_USE_ACML AND ACML_FOUND ) + TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${ACML_TARGET_LINK_LIBRARIES} ) +ENDIF( NEKTAR_USE_ACML AND ACML_FOUND ) + +IF( NEKTAR_USE_ACCELERATE_FRAMEWORK ) + TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${ACCELERATE_FRAMEWORK_LINK_FLAGS}) +ENDIF ( NEKTAR_USE_ACCELERATE_FRAMEWORK 
) + +IF( NEKTAR_USE_WIN32_LAPACK ) + TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${WIN32_LAPACK} ${WIN32_BLAS}) + INSTALL(FILES ${WIN32_LAPACK_DLL} ${WIN32_BLAS_DLL} + DESTINATION ${NEKTAR_BIN_DIR}) +ENDIF( NEKTAR_USE_WIN32_LAPACK ) + +IF( NEKTAR_USE_OPENBLAS AND OPENBLAS_FOUND ) + TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${OPENBLAS}) +ENDIF( NEKTAR_USE_OPENBLAS AND OPENBLAS_FOUND ) + +IF( NEKTAR_USE_SYSTEM_BLAS_LAPACK ) + TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${NATIVE_LAPACK} ${NATIVE_BLAS}) +ENDIF( NEKTAR_USE_SYSTEM_BLAS_LAPACK ) + +IF(THIRDPARTY_BUILD_BLAS_LAPACK) + ADD_DEPENDENCIES(LibUtilities lapack-3.7.0) +ENDIF() IF( NEKTAR_USE_PETSC ) TARGET_LINK_LIBRARIES(LibUtilities LINK_PRIVATE ${PETSC_LIBRARIES}) @@ -500,5 +489,10 @@ IF( NEKTAR_USE_HDF5 ) ADD_DEPENDENCIES(LibUtilities hdf5-1.8.16) ENDIF( NEKTAR_USE_HDF5 ) -INSTALL(FILES ${ExpressionTemplates} DESTINATION ${NEKTAR_INCLUDE_DIR}/ExpressionTemplates COMPONENT dev) -INSTALL(DIRECTORY ./ DESTINATION ${NEKTAR_INCLUDE_DIR}/LibUtilities COMPONENT dev FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp") +INSTALL(FILES ${ExpressionTemplates} + DESTINATION ${NEKTAR_INCLUDE_DIR}/ExpressionTemplates + COMPONENT dev) +INSTALL(DIRECTORY ./ + DESTINATION ${NEKTAR_INCLUDE_DIR}/LibUtilities + COMPONENT dev + FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp") diff --git a/library/LibUtilities/Communication/Comm.cpp b/library/LibUtilities/Communication/Comm.cpp index 32c0aa6d836f992bac357e974c72aad64bd072ef..6f37b4d17d3921f496f5766ef0903677cd84d554 100644 --- a/library/LibUtilities/Communication/Comm.cpp +++ b/library/LibUtilities/Communication/Comm.cpp @@ -34,7 +34,6 @@ /////////////////////////////////////////////////////////////////////////////// #include -#include // for CreateUsingNew, NoDestroy, etc namespace Nektar { @@ -60,10 +59,8 @@ bool Comm::v_RemoveExistingFiles(void) CommFactory &GetCommFactory() { - typedef Loki::SingletonHolder - Type; - return Type::Instance(); + static CommFactory instance; + 
return instance; } } } diff --git a/library/LibUtilities/Communication/CommDataType.cpp b/library/LibUtilities/Communication/CommDataType.cpp index 18a4d259a0f1e7d042337803ceea7bf19545e4e9..c2cb3df5a44cd27eb1a4682f583eef5ced134153 100644 --- a/library/LibUtilities/Communication/CommDataType.cpp +++ b/library/LibUtilities/Communication/CommDataType.cpp @@ -82,27 +82,59 @@ int CommDataTypeGetSize(CommDataType dt) #endif } -/// Type trait mapping an int to MPI_INT -template <> CommDataType CommDataTypeTraits::type = MPI_INT; -/// Type trait mapping an unsigned int to MPI_UNSIGNED -template <> CommDataType CommDataTypeTraits::type = MPI_UNSIGNED; -/// Type trait mapping a long int to MPI_LONG -template <> CommDataType CommDataTypeTraits::type = MPI_LONG; -/// Type trait mapping an unsigned long int to MPI_UNSIGNED_LONG -template <> -CommDataType CommDataTypeTraits::type = MPI_UNSIGNED_LONG; -/// Type trait mapping a long long int to MPI_LONG_LONG -template <> CommDataType CommDataTypeTraits::type = MPI_LONG_LONG; -/// Type trait mapping an unsigned long long int to MPI_UNSIGNED_LONG_LONG -template <> -CommDataType CommDataTypeTraits::type = - MPI_UNSIGNED_LONG_LONG; -/// Type trait mapping a float to MPI_FLOAT -template <> CommDataType CommDataTypeTraits::type = MPI_FLOAT; -/// Type trait mapping a double to MPI_DOUBLE -template <> CommDataType CommDataTypeTraits::type = MPI_DOUBLE; -/// Type trait mapping a long double to MPI_LONG_DOUBLE -template <> -CommDataType CommDataTypeTraits::type = MPI_LONG_DOUBLE; +template<> CommDataType &CommDataTypeTraits::GetDataType() +{ + static CommDataType type = MPI_INT; + return type; +} + +template<> CommDataType &CommDataTypeTraits::GetDataType() +{ + static CommDataType type = MPI_UNSIGNED; + return type; +} + +template<> CommDataType &CommDataTypeTraits::GetDataType() +{ + static CommDataType type = MPI_LONG; + return type; +} + +template<> CommDataType &CommDataTypeTraits::GetDataType() +{ + static CommDataType type = 
MPI_UNSIGNED_LONG; + return type; +} + +template<> CommDataType &CommDataTypeTraits::GetDataType() +{ + static CommDataType type = MPI_LONG_LONG; + return type; +} + +template<> CommDataType &CommDataTypeTraits::GetDataType() +{ + static CommDataType type = MPI_UNSIGNED_LONG_LONG; + return type; +} + +template<> CommDataType &CommDataTypeTraits::GetDataType() +{ + static CommDataType type = MPI_FLOAT; + return type; +} + +template<> CommDataType &CommDataTypeTraits::GetDataType() +{ + static CommDataType type = MPI_DOUBLE; + return type; +} + +template<> CommDataType &CommDataTypeTraits::GetDataType() +{ + static CommDataType type = MPI_LONG_DOUBLE; + return type; +} + } } diff --git a/library/LibUtilities/Communication/CommDataType.h b/library/LibUtilities/Communication/CommDataType.h index b0aca4122f20155c97d8973772d93a4bc39b66c9..67853f6bfea554defe0215befe3c9800285d7e47 100644 --- a/library/LibUtilities/Communication/CommDataType.h +++ b/library/LibUtilities/Communication/CommDataType.h @@ -82,13 +82,9 @@ int CommDataTypeGetSize(CommDataType); template class CommDataTypeTraits { - LIB_UTILITIES_EXPORT static CommDataType type; - public: - static CommDataType &GetDataType() - { - return type; - } + LIB_UTILITIES_EXPORT static CommDataType &GetDataType(); + static void *GetPointer(T &val) { return &val; diff --git a/library/LibUtilities/FFT/NektarFFT.cpp b/library/LibUtilities/FFT/NektarFFT.cpp index dbc2ef88708645c3417c0a7438e6c9d767097d5e..794a623f29ddc7ac5467ca6a17fdd1b309692164 100644 --- a/library/LibUtilities/FFT/NektarFFT.cpp +++ b/library/LibUtilities/FFT/NektarFFT.cpp @@ -34,7 +34,6 @@ /////////////////////////////////////////////////////////////////////////////// #include -#include // for CreateUsingNew, NoDestroy, etc namespace Nektar { @@ -68,11 +67,8 @@ namespace Nektar NektarFFTFactory& GetNektarFFTFactory() { - typedef Loki::SingletonHolder Type; - return Type::Instance(); + static NektarFFTFactory instance; + return instance; } /** diff --git 
a/library/LibUtilities/Foundations/Basis.cpp b/library/LibUtilities/Foundations/Basis.cpp index 5a7e73518d6a08e1c1582d766e43f542a6d8218b..94d5d1a52b9918e6f7485f36512f6805ed0451b9 100644 --- a/library/LibUtilities/Foundations/Basis.cpp +++ b/library/LibUtilities/Foundations/Basis.cpp @@ -289,7 +289,7 @@ namespace Nektar /** \brief Orthogonal basis C - \f$\tilde \psi_{pqr}^c = \left ( {1 - \eta_3} \over 2 \right)^{p+q} P_r^{2p+2q+2, 0}(\eta_3)\f$ \\ + \f$\tilde \psi_{pqr}^c = \left ( {1 - \eta_3} \over 2 \right)^{p+q} P_r^{2p+2q+2, 0}(\eta_3)\f$ \ \ */ @@ -328,6 +328,65 @@ namespace Nektar } break; + /** \brief Orthogonal basis C for Pyramid expansion + (which is richer than tets) + + \f$\tilde \psi_{pqr}^c = \left ( {1 - \eta_3} \over + 2\right)^{pq} P_r^{2pq+2, 0}(\eta_3)\f$ \f$ \mbox{where + }pq = max(p+q,0) \f$ + + This orthogonal expansion has modes that are + always in the Cartesian space, however the + equivalent ModifiedPyr_C has vertex modes that do + not lie in this space. If one chooses \f$pq = + max(p+q-1,0)\f$ then the expansion will space the + same space as the vertices but the order of the + expanion in 'r' is reduced by one. + + 1) Eta_z values are the changing the fastest, then + r, q, and finally p. 2) r index increases by the + stride of numPoints. + */ + case eOrthoPyr_C: + { + int P = numModes - 1, Q = numModes - 1, R = numModes - 1; + NekDouble *mode = m_bdata.data(); + + for( int p = 0; p <= P; ++p ) + { + for( int q = 0; q <= Q; ++q ) + { + for( int r = 0; r <= R - max(p,q); ++r, mode += numPoints ) + { + // this offset allows for orthogonal + // expansion to span linear FE space + // of modified basis but means that + // the cartesian polynomial space + // spanned by the expansion is one + // order lower. 
+ //int pq = max(p + q -1,0); + int pq = max(p + q,0); + + Polylib::jacobfd(numPoints, z.data(), mode, NULL, r, 2*pq + 2.0, 0.0); + for( int k = 0; k < numPoints; ++k ) + { + // Note factor of 0.5 is part of normalisation + mode[k] *= pow(0.5*(1.0 - z[k]), pq); + + // finish normalisation + mode[k] *= sqrt(r+pq+1.5); + } + } + } + } + + // Define derivative basis + Blas::Dgemm('n','n',numPoints,numModes*(numModes+1)* + (numModes+2)/6,numPoints,1.0, D, numPoints, + m_bdata.data(),numPoints,0.0,m_dbdata.data(),numPoints); + } + break; + case eModified_A: // Note the following packing deviates from the @@ -452,13 +511,13 @@ namespace Nektar case eModified_C: { // Note the following packing deviates from the - // definition in the Book by Karniadakis in two - // ways. 1) We put the vertex degrees of freedom - // at the lower index range to follow a more - // hierarchic structure. 2) We do not duplicate - // the singular vertex definition (or the - // duplicated face information in the book ) so - // that only a tetrahedral number + // definition in the Book by Karniadakis & + // Sherwin in two ways. 1) We put the vertex + // degrees of freedom at the lower index range to + // follow a more hierarchic structure. 2) We do + // not duplicate the singular vertex definition + // (or the duplicated face information in the book + // ) so that only a tetrahedral number // (i.e. (modes)*(modes+1)*(modes+2)/6) of modes // are required consistent with the orthogonal // basis. @@ -505,6 +564,113 @@ namespace Nektar } break; + case eModifiedPyr_C: + { + // Note the following packing deviates from the + // definition in the Book by Karniadakis & + // Sherwin in two ways. 1) We put the vertex + // degrees of freedom at the lower index range to + // follow a more hierarchic structure. 2) We do + // not duplicate the singular vertex definition + // so that only a pyramidic number + // (i.e. 
(modes)*(modes+1)*(2*modes+1)/6) of modes + // are required consistent with the orthogonal + // basis. + + // In the current structure the r index runs + // fastest rollowed by q and than the p index so + // that the matrix has a more compact structure + + // Generate Modified_B basis; + BasisKey ModBKey(eModified_B,m_basisKey.GetNumModes(), + m_basisKey.GetPointsKey()); + BasisSharedPtr ModB = BasisManager()[ModBKey]; + + Array ModB_data = ModB->GetBdata(); + + // Copy Modified_B basis into first + // (numModes*(numModes+1)/2)*numPoints entires of + // bdata. + + + int N; + int B_offset = 0; + int offset = 0; + + // Vertex 0,3,4, edges 3,4,7, face 4 + N = numPoints*(numModes)*(numModes+1)/2; + Vmath::Vcopy(N, &ModB_data[0],1,&m_bdata[0],1); + offset += N; + + B_offset += numPoints*(numModes); + // Vertex 1 edges 5 + N = numPoints*(numModes-1); + Vmath::Vcopy(N, &ModB_data[0]+B_offset,1,&m_bdata[0]+offset,1); + offset += N; + + // Vertex 2 edges 1,6, face 2 + N = numPoints*(numModes-1)*(numModes)/2; + Vmath::Vcopy(N, &ModB_data[0]+B_offset,1,&m_bdata[0]+offset,1); + offset += N; + + B_offset += numPoints*(numModes-1); + + NekDouble *one_m_z_pow, *one_p_z; + NekDouble *mode; + + mode = m_bdata.data() + offset; + + for(p = 2; p < numModes; ++p) + { + // edges 0 2, faces 1 3 + N = numPoints*(numModes-p); + Vmath::Vcopy(N, &ModB_data[0]+B_offset,1,mode,1); + mode += N; + Vmath::Vcopy(N, &ModB_data[0]+B_offset,1,mode,1); + mode += N; + B_offset += N; + + one_p_z = m_bdata.data()+numPoints; + + for(q = 2; q < numModes; ++q) + { + // face 0 + for(i = 0; i < numPoints; ++i) + { + // [(1-z)/2]^{p+q-2} Note in book it + // seems to suggest p+q-1 but that + // does not seem to give complete + // polynomial space for pyramids + mode[i] = pow(m_bdata[i],p+q-2); + } + + one_m_z_pow = mode; + mode += numPoints; + + // interior + for(int r = 1; r < numModes-max(p,q); ++r) + { + Polylib::jacobfd(numPoints,z.data(),mode,NULL,r-1,2*p+2*q-3,1.0); + + for(i = 0; i < numPoints; ++i) + { 
+ mode[i] *= one_m_z_pow[i]*one_p_z[i]; + } + mode += numPoints; + } + } + + } + + // set up derivative of basis. + Blas::Dgemm('n','n',numPoints, + numModes*(numModes+1)*(2*numModes+1)/6, + numPoints,1.0,D,numPoints, + m_bdata.data(),numPoints,0.0, + m_dbdata.data(),numPoints); + } + break; + case eGLL_Lagrange: { mode = m_bdata.data(); @@ -523,7 +689,7 @@ namespace Nektar Blas::Dgemm('n', 'n', numPoints, numModes, numPoints, 1.0, D, numPoints, m_bdata.data(), numPoints, 0.0, m_dbdata.data(), numPoints); - + }//end scope break; case eGauss_Lagrange: diff --git a/library/LibUtilities/Foundations/Basis.h b/library/LibUtilities/Foundations/Basis.h index 5c73df2a7cf0c70dc6a9f906504740733995d515..5bb1568f2f5e5fbbebe1960e7ce4ad0f9ddb807d 100644 --- a/library/LibUtilities/Foundations/Basis.h +++ b/library/LibUtilities/Foundations/Basis.h @@ -102,7 +102,10 @@ namespace Nektar case eOrtho_C: value = m_nummodes*(m_nummodes+1)*(m_nummodes+2)/6; break; - + case eModifiedPyr_C: + case eOrthoPyr_C: + value = m_nummodes*(m_nummodes+1)*(2*m_nummodes+1)/6; + break; case eOrtho_A: case eModified_A: case eFourier: diff --git a/library/LibUtilities/Foundations/BasisType.h b/library/LibUtilities/Foundations/BasisType.h index 3e240ad7fbe095dcfc0383896d7713b0f154f62e..f24010b50fb3169748da2532df99d082569b9080 100644 --- a/library/LibUtilities/Foundations/BasisType.h +++ b/library/LibUtilities/Foundations/BasisType.h @@ -49,6 +49,8 @@ namespace Nektar eModified_A, //!< Principle Modified Functions \f$ \phi^a_p(z_i) \f$ eModified_B, //!< Principle Modified Functions \f$ \phi^b_{pq}(z_i) \f$ eModified_C, //!< Principle Modified Functions \f$ \phi^c_{pqr}(z_i) \f$ + eOrthoPyr_C, //!< Principle Orthogonal Functions \f$\widetilde{\psi}^c_{pqr}(z_i) for Pyramids\f$ + eModifiedPyr_C, //!< Principle Modified Functions \f$ \phi^c_{pqr}(z_i) for Pyramids\f$ eFourier, //!< Fourier Expansion \f$ \exp(i p\pi z_i)\f$ eGLL_Lagrange, //!< Lagrange for SEM basis \f$ h_p(z_i) \f$ eGauss_Lagrange, //!< 
Lagrange Polynomials using the Gauss points \f$ h_p(z_i) \f$ diff --git a/library/LibUtilities/Foundations/Foundations.hpp b/library/LibUtilities/Foundations/Foundations.hpp index ec1cd55df0a5114d8324483b707f29f260391638..98338279a8e76432552d0b14957bc21b9ebb11bb 100644 --- a/library/LibUtilities/Foundations/Foundations.hpp +++ b/library/LibUtilities/Foundations/Foundations.hpp @@ -53,6 +53,8 @@ namespace Nektar "Modified_A", "Modified_B", "Modified_C", + "OrthoPyr_C", + "ModifiedPyr_C", "Fourier", "GLL_Lagrange", "Gauss_Lagrange", @@ -99,9 +101,10 @@ namespace Nektar "NodalPrismElec", "NodalTriSPI", "NodalTetSPI", - "NodalPrismSPI" + "NodalPrismSPI", + "NodalQuadElec", + "NodalHexElec" }; - } // end of namespace } // end of namespace diff --git a/library/LibUtilities/Foundations/ManagerAccess.cpp b/library/LibUtilities/Foundations/ManagerAccess.cpp index 6248dfca863eaf787ec0225d887cff0111ac58f7..beeef100831194b73854de216f005ff43e9dc4fc 100644 --- a/library/LibUtilities/Foundations/ManagerAccess.cpp +++ b/library/LibUtilities/Foundations/ManagerAccess.cpp @@ -33,7 +33,6 @@ // /////////////////////////////////////////////////////////////////////////////// -#include #include #include #include @@ -109,12 +108,14 @@ namespace Nektar PointsManagerT &PointsManager(void) { - return Loki::SingletonHolder::Instance(); + static PointsManagerT instance; + return instance; } BasisManagerT &BasisManager(void) { - return Loki::SingletonHolder::Instance(); + static BasisManagerT instance; + return instance; } } // end of namespace LibUtilities diff --git a/library/LibUtilities/Interpreter/AnalyticExpressionEvaluator.cpp b/library/LibUtilities/Interpreter/AnalyticExpressionEvaluator.cpp index ccbbad46c7ceac5598974cf07b318b5699157529..f73a89f69a6d307dd4aedf99a93ab1e8972accfc 100644 --- a/library/LibUtilities/Interpreter/AnalyticExpressionEvaluator.cpp +++ b/library/LibUtilities/Interpreter/AnalyticExpressionEvaluator.cpp @@ -42,16 +42,6 @@ #include #include -#ifdef _MSC_VER 
-#include -#endif //MSC_VER - -#ifdef _MSC_VER -#define NEKTAR_MATH_NAME(x) BOOST_PP_CAT(_, x) -#else -#define NEKTAR_MATH_NAME(x) x -#endif - #if( BOOST_VERSION / 100 % 1000 >= 36 ) using namespace boost::spirit::classic; #else diff --git a/library/LibUtilities/LibUtilitiesDeclspec.h b/library/LibUtilities/LibUtilitiesDeclspec.h index 69696894fb123405d73ebaa9f4de30fd44ada8b0..71361ae95fd9e2f76b6b03821b20ba7131337378 100644 --- a/library/LibUtilities/LibUtilitiesDeclspec.h +++ b/library/LibUtilities/LibUtilitiesDeclspec.h @@ -43,7 +43,5 @@ #define LIB_UTILITIES_EXPORT #endif -#define LOKI_CLASS_LEVEL_THREADING - #endif //NEKTAR__LIB_UTILITIES_LIB_UTILITIES_DECLSPEC_H diff --git a/library/LibUtilities/LinearAlgebra/Blas.hpp b/library/LibUtilities/LinearAlgebra/Blas.hpp index a230624998e6a00e8726ee1d24f7ef621d02b92f..a0da624ce8e5aef83cbc0fd5e13e2de0f431c2cc 100644 --- a/library/LibUtilities/LinearAlgebra/Blas.hpp +++ b/library/LibUtilities/LinearAlgebra/Blas.hpp @@ -98,7 +98,6 @@ namespace Blas const double& beta, double* c, const int& ldc); } -#ifdef NEKTAR_USING_BLAS /// \brief BLAS level 1: Copy \a x to \a y static inline void Dcopy (const int& n, const double *x, const int& incx, double *y, const int& incy) @@ -227,55 +226,5 @@ namespace Blas { Dgemm('N','N',N,M,K,a,B,N,A,K,b,C,N) ; } -#endif //NEKTAR_USING_BLAS } #endif //NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_BLAS_HPP - -/*** -$Log: Blas.hpp,v $ -Revision 1.6 2008/04/06 05:55:11 bnelson -Changed ConstArray to Array - -Revision 1.5 2008/02/28 09:57:08 sherwin -Added array version of some routines - -Revision 1.4 2007/09/02 23:33:04 bnelson -*** empty log message *** - -Revision 1.3 2007/08/29 22:35:21 bnelson -Added upper triangular matrix time vector. - -Revision 1.2 2007/06/17 22:54:23 bnelson -Fixed the row-major matrix multiplication wrapper function. 
- -Revision 1.1 2007/04/03 03:59:24 bnelson -Moved Lapack.hpp, Blas.hpp, Transf77.hpp to LinearAlgebra - -Revision 1.3 2007/02/04 00:15:40 bnelson -*** empty log message *** - -Revision 1.2 2006/06/01 13:44:28 kirby -*** empty log message *** - -Revision 1.1 2006/06/01 11:07:52 kirby -*** empty log message *** - -Revision 1.1 2006/05/04 18:57:41 kirby -*** empty log message *** - -Revision 1.4 2006/02/26 21:13:45 bnelson -Fixed a variety of compiler errors caused by updates to the coding standard. - -Revision 1.3 2006/02/15 08:07:15 sherwin - -Put codes into standard although have not yet been compiled - -Revision 1.2 2006/02/12 21:51:42 sherwin - -Added licence - -Revision 1.1 2006/02/12 15:06:12 sherwin - -Changed .h files to .hpp - -**/ diff --git a/library/LibUtilities/LinearAlgebra/BlasArray.hpp b/library/LibUtilities/LinearAlgebra/BlasArray.hpp index b0cfd0e4ca72710286f36ce8c4620ddf7233f46d..8285aa66074c7d7035e586a30fb92be324f8da46 100644 --- a/library/LibUtilities/LinearAlgebra/BlasArray.hpp +++ b/library/LibUtilities/LinearAlgebra/BlasArray.hpp @@ -55,7 +55,6 @@ namespace Blas const double *y, const int& incy); } -#ifdef NEKTAR_USING_BLAS static inline void Dcopy (const int& n, const Nektar::Array &x, const int& incx, Nektar::Array &y, const int& incy) { ASSERTL1(static_cast(n*incx) <= x.num_elements()+x.GetOffset(),"Array out of bounds"); @@ -79,25 +78,5 @@ namespace Blas { return F77NAME(ddot)(n,&x[0],incx,&y[0],incy); } -#endif // NEKTAR_USING BLAS } #endif // NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_BLASARRAY_HPP - -/*** -$Log: BlasArray.hpp,v $ -Revision 1.5 2008/05/10 18:27:32 sherwin -Modifications necessary for QuadExp Unified DG Solver - -Revision 1.4 2008/04/30 02:55:51 bnelson -Fixed gcc compiler warning. 
- -Revision 1.3 2008/04/06 05:55:11 bnelson -Changed ConstArray to Array - -Revision 1.2 2008/03/12 15:22:45 pvos -Clean up of the code - -Revision 1.1 2008/02/28 09:57:08 sherwin -Added array version of some routines - -**/ diff --git a/library/LibUtilities/LinearAlgebra/BlockMatrix.cpp b/library/LibUtilities/LinearAlgebra/BlockMatrix.cpp index 242e98698163d1470051f9df889d37a5f228e7d8..e614f590b8b85fafc3d9e67194c52656e0f90526 100644 --- a/library/LibUtilities/LinearAlgebra/BlockMatrix.cpp +++ b/library/LibUtilities/LinearAlgebra/BlockMatrix.cpp @@ -166,12 +166,14 @@ namespace Nektar const typename NekMatrix, BlockMatrixTag>::InnerType* NekMatrix, BlockMatrixTag>::GetBlockPtr(unsigned int row, unsigned int column) const { - ASSERTL2(row < m_numberOfBlockRows, std::string("Row ") + boost::lexical_cast(row) + - std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockRows) + - std::string(" rows")); - ASSERTL2(column < m_numberOfBlockColumns, std::string("Column ") + boost::lexical_cast(column) + - std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockColumns) + - std::string(" columns")); + ASSERTL2(this->GetTransposeFlag() == 'N' ? row < m_numberOfBlockRows : row < m_numberOfBlockColumns, + std::string("Row ") + boost::lexical_cast(row) + + std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockRows) + + std::string(" rows")); + ASSERTL2(this->GetTransposeFlag() == 'N' ? 
column < m_numberOfBlockColumns : column < m_numberOfBlockColumns, + std::string("Column ") + boost::lexical_cast(column) + + std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockColumns) + + std::string(" columns")); int x = CalculateBlockIndex(row,column); if (x == -1) { @@ -187,12 +189,14 @@ namespace Nektar boost::shared_ptr, BlockMatrixTag>::InnerType> NekMatrix, BlockMatrixTag>::GetBlock(unsigned int row, unsigned int column) const { - ASSERTL2(row < m_numberOfBlockRows, std::string("Row ") + boost::lexical_cast(row) + - std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockRows) + - std::string(" rows")); - ASSERTL2(column < m_numberOfBlockColumns, std::string("Column ") + boost::lexical_cast(column) + - std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockColumns) + - std::string(" columns")); + ASSERTL2(this->GetTransposeFlag() == 'N' ? row < m_numberOfBlockRows : row < m_numberOfBlockColumns, + std::string("Row ") + boost::lexical_cast(row) + + std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockRows) + + std::string(" rows")); + ASSERTL2(this->GetTransposeFlag() == 'N' ? 
column < m_numberOfBlockColumns : column < m_numberOfBlockRows, + std::string("Column ") + boost::lexical_cast(column) + + std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockColumns) + + std::string(" columns")); int x = CalculateBlockIndex(row,column); if (x < 0) { @@ -208,12 +212,14 @@ namespace Nektar boost::shared_ptr, BlockMatrixTag>::InnerType>& NekMatrix, BlockMatrixTag>::GetBlock(unsigned int row, unsigned int column) { - ASSERTL2(row < m_numberOfBlockRows, std::string("Row ") + boost::lexical_cast(row) + - std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockRows) + - std::string(" rows")); - ASSERTL2(column < m_numberOfBlockColumns, std::string("Column ") + boost::lexical_cast(column) + - std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockColumns) + - std::string(" columns")); + ASSERTL2(this->GetTransposeFlag() == 'N' ? row < m_numberOfBlockRows : row < m_numberOfBlockColumns, + std::string("Row ") + boost::lexical_cast(row) + + std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockRows) + + std::string(" rows")); + ASSERTL2(this->GetTransposeFlag() == 'N' ? 
column < m_numberOfBlockColumns : column < m_numberOfBlockRows, + std::string("Column ") + boost::lexical_cast(column) + + std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockColumns) + + std::string(" columns")); int x = CalculateBlockIndex(row,column); if (x == -1) { @@ -228,12 +234,14 @@ namespace Nektar template void NekMatrix, BlockMatrixTag>::SetBlock(unsigned int row, unsigned int column, boost::shared_ptr& m) { - ASSERTL2(row < m_numberOfBlockRows, std::string("Row ") + boost::lexical_cast(row) + - std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockRows) + - std::string(" rows")); - ASSERTL2(column < m_numberOfBlockColumns, std::string("Column ") + boost::lexical_cast(column) + - std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockColumns) + - std::string(" columns")); + ASSERTL2(this->GetTransposeFlag() == 'N' ? row < m_numberOfBlockRows : row < m_numberOfBlockColumns, + std::string("Row ") + boost::lexical_cast(row) + + std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockRows) + + std::string(" rows")); + ASSERTL2(this->GetTransposeFlag() == 'N' ? 
column < m_numberOfBlockColumns : column < m_numberOfBlockRows, + std::string("Column ") + boost::lexical_cast(column) + + std::string(" requested in a block matrix with a maximum of ") + boost::lexical_cast(m_numberOfBlockColumns) + + std::string(" columns")); m_data[CalculateBlockIndex(row, column)] = InnerType::CreateWrapper(m); } diff --git a/library/LibUtilities/LinearAlgebra/IsDgemmTraits.hpp b/library/LibUtilities/LinearAlgebra/IsDgemmTraits.hpp deleted file mode 100644 index ab0c01417a74fea63241d7c8ff072a26d0fed513..0000000000000000000000000000000000000000 --- a/library/LibUtilities/LinearAlgebra/IsDgemmTraits.hpp +++ /dev/null @@ -1 +0,0 @@ -// \ No newline at end of file diff --git a/library/LibUtilities/LinearAlgebra/Lapack.hpp b/library/LibUtilities/LinearAlgebra/Lapack.hpp index 6b54adfd92b4c9450f5a99962fbecececc11aa55..ba553d1cda0d51fbe8e5771fedd3fe84337dc44e 100644 --- a/library/LibUtilities/LinearAlgebra/Lapack.hpp +++ b/library/LibUtilities/LinearAlgebra/Lapack.hpp @@ -103,8 +103,6 @@ namespace Lapack // Non-standard versions. void dgetrs(char trans, int matrixRows, int matrixColumns, const double* A, double* x); -#ifdef NEKTAR_USING_LAPACK - /// \brief factor a real packed-symmetric matrix using Bunch-Kaufman /// pivoting. static inline void Dsptrf (const char& uplo, const int& n, @@ -251,54 +249,5 @@ namespace Lapack { F77NAME(dtptrs) (uplo, trans, diag, n, nrhs, a, b, ldb, info); } - - -#endif //NEKTAR_USING_LAPACK } #endif //NEKTAR_LIB_UTILITIES_LAPACK_HPP - -/*** -$Log: Lapack.hpp,v $ -Revision 1.5 2008/06/01 19:04:57 bnelson -Added triangular solvers. - -Revision 1.4 2008/04/30 02:57:15 bnelson -Fixed gcc compiler warning. - -Revision 1.3 2008/04/06 05:55:11 bnelson -Changed ConstArray to Array - -Revision 1.2 2007/04/10 14:00:45 sherwin -Update to include SharedArray in all 2D element (including Nodal tris). 
Have also remvoed all new and double from 2D shapes in StdRegions - -Revision 1.1 2007/04/03 03:59:24 bnelson -Moved Lapack.hpp, Blas.hpp, Transf77.hpp to LinearAlgebra - -Revision 1.3 2007/02/04 00:15:40 bnelson -*** empty log message *** - -Revision 1.2 2006/06/01 13:44:28 kirby -*** empty log message *** - -Revision 1.1 2006/06/01 11:07:52 kirby -*** empty log message *** - -Revision 1.1 2006/05/04 18:57:43 kirby -*** empty log message *** - -Revision 1.4 2006/02/26 21:13:45 bnelson -Fixed a variety of compiler errors caused by updates to the coding standard. - -Revision 1.3 2006/02/15 08:07:15 sherwin - -Put codes into standard although have not yet been compiled - -Revision 1.2 2006/02/12 21:51:42 sherwin - -Added licence - -Revision 1.1 2006/02/12 15:06:12 sherwin - -Changed .h files to .hpp - -**/ diff --git a/library/LibUtilities/LinearAlgebra/LibSMV.cpp b/library/LibUtilities/LinearAlgebra/LibSMV.cpp deleted file mode 100644 index 7afdaff177731d70aec776e551eb9a4d5816e962..0000000000000000000000000000000000000000 --- a/library/LibUtilities/LinearAlgebra/LibSMV.cpp +++ /dev/null @@ -1,60 +0,0 @@ -/////////////////////////////////////////////////////////////////////////////// -// -// File: LibSMV.cpp -// -// For more information, please see: http://www.nektar.info -// -// The MIT License -// -// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA), -// Department of Aeronautics, Imperial College London (UK), and Scientific -// Computing and Imaging Institute, University of Utah (USA). 
-// -// License for the specific language governing rights and limitations under -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. 
-// -// Description: wrapper of functions around SMV routines -// -/////////////////////////////////////////////////////////////////////////////// - -#ifdef NEKTAR_USING_SMV - -#include - - -// Translations for using Fortran version of SMV -namespace Smv -{ - - /// \brief LibSmv matrix-vector multiply: Y = Y + A*X where A is [m x k]-matrix - template - void Smvn (const int& m, const T* a, const T* x, T* y) - { - F77NAME(smv) (m,a,x,y); - } - - /// Explicit instantiation with type double: - - template LIB_UTILITIES_EXPORT void Smvn (const int& m, - const double* a, const double* x, double* y); - - -} - -#endif diff --git a/library/LibUtilities/LinearAlgebra/LibSMV.hpp b/library/LibUtilities/LinearAlgebra/LibSMV.hpp deleted file mode 100644 index e3d5d0234b70fd5adb8b19f2d44ec94c2150e930..0000000000000000000000000000000000000000 --- a/library/LibUtilities/LinearAlgebra/LibSMV.hpp +++ /dev/null @@ -1,83 +0,0 @@ -/////////////////////////////////////////////////////////////////////////////// -// -// File: LibSMV.hpp -// -// For more information, please see: http://www.nektar.info -// -// The MIT License -// -// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA), -// Department of Aeronautics, Imperial College London (UK), and Scientific -// Computing and Imaging Institute, University of Utah (USA). 
-// -// License for the specific language governing rights and limitations under -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. -// -// Description: wrapper of functions around SMV routines -// -/////////////////////////////////////////////////////////////////////////////// - -#ifndef NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_SMV_HPP -#define NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_SMV_HPP - -#ifdef NEKTAR_USING_SMV - -#include -#include -#include - -#include - - -namespace Smv -{ - // Translations for using Fortran version of SMV - extern "C" - { - /// \brief Matrix-vector multiply C = C + A*B where - /// A is [m x m], B is [m] and C is [m]. - /// Expected: no matrix transpose, row-major ordering, - /// unit increments and no matrix views (lda != m|n|k), - /// type double, no constant factors. - void F77NAME(smv) ( - const int& m, - const double* a, const double* b, double* c); - - /// \brief Rank-specific matrix-vector LibSMV multiply - /// kernels. 
Row-major ordering, unit increments, - /// type double. May eventually call dgemv - /// implementation that LibSMV is linked against. - #define BOOST_PP_LOCAL_MACRO(n) \ - void F77NAME(smv_##n) \ - (const double* a, \ - const double* b, \ - double* c); - #define BOOST_PP_LOCAL_LIMITS (1, LIBSMV_MAX_RANK) - #include BOOST_PP_LOCAL_ITERATE() - } - - - /// \brief LibSmv matrix-vector multiply: Y = Y + A*X where A is [m x m]-matrix - template - LIB_UTILITIES_EXPORT void Smvn (const int& m, - const T* a, const T* x, T* y); - -} -#endif // NEKTAR_USING_SMV -#endif //NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_SMV_HPP diff --git a/library/LibUtilities/LinearAlgebra/MatrixFuncs.h b/library/LibUtilities/LinearAlgebra/MatrixFuncs.h index fe829dd7185fd8171e03edaccc6d12693ea23842..056871b08eb3169aeedb642b897c130b08dbcdf0 100644 --- a/library/LibUtilities/LinearAlgebra/MatrixFuncs.h +++ b/library/LibUtilities/LinearAlgebra/MatrixFuncs.h @@ -87,7 +87,6 @@ namespace Nektar Array& data, const char transpose) { -#ifdef NEKTAR_USING_BLAS ASSERTL0(rows==columns, "Only square matrices can be inverted."); ASSERTL0(transpose=='N', "Only untransposed matrices may be inverted."); @@ -123,11 +122,6 @@ namespace Nektar std::string message = "ERROR: Element u_" + boost::lexical_cast(info) + boost::lexical_cast(info) + " is 0 from dgetri"; ASSERTL0(false, message.c_str()); } - - #else - // error Full matrix inversion not supported without blas. 
- BOOST_STATIC_ASSERT(sizeof(DataType) == 0); - #endif } static void EigenSolve(unsigned int n, @@ -206,7 +200,6 @@ namespace Nektar static void Invert(unsigned int rows, unsigned int columns, Array& data) { -#ifdef NEKTAR_USING_BLAS ASSERTL0(rows==columns, "Only square matrices can be inverted."); int n = columns; @@ -240,11 +233,6 @@ namespace Nektar std::string message = "ERROR: Element u_" + boost::lexical_cast(info) + boost::lexical_cast(info) + " is 0 from dsptri"; ASSERTL0(false, message.c_str()); } - -#else - // error Full matrix inversion not supported without blas. - BOOST_STATIC_ASSERT(sizeof(DataType) == 0); -#endif } static boost::tuples::tuple diff --git a/library/LibUtilities/LinearAlgebra/MatrixOperationsDeclarations.hpp b/library/LibUtilities/LinearAlgebra/MatrixOperationsDeclarations.hpp index 1e7f672685e0aa0290c54a08ab45cfc1bd49ee18..758a50142e0c65485629d0699ba18f7d94c72ab4 100644 --- a/library/LibUtilities/LinearAlgebra/MatrixOperationsDeclarations.hpp +++ b/library/LibUtilities/LinearAlgebra/MatrixOperationsDeclarations.hpp @@ -45,8 +45,6 @@ #include #include #include -#include -#include #include #include diff --git a/library/LibUtilities/LinearAlgebra/MatrixVectorMultiplication.cpp b/library/LibUtilities/LinearAlgebra/MatrixVectorMultiplication.cpp index 2ef20f3bc91aed0a9682191aed4bac4cf6978f45..ded67ace264a4bdb846216a5fedf85d6ba872156 100644 --- a/library/LibUtilities/LinearAlgebra/MatrixVectorMultiplication.cpp +++ b/library/LibUtilities/LinearAlgebra/MatrixVectorMultiplication.cpp @@ -34,7 +34,6 @@ /////////////////////////////////////////////////////////////////////////////// #include -#include #include #include diff --git a/library/LibUtilities/LinearAlgebra/MatrixVectorMultiplication.hpp b/library/LibUtilities/LinearAlgebra/MatrixVectorMultiplication.hpp deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/library/LibUtilities/LinearAlgebra/NekMatrixMetadata.hpp 
b/library/LibUtilities/LinearAlgebra/NekMatrixMetadata.hpp deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/library/LibUtilities/LinearAlgebra/NekPoint.hpp b/library/LibUtilities/LinearAlgebra/NekPoint.hpp index bb820252e1e912b46237dc115dd1e99023ef0c08..fe91e19d8c674a9b45bf870b33c3968a49f8dd50 100644 --- a/library/LibUtilities/LinearAlgebra/NekPoint.hpp +++ b/library/LibUtilities/LinearAlgebra/NekPoint.hpp @@ -39,7 +39,6 @@ #include #include #include -#include #include #include diff --git a/library/LibUtilities/LinearAlgebra/NekVector.hpp b/library/LibUtilities/LinearAlgebra/NekVector.hpp index 1b8b07848f9599fde0023f85896f87db8ba352ee..50fc21a79e58db913fbb965fd44a8be5bb68f94d 100644 --- a/library/LibUtilities/LinearAlgebra/NekVector.hpp +++ b/library/LibUtilities/LinearAlgebra/NekVector.hpp @@ -42,10 +42,7 @@ #include -#include #include -#include -#include #include #include diff --git a/library/LibUtilities/LinearAlgebra/NekVectorConstantSized.hpp b/library/LibUtilities/LinearAlgebra/NekVectorConstantSized.hpp deleted file mode 100644 index 7d78c63f39d79e31ab6f05cde6db704f6ad1f953..0000000000000000000000000000000000000000 --- a/library/LibUtilities/LinearAlgebra/NekVectorConstantSized.hpp +++ /dev/null @@ -1,507 +0,0 @@ -///////////////////////////////////////////////////////////////////////////////// -//// -//// File: NekConstantSizedVector.hpp -//// -//// For more information, please see: http://www.nektar.info -//// -//// The MIT License -//// -//// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA), -//// Department of Aeronautics, Imperial College London (UK), and Scientific -//// Computing and Imaging Institute, University of Utah (USA). 
-//// -//// License for the specific language governing rights and limitations under -//// Permission is hereby granted, free of charge, to any person obtaining a -//// copy of this software and associated documentation files (the "Software"), -//// to deal in the Software without restriction, including without limitation -//// the rights to use, copy, modify, merge, publish, distribute, sublicense, -//// and/or sell copies of the Software, and to permit persons to whom the -//// Software is furnished to do so, subject to the following conditions: -//// -//// The above copyright notice and this permission notice shall be included -//// in all copies or substantial portions of the Software. -//// -//// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -//// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -//// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -//// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -//// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -//// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -//// DEALINGS IN THE SOFTWARE. -//// -//// Description: Generic N-Dimensional Vector. 
-//// -///////////////////////////////////////////////////////////////////////////////// -// -//#ifndef NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_NEK_CONSTANT_SIZED_VECTOR_HPP -//#define NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_NEK_CONSTANT_SIZED_VECTOR_HPP -// -//#include -//#include -//#include -//#include -// -//#include -//#include -// -// -//#include -//#include -// -// -//namespace Nektar -//{ -// -// -// template -// class NekVector -// { -// public: -// typedef dataType DataType; -// -// NekVector() : -// m_impl() -// { -// std::fill_n(m_impl, dim::Value, DataType()); -// } -// -// explicit NekVector(typename boost::call_traits::const_reference a) : -// m_impl() -// { -// std::fill_n(m_impl, dim::Value, a); -// } -// -// /// \brief Constructs a vector with initial value a. -// /// -// /// \param size The size of the vector. Since the size of the vector is specified in the template, this parameter is ignored. -// /// \param a The value with which to initialize the vector. -// /// -// /// This constructor is provided for generic methods which don't know if the vector they -// /// are creating is constant sized or variable sized. 
-// NekVector(unsigned int size, typename boost::call_traits::const_reference a) : -// m_impl() -// { -// ASSERTL1(size == dim::Value, std::string("Attempting to construct a constant sized vector of size ") + -// boost::lexical_cast(dim::Value) + -// std::string(" with a size ") + -// boost::lexical_cast(size)); -// std::fill_n(m_impl, dim::Value, a); -// } -// -// explicit NekVector(const std::string& vectorValues) : -// m_impl() -// { -// std::vector values = FromString(vectorValues); -// -// ASSERTL0(values.size() == dim::Value, "Error converting string values to vector"); -// -// std::copy(values.begin(), values.end(), &m_impl[0]); -// } -// -// NekVector(typename boost::call_traits::const_reference x, -// typename boost::call_traits::const_reference y, -// typename boost::call_traits::const_reference z) : -// m_impl() -// { -// BOOST_STATIC_ASSERT(dim::Value == 3); -// m_impl[0] = x; -// m_impl[1] = y; -// m_impl[2] = z; -// } -// -// NekVector(typename boost::call_traits::const_reference x, -// typename boost::call_traits::const_reference y, -// typename boost::call_traits::const_reference z, -// typename boost::call_traits::const_reference w) : -// m_impl() -// { -// BOOST_STATIC_ASSERT(dim::Value == 4); -// m_impl[0] = x; -// m_impl[1] = y; -// m_impl[2] = z; -// m_impl[3] = w; -// } -// -// NekVector(const NekVector& rhs) : -// m_impl() -// { -// std::copy(rhs.m_impl, rhs.m_impl+dim::Value, m_impl); -// } -// -// explicit NekVector(const DataType* const ptr) : -// m_impl() -// { -// std::copy(ptr, ptr+dim::Value, &m_impl[0]); -// } -// -// ~NekVector() -// { -//#ifdef _DEBUG -// std::fill_n(m_impl, dim::Value, DataType()); -//#endif //_DEBUG -// } -// -//#ifdef NEKTAR_USE_EXPRESSION_TEMPLATES -// template -// NekVector(const expt::Node& rhs) : -// m_impl() -// { -// #ifdef _DEBUG -// boost::tuple sizes = -// MatrixSize, typename Node::Indices, 0>::GetRequiredSize(rhs.GetData()); -// ASSERTL0(sizes.get<0>() == dim::Value, "Data sizes are not equal."); -// 
#endif -// -// /// TODO Make sure this works correctly with eWrapper -// //BOOST_MPL_ASSERT(( boost::is_same::ResultType, NekVector > )); -// expt::ExpressionEvaluator::Evaluate(rhs, *this); -// } -//#endif -// -//#ifdef NEKTAR_USE_EXPRESSION_TEMPLATES -// template -// NekVector& operator=(const expt::Node& rhs) -// { -// rhs.Evaluate(*this); -// return *this; -// } -//#endif -// -// /// \brief Returns the number of dimensions for the point. -// unsigned int GetDimension() const -// { -// return dim::Value; -// } -// -// /// \brief Treating the vector as a column vector, how many rows it has. -// unsigned int GetRows() const -// { -// return dim::Value; -// } -// -// const DataType* GetRawPtr() const -// { -// return &m_impl[0]; -// } -// -// typedef const DataType* const_iterator; -// const_iterator begin() const { return GetRawPtr(); } -// const_iterator end() const { return GetRawPtr() + GetDimension(); } -// -// typename boost::call_traits::const_reference operator()(unsigned int i) const -// { -// ASSERTL1(( i >= 0) && (i < GetDimension()), "Invalid access to m_data via parenthesis operator"); -// return m_impl[i]; -// } -// -// typename boost::call_traits::const_reference operator[](unsigned int i) const -// { -// return m_impl[i]; -// } -// -// typename boost::call_traits::const_reference x() const -// { -// BOOST_STATIC_ASSERT(dim::Value >= 1); -// return m_impl[0]; -// } -// -// typename boost::call_traits::const_reference y() const -// { -// BOOST_STATIC_ASSERT(dim::Value >= 2); -// return m_impl[1]; -// } -// -// typename boost::call_traits::const_reference z() const -// { -// BOOST_STATIC_ASSERT(dim::Value >= 3); -// return m_impl[2]; -// } -// -// typename boost::call_traits::const_reference w() const -// { -// BOOST_STATIC_ASSERT(dim::Value >= 4); -// return m_impl[3]; -// } -// -// -//// typename boost::call_traits::reference x() -//// { -//// BOOST_STATIC_ASSERT(dim::Value >= 1); -//// return m_impl[0]; -//// } -//// -//// typename 
boost::call_traits::reference y() -//// { -//// BOOST_STATIC_ASSERT(dim::Value >= 2); -//// return m_impl[1]; -//// } -//// -//// typename boost::call_traits::reference z() -//// { -//// BOOST_STATIC_ASSERT(dim::Value >= 3); -//// return m_impl[2]; -//// } -//// -//// -//// typename boost::call_traits::reference w() -//// { -//// BOOST_STATIC_ASSERT(dim::Value >= 4); -//// return m_impl[3]; -//// } -// -// // Unitary operators -// NekVector operator-() const { return Negate(*this); } -// -// DataType Magnitude() const { return Nektar::Magnitude(*this); } -// -// DataType Dot(const NekVector& rhs) const { return Nektar::Dot(*this, rhs); } -// -// NekVector Cross(const NekVector& rhs) const -// { -// return Nektar::Cross(*this, rhs); -// } -// -// std::string AsString() const { return Nektar::AsString(*this); } -// -// // Norms -// DataType L1Norm() const { return Nektar::L1Norm(*this); } -// DataType L2Norm() const { return Nektar::L2Norm(*this); } -// DataType InfinityNorm() const { return Nektar::InfinityNorm(*this); } -// -// PointerWrapper GetWrapperType() const { return eCopy; } -// -// protected: -// DataType* GetImpl() { return &m_impl[0]; } -// -// NekVector& operator=(const NekVector& rhs) -// { -// std::copy(rhs.m_impl, rhs.m_impl+dim::Value, m_impl); -// return *this; -// } -// -// NekVector& operator=(const NekVector& rhs) -// { -// ASSERTL0(GetDimension() == rhs.GetDimension(), "Assignment to a constant sized vector must have the same number of elements."); -// std::copy(rhs.GetRawPtr(), rhs.GetRawPtr()+dim::Value, m_impl); -// return *this; -// } -// -// private: -// DataType m_impl[dim::Value]; -// -// }; -// -// // \param DataType The type of data held by each element of the vector. -// // \param dim The number of elements in the vector. If set to 0, the vector -// // will have a variable number of elements. -// // \param space The space of the vector. 
-// template -// class NekVector : public NekVector -// { -// public: -// typedef NekVector BaseType; -// -// /// \brief Creates a constant sized vector with each element initialized to -// /// the default value for DataType. -// NekVector() : BaseType() {} -// -// /// \brief Creates a constant sized vector with each element set to a. -// /// \param a The value to assign to each element of the vector. -// explicit NekVector(typename boost::call_traits::const_reference a) : -// BaseType(a) {} -// -// NekVector(unsigned int size, typename boost::call_traits::const_reference a) : -// BaseType(size, a) {} -// -// /// \brief Creates a vector from the elements in a delimited string. -// /// \param vectorValues A string the the vector values. -// /// -// /// -// explicit NekVector(const std::string& vectorValues) : BaseType(vectorValues) {} -// -// NekVector(typename boost::call_traits::const_reference x, -// typename boost::call_traits::const_reference y, -// typename boost::call_traits::const_reference z) : -// BaseType(x, y, z) {} -// -// NekVector(typename boost::call_traits::const_reference x, -// typename boost::call_traits::const_reference y, -// typename boost::call_traits::const_reference z, -// typename boost::call_traits::const_reference w) : -// BaseType(x,y,z,w) {} -// -// NekVector(const NekVector& rhs) : -// BaseType(rhs) {} -// -// explicit NekVector(const DataType* const ptr) : -// BaseType(ptr) {} -// -// -//#ifdef NEKTAR_USE_EXPRESSION_TEMPLATES -// template -// NekVector(const expt::Node& rhs) : -// BaseType() -// { -// #ifdef _DEBUG -// boost::tuple sizes = -// MatrixSize, typename expt::Node::Indices, 0>::GetRequiredSize(rhs.GetData()); -// ASSERTL0(sizes.get<0>() == dim::Value, "Data sizes are not equal."); -// #endif -// -// /// TODO Make sure this works correctly with eWrapper -// //BOOST_MPL_ASSERT(( boost::is_same::ResultType, NekVector > )); -// expt::ExpressionEvaluator::Evaluate(rhs, *this); -// } -//#endif -// -//#ifdef 
NEKTAR_USE_EXPRESSION_TEMPLATES -// template -// NekVector& operator=(const expt::Node& rhs) -// { -// rhs.Evaluate(*this); -// return *this; -// } -//#endif -// -// -// -// NekVector& operator=(const NekVector& rhs) -// { -// BaseType::operator=(rhs); -// return *this; -// } -// -// NekVector& operator=(const NekVector& rhs) -// { -// BaseType::operator=(rhs); -// return *this; -// } -// -// using BaseType::GetRawPtr; -// DataType* GetRawPtr() -// { -// return this->GetImpl(); -// } -// -// -// typedef DataType* iterator; -// -// -// iterator begin() { return GetRawPtr(); } -// iterator end() { return GetRawPtr() + this->GetDimension(); } -// -// -// /// \brief Returns i^{th} element. -// /// \param i The element to return. -// /// \pre i < dim -// /// \return A reference to the i^{th} element. -// /// -// /// Retrieves the i^{th} element. Since it returns a reference you may -// /// assign a new value (i.e., p(2) = 3.2;) -// /// -// /// This operator performs range checking. -// typename boost::call_traits::reference operator()(unsigned int i) -// { -// ASSERTL1((i >= 0) && (i < this->GetDimension()), "Invalid access to m_data via parenthesis operator"); -// return this->GetImpl()[i]; -// } -// -// typename boost::call_traits::reference operator[](unsigned int i) -// { -// return this->GetImpl()[i]; -// } -// -// using BaseType::operator(); -// using BaseType::operator[]; -// -// using BaseType::x; -// typename boost::call_traits::reference x() -// { -// BOOST_STATIC_ASSERT(dim::Value >= 1); -// return this->GetImpl()[0]; -// } -// -// using BaseType::y; -// typename boost::call_traits::reference y() -// { -// BOOST_STATIC_ASSERT(dim::Value >= 2); -// return this->GetImpl()[1]; -// } -// -// using BaseType::z; -// typename boost::call_traits::reference z() -// { -// BOOST_STATIC_ASSERT(dim::Value >= 3); -// return this->GetImpl()[2]; -// } -// -// using BaseType::w; -// typename boost::call_traits::reference w() -// { -// BOOST_STATIC_ASSERT(dim::Value >= 4); -// 
return this->GetImpl()[3]; -// } -// -// void SetX(typename boost::call_traits::const_reference val) -// { -// BOOST_STATIC_ASSERT(dim::Value >= 1); -// this->GetImpl()[0] = val; -// } -// -// void SetY(typename boost::call_traits::const_reference val) -// { -// BOOST_STATIC_ASSERT(dim::Value >= 2); -// this->GetImpl()[1] = val; -// } -// -// void SetZ(typename boost::call_traits::const_reference val) -// { -// BOOST_STATIC_ASSERT(dim::Value >= 3); -// this->GetImpl()[2] = val; -// } -// -// void SetW(typename boost::call_traits::const_reference val) -// { -// BOOST_STATIC_ASSERT(dim::Value >= 4); -// this->GetImpl()[3] = val; -// } -// -// /// Arithmetic Routines -// -// NekVector& operator+=(const NekVector& rhs) -// { -// AddEqual(*this, rhs); -// return *this; -// } -// -// NekVector& operator-=(const NekVector& rhs) -// { -// SubtractEqual(*this, rhs); -// return *this; -// } -// -// NekVector& operator*=(typename boost::call_traits::const_reference rhs) -// { -// MultiplyEqual(*this, rhs); -// return *this; -// } -// -// NekVector& operator/=(typename boost::call_traits::const_reference rhs) -// { -// DivideEqual(*this, rhs); -// return *this; -// } -// -// -// -// void Normalize() { return Nektar::Normalize(*this); } -// -// -// -// }; -// -// -// -//} -// -//#endif // NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_NEK_CONSTANT_SIZED_VECTOR_HPP diff --git a/library/LibUtilities/LinearAlgebra/NekVectorFwd.hpp b/library/LibUtilities/LinearAlgebra/NekVectorFwd.hpp index 12bcc4ea3c68d5347d341603685d1538a27148e4..cafc329e958315b9fd52b59c5716f10ee562fb90 100644 --- a/library/LibUtilities/LinearAlgebra/NekVectorFwd.hpp +++ b/library/LibUtilities/LinearAlgebra/NekVectorFwd.hpp @@ -37,7 +37,6 @@ #define NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_NEK_VECTOR_FWD_HPP -#include #include diff --git a/library/LibUtilities/LinearAlgebra/NekVectorMetadata.hpp b/library/LibUtilities/LinearAlgebra/NekVectorMetadata.hpp deleted file mode 100644 index 
e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/library/LibUtilities/LinearAlgebra/NekVectorTypeTraits.hpp b/library/LibUtilities/LinearAlgebra/NekVectorTypeTraits.hpp deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/library/LibUtilities/LinearAlgebra/NekVectorVariableSized.hpp b/library/LibUtilities/LinearAlgebra/NekVectorVariableSized.hpp deleted file mode 100644 index 8a961ec1ebc8a1519ca9d2e2571c97ddaa535daa..0000000000000000000000000000000000000000 --- a/library/LibUtilities/LinearAlgebra/NekVectorVariableSized.hpp +++ /dev/null @@ -1,59 +0,0 @@ -///////////////////////////////////////////////////////////////////////////////// -//// -//// For more information, please see: http://www.nektar.info -//// -//// The MIT License -//// -//// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA), -//// Department of Aeronautics, Imperial College London (UK), and Scientific -//// Computing and Imaging Institute, University of Utah (USA). -//// -//// License for the specific language governing rights and limitations under -//// Permission is hereby granted, free of charge, to any person obtaining a -//// copy of this software and associated documentation files (the "Software"), -//// to deal in the Software without restriction, including without limitation -//// the rights to use, copy, modify, merge, publish, distribute, sublicense, -//// and/or sell copies of the Software, and to permit persons to whom the -//// Software is furnished to do so, subject to the following conditions: -//// -//// The above copyright notice and this permission notice shall be included -//// in all copies or substantial portions of the Software. 
-//// -//// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -//// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -//// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -//// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -//// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -//// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -//// DEALINGS IN THE SOFTWARE. -//// -//// Description: -//// -///////////////////////////////////////////////////////////////////////////////// - -//#ifndef NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_NEK_VECTOR_VARIABLE_SIZED_HPP -//#define NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_NEK_VECTOR_VARIABLE_SIZED_HPP - -//#include -//#include - -//#include -//#include -//#include - -//#include -//#include -//#include - -//namespace Nektar -//{ - - - -//} - -//#ifdef NEKTAR_USE_EXPRESSION_TEMPLATES - -//#endif - -//#endif // NEKTAR_LIB_UTILITIES_LINEAR_ALGEBRA_NEK_VECTOR_VARIABLE_SIZED_HPP diff --git a/library/LibUtilities/LinearAlgebra/ScaledMatrix.cpp b/library/LibUtilities/LinearAlgebra/ScaledMatrix.cpp index d8d9536b5f2cdf74a3c8b48565c232bd76c5373f..2ff40e6cc022e33172625e4d624e7c81e056af8c 100644 --- a/library/LibUtilities/LinearAlgebra/ScaledMatrix.cpp +++ b/library/LibUtilities/LinearAlgebra/ScaledMatrix.cpp @@ -39,7 +39,7 @@ namespace Nektar { template NekMatrix, ScaledMatrixTag>::NekMatrix() : - NekMatrix, ScaledMatrixTag>::BaseType(0,0,m_matrix->GetStorageType()), + NekMatrix, ScaledMatrixTag>::BaseType(0,0,eFULL), m_matrix(new typename NekMatrix, ScaledMatrixTag>::InnerType()), m_scale(0) { diff --git a/library/LibUtilities/LinearAlgebra/Space.h b/library/LibUtilities/LinearAlgebra/Space.h deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/library/LibUtilities/LinearAlgebra/SparseMatrix.cpp 
b/library/LibUtilities/LinearAlgebra/SparseMatrix.cpp index 541333b5449732ed83cd3e324ce4e0be9d70be9a..5bf0da9c854cee095db93d032bc1639dbfe843b5 100644 --- a/library/LibUtilities/LinearAlgebra/SparseMatrix.cpp +++ b/library/LibUtilities/LinearAlgebra/SparseMatrix.cpp @@ -75,19 +75,19 @@ namespace Nektar template - const IndexType NekSparseMatrix::GetRows() const + IndexType NekSparseMatrix::GetRows() const { return m_sparseStorage->GetRows(); } template - const IndexType NekSparseMatrix::GetColumns() const + IndexType NekSparseMatrix::GetColumns() const { return m_sparseStorage->GetColumns(); } template - const IndexType NekSparseMatrix::GetNumNonZeroEntries() const + IndexType NekSparseMatrix::GetNumNonZeroEntries() const { return m_sparseStorage->GetNumNonZeroEntries(); } @@ -137,7 +137,7 @@ namespace Nektar } template - const size_t NekSparseMatrix::GetMemoryFootprint() const + size_t NekSparseMatrix::GetMemoryFootprint() const { return m_sparseStorage->GetMemoryUsage( m_sparseStorage->GetNumNonZeroEntries(), @@ -148,7 +148,7 @@ namespace Nektar } template - const unsigned long NekSparseMatrix::GetMulCallsCounter() const + unsigned long NekSparseMatrix::GetMulCallsCounter() const { return m_mulCallsCounter; } @@ -161,7 +161,7 @@ namespace Nektar } template - const IndexType NekSparseMatrix::GetBandwidth() + IndexType NekSparseMatrix::GetBandwidth() { int bandwidth = 0; diff --git a/library/LibUtilities/LinearAlgebra/SparseMatrix.hpp b/library/LibUtilities/LinearAlgebra/SparseMatrix.hpp index 30d0863155e6c6ea05d686548a734fa91867f35d..0d935929f782092d68969d256c948552010c90ce 100644 --- a/library/LibUtilities/LinearAlgebra/SparseMatrix.hpp +++ b/library/LibUtilities/LinearAlgebra/SparseMatrix.hpp @@ -73,15 +73,15 @@ namespace Nektar LIB_UTILITIES_EXPORT NekSparseMatrix(const NekSparseMatrix& src); LIB_UTILITIES_EXPORT ~NekSparseMatrix(); - LIB_UTILITIES_EXPORT const IndexType GetRows() const; - LIB_UTILITIES_EXPORT const IndexType GetColumns() const; - 
LIB_UTILITIES_EXPORT const IndexType GetNumNonZeroEntries() const; + LIB_UTILITIES_EXPORT IndexType GetRows() const; + LIB_UTILITIES_EXPORT IndexType GetColumns() const; + LIB_UTILITIES_EXPORT IndexType GetNumNonZeroEntries() const; LIB_UTILITIES_EXPORT const DataType GetFillInRatio() const; - LIB_UTILITIES_EXPORT const size_t GetMemoryFootprint() const; - LIB_UTILITIES_EXPORT const unsigned long GetMulCallsCounter() const; + LIB_UTILITIES_EXPORT size_t GetMemoryFootprint() const; + LIB_UTILITIES_EXPORT unsigned long GetMulCallsCounter() const; LIB_UTILITIES_EXPORT const DataType GetAvgRowDensity() const; - LIB_UTILITIES_EXPORT const IndexType GetBandwidth(); + LIB_UTILITIES_EXPORT IndexType GetBandwidth(); LIB_UTILITIES_EXPORT COOMatTypeSharedPtr GetCooStorage(); diff --git a/library/LibUtilities/LinearAlgebra/StandardMatrix.cpp b/library/LibUtilities/LinearAlgebra/StandardMatrix.cpp index 2a490ef0ed39a4ea904425a9b395f9e4656e54b2..0e7f7aa47d61987b9160e3cda0fcfeab0907bb27 100644 --- a/library/LibUtilities/LinearAlgebra/StandardMatrix.cpp +++ b/library/LibUtilities/LinearAlgebra/StandardMatrix.cpp @@ -38,7 +38,7 @@ namespace Nektar template NekMatrix::NekMatrix() : - Matrix(0, 0), + Matrix(0, 0, eFULL), m_data(), m_wrapperType(eCopy), m_numberOfSuperDiagonals(std::numeric_limits::max()), diff --git a/library/LibUtilities/LinearAlgebra/StandardMatrix.hpp b/library/LibUtilities/LinearAlgebra/StandardMatrix.hpp index 0971e6986768d63c41d5cb7a7c49855da25e0dd5..06901bcc04ba955284a06593cd789e1f5c70e74c 100644 --- a/library/LibUtilities/LinearAlgebra/StandardMatrix.hpp +++ b/library/LibUtilities/LinearAlgebra/StandardMatrix.hpp @@ -42,7 +42,6 @@ #include #include #include -#include #include #include diff --git a/library/LibUtilities/LinearAlgebra/StorageSmvBsr.cpp b/library/LibUtilities/LinearAlgebra/StorageSmvBsr.cpp index 9b5f3dd9634c94a49a2a1c4ce8f1db304e530920..03b34faba1cd7aa319917d9cbaa00232c107b610 100644 --- a/library/LibUtilities/LinearAlgebra/StorageSmvBsr.cpp +++ 
b/library/LibUtilities/LinearAlgebra/StorageSmvBsr.cpp @@ -29,8 +29,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. // -// Description: 0-based sparse BSR storage class with own unrolled and -// LibSMV multiply kernels. +// Description: 0-based sparse BSR storage class with own unrolled multiply +// kernels. // /////////////////////////////////////////////////////////////////////////////// @@ -45,8 +45,6 @@ #include #include -#include - #include #include @@ -243,17 +241,6 @@ namespace Nektar throw 1; } -#ifdef NEKTAR_USING_SMV - // Set pointer to rank-specific matrix-vector multiply kernel. - // Number of ranks is defined by LibSMV library - switch (blkDim) - { - #define BOOST_PP_LOCAL_MACRO(n) case n: m_mvKernel = Smv::F77NAME(smv_##n); break; - #define BOOST_PP_LOCAL_LIMITS (1, LIBSMV_MAX_RANK) - #include BOOST_PP_LOCAL_ITERATE() - } -#endif - processBcoInput(blkRows,blkCols,blkDim,bcoMat); } @@ -389,18 +376,7 @@ namespace Nektar { case 1: Multiply_1x1(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; case 2: Multiply_2x2(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; -#ifndef NEKTAR_USING_SMV - case 3: Multiply_3x3(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; - case 4: Multiply_4x4(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; -#endif default: -#ifdef NEKTAR_USING_SMV - if (m_blkDim <= LIBSMV_MAX_RANK) - { - Multiply_libsmv(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; - } - else -#endif { Multiply_generic(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; } @@ -426,18 +402,7 @@ namespace Nektar { case 1: Multiply_1x1(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; case 2: Multiply_2x2(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; -#ifndef NEKTAR_USING_SMV - case 3: Multiply_3x3(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; - case 4: Multiply_4x4(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; -#endif default: -#ifdef NEKTAR_USING_SMV - if (m_blkDim <= LIBSMV_MAX_RANK) - { - Multiply_libsmv(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; - } 
- else -#endif { Multiply_generic(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; } @@ -465,18 +430,7 @@ namespace Nektar { case 1: Multiply_1x1(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; case 2: Multiply_2x2(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; -#ifndef NEKTAR_USING_SMV - case 3: Multiply_3x3(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; - case 4: Multiply_4x4(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; -#endif default: -#ifdef NEKTAR_USING_SMV - if (m_blkDim <= LIBSMV_MAX_RANK) - { - Multiply_libsmv(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; - } - else -#endif { Multiply_generic(mb,kb,val,bindx,bpntrb,bpntre,b,c); return; } @@ -626,42 +580,6 @@ namespace Nektar } } -#ifdef NEKTAR_USING_SMV - /// Generic zero-based BSR multiply - template - void StorageSmvBsr::Multiply_libsmv( - const int mb, - const int kb, - const double* val, - const int* bindx, - const int* bpntrb, - const int* bpntre, - const double* b, - double* c) - { - const int lb = m_blkDim; - - const double *pval = val; - const int mm=lb*lb; - - double *pc=c; - for (int i=0;i!=mb*lb;i++) *pc++ = 0; - - pc=c; - for (int i=0;i!=mb;i++) - { - int jb = bpntrb[i]; - int je = bpntre[i]; - for (int j=jb;j!=je;j++) - { - m_mvKernel(pval,&b[bindx[j]*lb],pc); - pval+=mm; - } - pc += lb; - } - } -#endif - /// Generic zero-based BSR multiply for higher matrix ranks template void StorageSmvBsr::Multiply_generic( diff --git a/library/LibUtilities/LinearAlgebra/StorageSmvBsr.hpp b/library/LibUtilities/LinearAlgebra/StorageSmvBsr.hpp index 7bded37a37f0cc543a058c84fb90f9023f92394c..eeb472db7b6056e454abffd32c36ad2a3991c472 100644 --- a/library/LibUtilities/LinearAlgebra/StorageSmvBsr.hpp +++ b/library/LibUtilities/LinearAlgebra/StorageSmvBsr.hpp @@ -29,8 +29,8 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. // -// Description: 0-based sparse BSR storage class with own unrolled and -// LibSMV multiply kernels. 
+// Description: 0-based sparse BSR storage class with own unrolled multiply +// kernels. // /////////////////////////////////////////////////////////////////////////////// @@ -52,11 +52,8 @@ namespace Nektar { /* * Zero-based BSR (Block Sparse Row) storage class with its sparse - * multiply kernels built upon dense multiply kernels provided by - * LibSMV library. It also includes its own unrolled multiply kernels - * up to 4x4 matrices, provided 'just in case' LibSMV library - * is not available. When matrix is larger than either one - * supported by LibSMV library or 4x4 (whichever is greater), the + * multiply kernels built upon its own dense unrolled multiply kernels + * up to 4x4 matrices. When matrix is larger than or 4x4, the * multiply kernel calls dense dgemv from BLAS. * * The BSR sparse format assumes sparse matrix is a CSR collection of @@ -194,13 +191,6 @@ namespace Nektar const int* bindx, const int* bpntrb, const int* bpntre, const double* b, double* c); -#ifdef NEKTAR_USING_SMV - void Multiply_libsmv(const int mb, const int kb, const double* val, - const int* bindx, const int* bpntrb, const int* bpntre, - const double* b, double* c); -#endif - - // interface to lowest level LibSMV multiply kernels MultiplyKernel m_mvKernel; diff --git a/library/LibUtilities/Memory/DeleteNothing.hpp b/library/LibUtilities/Memory/DeleteNothing.hpp deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/library/LibUtilities/Memory/NekMemoryManager.hpp b/library/LibUtilities/Memory/NekMemoryManager.hpp index d5354ee1681744ef7b916dfd551643952b68d385..190681d0f67e9a3b60b016e478f240ccfefe44f5 100644 --- a/library/LibUtilities/Memory/NekMemoryManager.hpp +++ b/library/LibUtilities/Memory/NekMemoryManager.hpp @@ -50,17 +50,6 @@ #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - #include #ifdef max @@ -70,321 +59,258 @@ namespace Nektar { 
-#ifndef NEKTAR_MAX_MEMORY_MANAGER_CONSTRUCTOR_ARGS -#define NEKTAR_MAX_MEMORY_MANAGER_CONSTRUCTOR_ARGS 20 -#endif //NEKTAR_MAX_MEMORY_MANAGER_CONSTRUCTOR_ARGS +/// @brief General purpose memory allocation routines with the ability +/// to allocate from thread specific memory pools. +/// +/// If compiled with NEKTAR_MEMORY_POOL_ENABLED, the MemoryManager +/// allocates from thread specific memory pools for small objects. +/// Large objects are managed with the system supplied new/delete. +/// These memory pools provide faster allocation and deallocation +/// of small objects (particularly useful for shared pointers which +/// allocate many 4 byte objects). +/// +/// @warning All memory allocated from the memory manager must be returned +/// to the memory manager. Calling delete on memory allocated from the +/// manager will likely cause undefined behavior. A particularly subtle +/// violation of this rule occurs when giving memory allocated from the +/// manager to a shared pointer. +/// @code +/// boost::shared_ptr f(MemoryManager::Allocate()); +/// @endcode +/// Shared pointers call delete when they go out of scope, so this line of +/// code will cause problems. Instead, you should call the +/// AllocateSharedPtr method: +/// @code +/// boost::shared_ptr f = MemoryManager::AllocateSharedPtr(); +/// @endcode +template +class MemoryManager +{ + template + class DeallocateSharedPtr + { + public: + explicit DeallocateSharedPtr(const CustomDeallocator& d) : + m_dealloc(d) + { + } + + void operator()(ObjectType*& m) const + { + m_dealloc(); + MemoryManager::Deallocate(m); + } + + private: + CustomDeallocator m_dealloc; + + }; + + class DefaultCustomDeallocator + { + public: + void operator()() const + { + } + }; - /// @brief General purpose memory allocation routines with the ability - /// to allocate from thread specific memory pools. +public: + /// @brief Deallocate a pointer allocated by + /// MemoryManager::Allocate. 
+ /// @note Results are undefined if called with a pointer to + /// something that was not allocated with the memory manager. /// - /// If compiled with NEKTAR_MEMORY_POOL_ENABLED, the MemoryManager - /// allocates from thread specific memory pools for small objects. - /// Large objects are managed with the system supplied new/delete. - /// These memory pools provide faster allocation and deallocation - /// of small objects (particularly useful for shared pointers which - /// allocate many 4 byte objects). + /// Use this method to deallocate a pointer you have allocated from + /// the MemoryManager using the Allocate method. /// - /// @warning All memory allocated from the memory manager must be returned - /// to the memory manager. Calling delete on memory allocated from the - /// manager will likely cause undefined behavior. A particularly subtle - /// violation of this rule occurs when giving memory allocated from the - /// manager to a shared pointer. + /// Example: /// @code - /// boost::shared_ptr f(MemoryManager::Allocate()); + /// CustObj* c = MemoryManager::Allocate(); + /// MemoryManager::Deallocate(c); /// @endcode - /// Shared pointers call delete when they go out of scope, so this line of - /// code will cause problems. 
Instead, you should call the - /// AllocateSharedPtr method: - /// @code - /// boost::shared_ptr f = MemoryManager::AllocateSharedPtr(); - /// @endcode - template - class MemoryManager + static void Deallocate(DataType*& data) { - private: - template - class DeallocateSharedPtr - { - public: - explicit DeallocateSharedPtr(const CustomDeallocator& d) : - m_dealloc(d) - { - } - - void operator()(ObjectType*& m) const - { - m_dealloc(); - MemoryManager::Deallocate(m); - } +#ifdef NEKTAR_MEMORY_POOL_ENABLED + data->~DataType(); + GetMemoryPool().Deallocate(data, sizeof(DataType)); +#else + delete data; +#endif - private: - CustomDeallocator m_dealloc; + data = NULL; + } - }; +#ifdef NEKTAR_MEMORY_POOL_ENABLED + /// @brief Allocates a single object from the memory pool. + /// @throws unknown If the object throws an exception during + /// construction, this method will catch it, release the memory + /// back to the pool, then rethrow it. + /// + /// The allocated object must be returned to the memory pool + /// via Deallocate. + template + static DataType* Allocate(const Args &...args) + { + DataType* result = static_cast( + GetMemoryPool().Allocate(sizeof(DataType))); - class DefaultCustomDeallocator - { - public: - void operator()() const - { - } - }; - - public: - /// @brief Deallocate a pointer allocated by - /// MemoryManager::Allocate. - /// @note Results are undefined if called with a pointer to - /// something that was not allocated with the memory manager. - /// - /// Use this method to deallocate a pointer you have allocated from - /// the MemoryManager using the Allocate method. 
- /// - /// Example: - /// @code - /// CustObj* c = MemoryManager::Allocate(); - /// MemoryManager::Deallocate(c); - /// @endcode - static void Deallocate(DataType*& data) + if (result) + { + try { - #ifdef NEKTAR_MEMORY_POOL_ENABLED - data->~DataType(); - GetMemoryPool().Deallocate(data, sizeof(DataType)); - #else //NEKTAR_MEMORY_POOL_ENABLED - delete data; - #endif //NEKTAR_MEMORY_POOL_ENABLED - - data = NULL; + new (result) DataType(args...); } - - #ifdef NEKTAR_MEMORY_POOL_ENABLED - /// @brief Allocates a single object from the memory pool. - /// @throws unknown If the object throws an exception during - /// construction, this method will catch it, release the memory - /// back to the pool, then rethrow it. - /// - /// The allocated object must be returned to the memory pool - /// via Deallocate. - static DataType* Allocate() - { - DataType* result = static_cast(GetMemoryPool().Allocate(sizeof(DataType))); - - if( result ) - { - try - { - new (result) DataType(); - } - catch(...) - { - GetMemoryPool().Deallocate(result, - sizeof(DataType)); - throw; - } - } - - return result; - } - #define ALLOCATE_METHOD_GENERATOR(z, i, methodName) \ - template \ - static DataType* methodName(BOOST_PP_ENUM_BINARY_PARAMS(i, const Arg, & arg)) \ - { \ - DataType* result = static_cast(GetMemoryPool().Allocate(sizeof(DataType))); \ - \ - if( result ) \ - { \ - try \ - { \ - new (result) DataType(BOOST_PP_ENUM_PARAMS(i, arg)); \ - } \ - catch(...) \ - { \ - GetMemoryPool().Deallocate(result, sizeof(DataType)); \ - throw; \ - } \ - } \ - \ - return result; \ - } - #else //NEKTAR_MEMORY_POOL_ENABLED - /// @brief Allocates a single object from the memory pool. - /// @throws unknown Any exception thrown by DataType's default - /// constructor will propogate through this method. - /// - /// The allocated object must be returned to the memory pool - /// via Deallocate. 
- static DataType* Allocate() - { - return new DataType(); - } - - #define ALLOCATE_METHOD_GENERATOR(z, i, methodName) \ - template \ - static DataType* methodName(BOOST_PP_ENUM_BINARY_PARAMS(i, const Arg, & arg)) \ - { \ - return new DataType(BOOST_PP_ENUM_PARAMS(i, arg)); \ - } - #endif //NEKTAR_MEMORY_POOL_ENABLED - - BOOST_PP_REPEAT_FROM_TO(1, NEKTAR_MAX_MEMORY_MANAGER_CONSTRUCTOR_ARGS, ALLOCATE_METHOD_GENERATOR, Allocate); - - /// @brief Allocate a shared pointer from the memory pool. - /// - /// The shared pointer does not need to be returned to the memory - /// pool. When the reference count to this object reaches 0, the - /// shared pointer will automatically return the memory. - static boost::shared_ptr AllocateSharedPtr() + catch(...) { - return AllocateSharedPtrD(DefaultCustomDeallocator()); + GetMemoryPool().Deallocate(result, sizeof(DataType)); + throw; } + } - /// @def ALLOCATE_SHARED_PTR_METHOD_GENERATOR - /// @brief Generator for allocating shared pointers to objects with - /// constructors having a variable number of parameters. - /// - /// Uses the Boost preprocessor macros - /// - BOOST_PP_ENUM_PARAMS(idx, text) which generates a list of - /// parameters, in this case: "typename Arg0, typename Arg1, ..." - /// used for the template definition. - /// - BOOST_PP_ENUM_BINARY_PARAMS(idx, type, name) which generates - /// a list of parameters and variables, in this case: - /// "Arg0& arg0, Arg1& arg1, ..." used for the function prototype. - /// - /// @note All the parameter lists are references so whenever the - /// MemoryManager::AllocateSharedPtr(...) is used, all parameters - /// must be passed by reference. Consequently, fundamental - /// datatype parameters must be defined locally first. 
So - /// @code - /// MemoryManager::AllocateSharedPtr(Var, true); - /// @endcode - /// must be replaced by - /// @code - /// bool flag = true; - /// MemoryManager::AllocateSharedPtr(Var, flag); - /// @endcode - #define ALLOCATE_SHARED_PTR_METHOD_GENERATOR(z, i, methodName) \ - template \ - static boost::shared_ptr methodName(BOOST_PP_ENUM_BINARY_PARAMS(i, const Arg, & arg)) \ - { \ - return AllocateSharedPtrD(DefaultCustomDeallocator(), BOOST_PP_ENUM_PARAMS(i, arg)); \ - } + return result; + } - #define ALLOCATE_SHARED_PTR_METHOD_WITH_DEALLOCATOR_GENERATOR(z, i, methodName) \ - template \ - static boost::shared_ptr methodName(const DeallocatorType& d BOOST_PP_ENUM_TRAILING_BINARY_PARAMS(i, Arg, & arg)) \ - { \ - DataType* data = Allocate(BOOST_PP_ENUM_PARAMS(i, arg)); \ - return boost::shared_ptr(data, DeallocateSharedPtr(d)); \ - } +#else //NEKTAR_MEMORY_POOL_ENABLED + /// @brief Allocates a single object from the memory pool. + /// @throws unknown Any exception thrown by DataType's default + /// constructor will propogate through this method. + /// + /// The allocated object must be returned to the memory pool + /// via Deallocate. + template + static DataType* Allocate(const Args &...args) + { + return new DataType(args...); + } +#endif //NEKTAR_MEMORY_POOL_ENABLED - BOOST_PP_REPEAT_FROM_TO(0, NEKTAR_MAX_MEMORY_MANAGER_CONSTRUCTOR_ARGS, ALLOCATE_SHARED_PTR_METHOD_WITH_DEALLOCATOR_GENERATOR, AllocateSharedPtrD); - BOOST_PP_REPEAT_FROM_TO(1, NEKTAR_MAX_MEMORY_MANAGER_CONSTRUCTOR_ARGS, ALLOCATE_SHARED_PTR_METHOD_GENERATOR, AllocateSharedPtr); + /// @brief Allocate a shared pointer from the memory pool. + /// + /// The shared pointer does not need to be returned to the memory + /// pool. When the reference count to this object reaches 0, the + /// shared pointer will automatically return the memory. 
+ template + static boost::shared_ptr AllocateSharedPtr(const Args &...args) + { + return AllocateSharedPtrD(DefaultCustomDeallocator(), args...); + } + template + static boost::shared_ptr AllocateSharedPtrD( + const DeallocatorType& d, const Args &...args) + { + DataType* data = Allocate(args...); + return boost::shared_ptr( + data, DeallocateSharedPtr(d)); + } - /// \brief Allocates a chunk of raw, uninitialized memory, capable of holding NumberOfElements objects. - /// \param NumberOfElements The number of elements the array should be capable of holding. - /// - /// This method is not meant to be called by client code. Use Array instead. - /// Any memory allocated from this method must be returned to the memory pool - /// via RawDeallocate. Failure to do so will result in memory leaks and undefined - /// behavior. - static DataType* RawAllocate(unsigned int NumberOfElements) - { - #ifdef NEKTAR_MEMORY_POOL_ENABLED - return static_cast(GetMemoryPool().Allocate(sizeof(DataType)*NumberOfElements)); - #else //NEKTAR_MEMORY_POOL_ENABLED - return static_cast(::operator new(NumberOfElements * sizeof(DataType))); - #endif //NEKTAR_MEMORY_POOL_ENABLED - } + /// \brief Allocates a chunk of raw, uninitialized memory, capable of + /// holding NumberOfElements objects. + /// + /// \param NumberOfElements The number of elements the array should be + /// capable of holding. + /// + /// This method is not meant to be called by client code. Use Array + /// instead. Any memory allocated from this method must be returned to the + /// memory pool via RawDeallocate. Failure to do so will result in memory + /// leaks and undefined behavior. 
+ static DataType* RawAllocate(unsigned int NumberOfElements) + { +#ifdef NEKTAR_MEMORY_POOL_ENABLED + return static_cast(GetMemoryPool().Allocate(sizeof(DataType)*NumberOfElements)); +#else //NEKTAR_MEMORY_POOL_ENABLED + return static_cast(::operator new(NumberOfElements * sizeof(DataType))); +#endif //NEKTAR_MEMORY_POOL_ENABLED + } - /// \brief Deallocates memory allocated from RawAllocate. - /// \param array A pointer to the memory returned from RawAllocate. - /// \param NumberOfElements The number of object held in the array. - /// - /// This method is not meant to be called by client code. Use Array instead. - /// Only memory allocated via RawAllocate should be returned to the pool here. - static void RawDeallocate(DataType* array, unsigned int NumberOfElements) - { - #ifdef NEKTAR_MEMORY_POOL_ENABLED - GetMemoryPool().Deallocate(array, sizeof(DataType)*NumberOfElements); - #else //NEKTAR_MEMORY_POOL_ENABLED - ::operator delete(array); - #endif //NEKTAR_MEMORY_POOL_ENABLED - - } + /// \brief Deallocates memory allocated from RawAllocate. + /// \param array A pointer to the memory returned from RawAllocate. + /// \param NumberOfElements The number of object held in the array. + /// + /// This method is not meant to be called by client code. Use Array instead. + /// Only memory allocated via RawAllocate should be returned to the pool here. + static void RawDeallocate(DataType* array, unsigned int NumberOfElements) + { +#ifdef NEKTAR_MEMORY_POOL_ENABLED + GetMemoryPool().Deallocate(array, sizeof(DataType)*NumberOfElements); +#else //NEKTAR_MEMORY_POOL_ENABLED + ::operator delete(array); +#endif //NEKTAR_MEMORY_POOL_ENABLED + } - ///////////////////////////////////////////////////////////////// - ///\name Allocator Interface - /// The allocator interface allows a MemoryManager object to be used - /// in any object that allows an allocator parameter, such as STL - /// containers. 
- ///////////////////////////////////////////////////////////////// - typedef DataType value_type; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - typedef DataType* pointer; - typedef const DataType* const_pointer; - typedef DataType& reference; - typedef const DataType& const_reference; - - MemoryManager() {} - template - MemoryManager(const MemoryManager& rhs) {} - ~MemoryManager() {} - - pointer address(reference r) const { return &r; } - const_pointer address(const_reference r) const { return &r; } - - pointer allocate(size_type n, std::allocator::const_pointer hint = 0)//typename MemoryManager::pointer hint = 0) - { - return RawAllocate(n); - } + ///////////////////////////////////////////////////////////////// + ///\name Allocator Interface + /// The allocator interface allows a MemoryManager object to be used + /// in any object that allows an allocator parameter, such as STL + /// containers. + ///////////////////////////////////////////////////////////////// + typedef DataType value_type; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + typedef DataType* pointer; + typedef const DataType* const_pointer; + typedef DataType& reference; + typedef const DataType& const_reference; + + MemoryManager() {} + template + MemoryManager(const MemoryManager& rhs) {} + ~MemoryManager() {} + + pointer address(reference r) const { return &r; } + const_pointer address(const_reference r) const { return &r; } + + pointer allocate(size_type n, std::allocator::const_pointer hint = 0)//typename MemoryManager::pointer hint = 0) + { + return RawAllocate(n); + } - void deallocate(pointer p, size_type n) - { - return RawDeallocate(p, n); - } + void deallocate(pointer p, size_type n) + { + return RawDeallocate(p, n); + } - void construct(pointer p, const_reference val) - { - new(p) DataType(val); - } + void construct(pointer p, const_reference val) + { + new(p) DataType(val); + } - void destroy(pointer p) - { - p->~DataType(); - } + void 
destroy(pointer p) + { + p->~DataType(); + } - size_type max_size() - { - return std::numeric_limits::max()/sizeof(DataType); - } + size_type max_size() + { + return std::numeric_limits::max()/sizeof(DataType); + } - template - struct rebind - { - typedef MemoryManager other; - }; + template + struct rebind + { + typedef MemoryManager other; + }; - ///////////////////////////////////////////////////////////////// - ///@} - ///////////////////////////////////////////////////////////////// + ///////////////////////////////////////////////////////////////// + ///@} + ///////////////////////////////////////////////////////////////// - private: +private: - }; +}; - template - bool operator==(const MemoryManager& lhs, const MemoryManager& rhs) - { - return true; - } +template +bool operator==(const MemoryManager& lhs, const MemoryManager& rhs) +{ + return true; +} - template - bool operator!=(const MemoryManager& lhs, const MemoryManager& rhs) - { - return !(lhs == rhs); - } +template +bool operator!=(const MemoryManager& lhs, const MemoryManager& rhs) +{ + return !(lhs == rhs); +} } diff --git a/library/LibUtilities/Memory/ThreadSpecificPool.cpp b/library/LibUtilities/Memory/ThreadSpecificPool.cpp index 425e5ff8af8d41a5ac863fc68ad9dab2bea7da89..5801e624d5f65ec81ed1e64cbbd8886ca98d003d 100644 --- a/library/LibUtilities/Memory/ThreadSpecificPool.cpp +++ b/library/LibUtilities/Memory/ThreadSpecificPool.cpp @@ -36,10 +36,7 @@ namespace Nektar { MemPool& GetMemoryPool() { - typedef Loki::SingletonHolder Type; - return Type::Instance(); + static MemPool instance; + return instance; } } diff --git a/library/LibUtilities/Memory/ThreadSpecificPool.hpp b/library/LibUtilities/Memory/ThreadSpecificPool.hpp index 8f72406622f71b12d6d809d15d83b15cc20705ce..cf7a0cda1f2b2f8360e5e279d84e16ee68c9b652 100644 --- a/library/LibUtilities/Memory/ThreadSpecificPool.hpp +++ b/library/LibUtilities/Memory/ThreadSpecificPool.hpp @@ -42,7 +42,6 @@ #include #include -#include #include #include 
#include diff --git a/library/LibUtilities/TimeIntegration/TimeIntegrationScheme.cpp b/library/LibUtilities/TimeIntegration/TimeIntegrationScheme.cpp index 1a80d1e453bd7d7805733ec06891411d0590216a..07bf22ca0d49912b7d2db5af735586d6c052642f 100644 --- a/library/LibUtilities/TimeIntegration/TimeIntegrationScheme.cpp +++ b/library/LibUtilities/TimeIntegration/TimeIntegrationScheme.cpp @@ -44,9 +44,9 @@ namespace Nektar { TimeIntegrationSchemeManagerT &TimeIntegrationSchemeManager(void) { - TimeIntegrationSchemeManagerT& m = Loki::SingletonHolder::Instance(); - m.RegisterGlobalCreator(TimeIntegrationScheme::Create); - return m; + static TimeIntegrationSchemeManagerT instance; + instance.RegisterGlobalCreator(TimeIntegrationScheme::Create); + return instance; } @@ -1111,6 +1111,12 @@ namespace Nektar TimeIntegrationSolutionSharedPtr y_out = MemoryManager::AllocateSharedPtr(m_schemeKey,y_0,time,timestep); + if( GetIntegrationSchemeType() == eExplicit) + { + // ensure initial solution is in correct space + op.DoProjection(y_0,y_out->UpdateSolution(),time); + } + // calculate the initial derivative, if is part of the // solution vector of the current scheme if(m_numMultiStepDerivs) @@ -1573,8 +1579,12 @@ namespace Nektar } else if( type == eExplicit) { - // ensure solution is in correct space - op.DoProjection(m_Y,m_Y,m_T); + // Avoid projecting the same solution twice + if( ! 
((i==0) && m_firstStageEqualsOldSolution) ) + { + // ensure solution is in correct space + op.DoProjection(m_Y,m_Y,m_T); + } op.DoOdeRhs(m_Y, m_F[i], m_T); } } @@ -1862,4 +1872,4 @@ namespace Nektar return os; } } -} \ No newline at end of file +} diff --git a/library/LibUtilities/TimeIntegration/TimeIntegrationWrapper.cpp b/library/LibUtilities/TimeIntegration/TimeIntegrationWrapper.cpp index caa97d55a912f2b18c1de59222f615b31248e3d2..cfe0142dfe03183fdb7f71b6fd735e4ead2a4c71 100644 --- a/library/LibUtilities/TimeIntegration/TimeIntegrationWrapper.cpp +++ b/library/LibUtilities/TimeIntegration/TimeIntegrationWrapper.cpp @@ -41,11 +41,8 @@ namespace LibUtilities { TimeIntegrationWrapperFactory &GetTimeIntegrationWrapperFactory() { - typedef Loki::SingletonHolder Type; - return Type::Instance(); + static TimeIntegrationWrapperFactory instance; + return instance; } TimeIntegrationWrapper::TimeIntegrationWrapper() diff --git a/library/LocalRegions/CMakeLists.txt b/library/LocalRegions/CMakeLists.txt index 8760767300924b47c961f43776b855f7dd7b1417..1711f92252da9a4046134bd377cb590fc7e8d0f0 100644 --- a/library/LocalRegions/CMakeLists.txt +++ b/library/LocalRegions/CMakeLists.txt @@ -40,11 +40,14 @@ TetExp.h TriExp.h ) - ADD_DEFINITIONS(-DLOCAL_REGIONS_EXPORTS) -ADD_NEKTAR_LIBRARY(LocalRegions lib ${NEKTAR_LIBRARY_TYPE} ${LOCAL_REGIONS_SOURCES} ${LOCAL_REGIONS_HEADERS}) - -TARGET_LINK_LIBRARIES(LocalRegions SpatialDomains StdRegions LibUtilities) +ADD_NEKTAR_LIBRARY(LocalRegions + SOURCES ${LOCAL_REGIONS_SOURCES} + HEADERS ${LOCAL_REGIONS_HEADERS} + DEPENDS SpatialDomains + SUMMARY "Nektar++ LocalRegions library" + DESCRIPTION "This library provides physical space expansions on the various supported regions.") -INSTALL(DIRECTORY ./ DESTINATION ${NEKTAR_INCLUDE_DIR}/LocalRegions COMPONENT dev FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp") +INSTALL(DIRECTORY ./ DESTINATION ${NEKTAR_INCLUDE_DIR}/LocalRegions + COMPONENT dev FILES_MATCHING PATTERN "*.h" PATTERN "*.hpp") diff 
--git a/library/LocalRegions/Expansion1D.cpp b/library/LocalRegions/Expansion1D.cpp index 7b14d92d8a5ccb144b11554083b6ad5515f76c5e..34b5820f0606046a2301004c8d21238444cdd04f 100644 --- a/library/LocalRegions/Expansion1D.cpp +++ b/library/LocalRegions/Expansion1D.cpp @@ -41,7 +41,21 @@ namespace Nektar { namespace LocalRegions { - + void Expansion1D::v_NegateVertexNormal(const int vertex) + { + m_negatedNormals[vertex] = true; + for (int i = 0; i < GetCoordim(); ++i) + { + Vmath::Neg(m_vertexNormals[vertex][i].num_elements(), + m_vertexNormals[vertex][i], 1); + } + } + + bool Expansion1D::v_VertexNormalNegated(const int vertex) + { + return m_negatedNormals[vertex]; + } + DNekMatSharedPtr Expansion1D::v_GenMatrix(const StdRegions::StdMatrixKey &mkey) { DNekMatSharedPtr returnval; diff --git a/library/LocalRegions/Expansion1D.h b/library/LocalRegions/Expansion1D.h index f59fa515496b29e741f7e9e2f7998bb6669fccd3..c4c506bd823b51c511f36a507fd8d471588199ae 100644 --- a/library/LocalRegions/Expansion1D.h +++ b/library/LocalRegions/Expansion1D.h @@ -98,6 +98,8 @@ namespace Nektar inline SpatialDomains::Geometry1DSharedPtr GetGeom1D() const; protected: + std::map m_negatedNormals; + virtual DNekMatSharedPtr v_GenMatrix( const StdRegions::StdMatrixKey &mkey); @@ -110,6 +112,10 @@ namespace Nektar const int vert, const Array &primCoeffs, Array &coeffs); + + virtual void v_NegateVertexNormal (const int vertex); + + virtual bool v_VertexNormalNegated(const int vertex); private: Expansion2DWeakPtr m_elementLeft; diff --git a/library/LocalRegions/Expansion3D.cpp b/library/LocalRegions/Expansion3D.cpp index 42456328bdaf4abd183e9205ad937044d76ba4d6..287b3aef98d60da86565067df4b87885e3f535dd 100644 --- a/library/LocalRegions/Expansion3D.cpp +++ b/library/LocalRegions/Expansion3D.cpp @@ -1103,9 +1103,7 @@ namespace Nektar "Not set up for non boundary-interior expansions"); ASSERTL1(inoutmat->GetRows() == inoutmat->GetColumns(), "Assuming that input matrix was square"); - 
ASSERTL1(GetBasisType(0) == LibUtilities::eModified_A, - "Method set up only for modified modal bases curretly"); - + int i,j; int id1,id2; Expansion2DSharedPtr faceExp = m_faceExp[face].lock(); diff --git a/library/LocalRegions/HexExp.cpp b/library/LocalRegions/HexExp.cpp index 6c547a297a6f478109f1aff58c7e3190c35b9c95..afc53d83a1bb8e349247d69f3da7579eefe5a973 100644 --- a/library/LocalRegions/HexExp.cpp +++ b/library/LocalRegions/HexExp.cpp @@ -710,6 +710,26 @@ namespace Nektar } break; } + case LibUtilities::eGLL_Lagrange: + { + LibUtilities::PointsKey + p0(nummodes[0], LibUtilities::eGaussLobattoLegendre); + LibUtilities::PointsKey + p1(nummodes[1], LibUtilities::eGaussLobattoLegendre); + LibUtilities::PointsKey + p2(nummodes[2], LibUtilities::eGaussLobattoLegendre); + LibUtilities::PointsKey t0( + m_base[0]->GetNumModes(), + LibUtilities::eGaussLobattoLegendre); + LibUtilities::PointsKey t1( + m_base[1]->GetNumModes(), + LibUtilities::eGaussLobattoLegendre); + LibUtilities::PointsKey t2( + m_base[2]->GetNumModes(), + LibUtilities::eGaussLobattoLegendre); + LibUtilities::Interp3D(p0, p1, p2, data, t0, t1, t2, coeffs); + } + break; default: ASSERTL0(false, "basis is either not set up or not " "hierarchicial"); diff --git a/library/LocalRegions/LocalRegionsDeclspec.h b/library/LocalRegions/LocalRegionsDeclspec.h index beab16444ed65c588c7e7c91a0aa4d940d91a305..6ae4788f5349bfd12623a2ab31fd77a2a59db1b8 100644 --- a/library/LocalRegions/LocalRegionsDeclspec.h +++ b/library/LocalRegions/LocalRegionsDeclspec.h @@ -43,7 +43,5 @@ #define LOCAL_REGIONS_EXPORT #endif -#define LOKI_CLASS_LEVEL_THREADING - #endif //NEKTAR_STD_REGIONS_DECLSPEC_H diff --git a/library/LocalRegions/PyrExp.cpp b/library/LocalRegions/PyrExp.cpp index 40062629248c627e4d1ae02ad3076de1dd8042c6..90a298705d23bd00e7097afc2eab46d7c2e8644b 100644 --- a/library/LocalRegions/PyrExp.cpp +++ b/library/LocalRegions/PyrExp.cpp @@ -273,30 +273,205 @@ namespace Nektar * \sum_{k=0}^{nq_0} \psi_{p}^a (\xi_{3k}) 
g_{pq} (\xi_{3k}) = {\bf * B_1 G} \f$ */ + void PyrExp::v_IProductWRTBase( - const Array &inarray, + const Array& inarray, + Array& outarray) + { + v_IProductWRTBase_SumFac(inarray, outarray); + } + + void PyrExp::v_IProductWRTBase_SumFac( + const Array& inarray, + Array& outarray, + bool multiplybyweights) + { + const int nquad0 = m_base[0]->GetNumPoints(); + const int nquad1 = m_base[1]->GetNumPoints(); + const int nquad2 = m_base[2]->GetNumPoints(); + const int order0 = m_base[0]->GetNumModes(); + const int order1 = m_base[1]->GetNumModes(); + + Array wsp(order0*nquad2*(nquad1+order1)); + + if(multiplybyweights) + { + Array tmp(nquad0*nquad1*nquad2); + + MultiplyByQuadratureMetric(inarray, tmp); + + IProductWRTBase_SumFacKernel(m_base[0]->GetBdata(), + m_base[1]->GetBdata(), + m_base[2]->GetBdata(), + tmp,outarray,wsp, + true,true,true); + } + else + { + IProductWRTBase_SumFacKernel(m_base[0]->GetBdata(), + m_base[1]->GetBdata(), + m_base[2]->GetBdata(), + inarray,outarray,wsp, + true,true,true); + } + } + + + /** + * @brief Calculates the inner product \f$ I_{pqr} = (u, + * \partial_{x_i} \phi_{pqr}) \f$. + * + * The derivative of the basis functions is performed using the chain + * rule in order to incorporate the geometric factors. Assuming that + * the basis functions are a tensor product + * \f$\phi_{pqr}(\eta_1,\eta_2,\eta_3) = + * \phi_1(\eta_1)\phi_2(\eta_2)\phi_3(\eta_3)\f$, this yields the + * result + * + * \f[ + * I_{pqr} = \sum_{j=1}^3 \left(u, \frac{\partial u}{\partial \eta_j} + * \frac{\partial \eta_j}{\partial x_i}\right) + * \f] + * + * In the pyramid element, we must also incorporate a second set + * of geometric factors which incorporate the collapsed co-ordinate + * system, so that + * + * \f[ \frac{\partial\eta_j}{\partial x_i} = \sum_{k=1}^3 + * \frac{\partial\eta_j}{\partial\xi_k}\frac{\partial\xi_k}{\partial + * x_i} \f] + * + * These derivatives can be found on p152 of Sherwin & Karniadakis. 
+ * + * @param dir Direction in which to take the derivative. + * @param inarray The function \f$ u \f$. + * @param outarray Value of the inner product. + */ + void PyrExp::v_IProductWRTDerivBase( + const int dir, + const Array &inarray, Array &outarray) { - int nquad0 = m_base[0]->GetNumPoints(); - int nquad1 = m_base[1]->GetNumPoints(); - int nquad2 = m_base[2]->GetNumPoints(); - Array jac = m_metricinfo->GetJac(GetPointsKeys()); - Array tmp(nquad0*nquad1*nquad2); - - // multiply inarray with Jacobian + v_IProductWRTDerivBase_SumFac(dir, inarray, outarray); + } + + void PyrExp::v_IProductWRTDerivBase_SumFac( + const int dir, + const Array &inarray, + Array &outarray) + { + const int nquad0 = m_base[0]->GetNumPoints(); + const int nquad1 = m_base[1]->GetNumPoints(); + const int nquad2 = m_base[2]->GetNumPoints(); + const int order0 = m_base[0]->GetNumModes (); + const int order1 = m_base[1]->GetNumModes (); + const int nqtot = nquad0*nquad1*nquad2; + int i; + + const Array &z0 = m_base[0]->GetZ(); + const Array &z1 = m_base[1]->GetZ(); + const Array &z2 = m_base[2]->GetZ(); + + Array gfac0(nquad0 ); + Array gfac1(nquad1 ); + Array gfac2(nquad2 ); + Array tmp1 (nqtot ); + Array tmp2 (nqtot ); + Array tmp3 (nqtot ); + Array tmp4 (nqtot ); + Array tmp5 (nqtot ); + Array tmp6 (m_ncoeffs); + Array wsp (std::max(nqtot,order0*nquad2*(nquad1+order1))); + + const Array& df = + m_metricinfo->GetDerivFactors(GetPointsKeys()); + + MultiplyByQuadratureMetric(inarray, tmp1); + if(m_metricinfo->GetGtype() == SpatialDomains::eDeformed) { - Vmath::Vmul(nquad0*nquad1*nquad2,&jac[0],1,(NekDouble*)&inarray[0],1,&tmp[0],1); + Vmath::Vmul(nqtot,&df[3*dir][0], 1,tmp1.get(),1,tmp2.get(),1); + Vmath::Vmul(nqtot,&df[3*dir+1][0],1,tmp1.get(),1,tmp3.get(),1); + Vmath::Vmul(nqtot,&df[3*dir+2][0],1,tmp1.get(),1,tmp4.get(),1); } else { - Vmath::Smul(nquad0*nquad1*nquad2,jac[0],(NekDouble*)&inarray[0],1,&tmp[0],1); + Vmath::Smul(nqtot, df[3*dir][0], tmp1.get(),1,tmp2.get(), 1); + 
Vmath::Smul(nqtot, df[3*dir+1][0],tmp1.get(),1,tmp3.get(), 1); + Vmath::Smul(nqtot, df[3*dir+2][0],tmp1.get(),1,tmp4.get(), 1); + } + + // set up geometric factor: (1+z0)/2 + for (i = 0; i < nquad0; ++i) + { + gfac0[i] = 0.5*(1+z0[i]); + } + + // set up geometric factor: (1+z1)/2 + for(i = 0; i < nquad1; ++i) + { + gfac1[i] = 0.5*(1+z1[i]); + } + + // Set up geometric factor: 2/(1-z2) + for (i = 0; i < nquad2; ++i) + { + gfac2[i] = 2.0/(1-z2[i]); + } + + const int nq01 = nquad0*nquad1; + + for (i = 0; i < nquad2; ++i) + { + Vmath::Smul(nq01,gfac2[i],&tmp2[0]+i*nq01,1,&tmp2[0]+i*nq01,1); // 2/(1-z2) for d/dxi_0 + Vmath::Smul(nq01,gfac2[i],&tmp3[0]+i*nq01,1,&tmp3[0]+i*nq01,1); // 2/(1-z2) for d/dxi_1 + Vmath::Smul(nq01,gfac2[i],&tmp4[0]+i*nq01,1,&tmp5[0]+i*nq01,1); // 2/(1-z2) for d/dxi_2 + } + + // (1+z0)/(1-z2) for d/d eta_0 + for(i = 0; i < nquad1*nquad2; ++i) + { + Vmath::Vmul(nquad0,&gfac0[0],1, + &tmp5[0]+i*nquad0,1, + &wsp[0]+i*nquad0,1); + } + + Vmath::Vadd(nqtot, &tmp2[0], 1, &wsp[0], 1, &tmp2[0], 1); + + // (1+z1)/(1-z2) for d/d eta_1 + for(i = 0; i < nquad1*nquad2; ++i) + { + Vmath::Smul(nquad0,gfac1[i%nquad1], + &tmp5[0]+i*nquad0,1, + &tmp5[0]+i*nquad0,1); } + Vmath::Vadd(nqtot, &tmp3[0], 1, &tmp5[0], 1, &tmp3[0], 1); + - StdPyrExp::v_IProductWRTBase(tmp,outarray); + IProductWRTBase_SumFacKernel(m_base[0]->GetDbdata(), + m_base[1]->GetBdata (), + m_base[2]->GetBdata (), + tmp2,outarray,wsp, + false,true,true); + + IProductWRTBase_SumFacKernel(m_base[0]->GetBdata (), + m_base[1]->GetDbdata(), + m_base[2]->GetBdata (), + tmp3,tmp6,wsp, + true,false,true); + + Vmath::Vadd(m_ncoeffs, tmp6, 1, outarray, 1, outarray, 1); + + IProductWRTBase_SumFacKernel(m_base[0]->GetBdata (), + m_base[1]->GetBdata (), + m_base[2]->GetDbdata(), + tmp4,tmp6,wsp, + true,true,false); + + Vmath::Vadd(m_ncoeffs, tmp6, 1, outarray, 1, outarray, 1); } - + //--------------------------------------- // Evaluation functions //--------------------------------------- @@ -743,6 +918,36 @@ 
namespace Nektar } } + void PyrExp::v_SVVLaplacianFilter( + Array &array, + const StdRegions::StdMatrixKey &mkey) + { + int nq = GetTotPoints(); + + // Calculate sqrt of the Jacobian + Array jac = + m_metricinfo->GetJac(GetPointsKeys()); + Array sqrt_jac(nq); + if (m_metricinfo->GetGtype() == SpatialDomains::eDeformed) + { + Vmath::Vsqrt(nq,jac,1,sqrt_jac,1); + } + else + { + Vmath::Fill(nq,sqrt(jac[0]),sqrt_jac,1); + } + + // Multiply array by sqrt(Jac) + Vmath::Vmul(nq,sqrt_jac,1,array,1,array,1); + + // Apply std region filter + StdPyrExp::v_SVVLaplacianFilter( array, mkey); + + // Divide by sqrt(Jac) + Vmath::Vdiv(nq,array,1,sqrt_jac,1,array,1); + } + + //--------------------------------------- // Matrix creation functions //--------------------------------------- @@ -1027,7 +1232,6 @@ namespace Nektar returnval->SetBlock(0,1,Atmp = MemoryManager::AllocateSharedPtr(one,B)); returnval->SetBlock(1,0,Atmp = MemoryManager::AllocateSharedPtr(factor,C)); returnval->SetBlock(1,1,Atmp = MemoryManager::AllocateSharedPtr(invfactor,D)); - } } return returnval; diff --git a/library/LocalRegions/PyrExp.h b/library/LocalRegions/PyrExp.h index 4c02c5108ea5ca7b90ab126cf126360dfa532794..dc536e1abbe6b08265fb28977dd54475a5b82ad4 100644 --- a/library/LocalRegions/PyrExp.h +++ b/library/LocalRegions/PyrExp.h @@ -96,6 +96,18 @@ namespace Nektar LOCAL_REGIONS_EXPORT virtual void v_IProductWRTBase( const Array& inarray, Array& outarray); + LOCAL_REGIONS_EXPORT virtual void v_IProductWRTBase_SumFac( + const Array& inarray, + Array& outarray, + bool multiplybyweights = true); + LOCAL_REGIONS_EXPORT void v_IProductWRTDerivBase( + const int dir, + const Array& inarray, + Array& outarray); + LOCAL_REGIONS_EXPORT void v_IProductWRTDerivBase_SumFac( + const int dir, + const Array& inarray, + Array& outarray); //--------------------------------------- @@ -131,6 +143,12 @@ namespace Nektar Array &outarray); LOCAL_REGIONS_EXPORT void v_ComputeFaceNormal(const int face); + + + 
LOCAL_REGIONS_EXPORT virtual void v_SVVLaplacianFilter( + Array &array, + const StdRegions::StdMatrixKey &mkey); + //--------------------------------------- // Matrix creation functions //--------------------------------------- diff --git a/library/LocalRegions/QuadExp.cpp b/library/LocalRegions/QuadExp.cpp index 78856a863028c8a69f5fe71ddf719595bc7b8e2f..1a857057f72082492005f4a1104e6f38c1019bae 100644 --- a/library/LocalRegions/QuadExp.cpp +++ b/library/LocalRegions/QuadExp.cpp @@ -1563,15 +1563,17 @@ namespace Nektar break; case LibUtilities::eGLL_Lagrange: { - // Assume that input is also Gll_Lagrange but no way to check; LibUtilities::PointsKey p0(nummodes[0], LibUtilities::eGaussLobattoLegendre); LibUtilities::PointsKey p1(nummodes[1], LibUtilities::eGaussLobattoLegendre); - LibUtilities::Interp2D(p0, p1, data, - m_base[0]->GetPointsKey(), - m_base[1]->GetPointsKey(), - coeffs); + LibUtilities::PointsKey t0( + m_base[0]->GetNumModes(), + LibUtilities::eGaussLobattoLegendre); + LibUtilities::PointsKey t1( + m_base[1]->GetNumModes(), + LibUtilities::eGaussLobattoLegendre); + LibUtilities::Interp2D(p0, p1, data, t0, t1, coeffs); } break; case LibUtilities::eGauss_Lagrange: @@ -1581,10 +1583,13 @@ namespace Nektar p0(nummodes[0],LibUtilities::eGaussGaussLegendre); LibUtilities::PointsKey p1(nummodes[1],LibUtilities::eGaussGaussLegendre); - LibUtilities::Interp2D(p0, p1, data, - m_base[0]->GetPointsKey(), - m_base[1]->GetPointsKey(), - coeffs); + LibUtilities::PointsKey t0( + m_base[0]->GetNumModes(), + LibUtilities::eGaussGaussLegendre); + LibUtilities::PointsKey t1( + m_base[1]->GetNumModes(), + LibUtilities::eGaussGaussLegendre); + LibUtilities::Interp2D(p0, p1, data, t0, t1, coeffs); } break; default: diff --git a/library/LocalRegions/SegExp.cpp b/library/LocalRegions/SegExp.cpp index 9f7aefbefb13cfa509c303b0d447c522ee35f0c9..76f69b7c47b5bb10909a838965a94afeb564b2c6 100644 --- a/library/LocalRegions/SegExp.cpp +++ b/library/LocalRegions/SegExp.cpp @@ -426,7 
+426,9 @@ cout<<"deps/dx ="<GetPointsType() == LibUtilities::eGaussLobattoLegendre || + m_base[0]->GetPointsType() == LibUtilities::ePolyEvenlySpaced, + "Cannot use FwdTrans_BndConstrained with these points."); offset = 2; } break; diff --git a/library/MultiRegions/AssemblyMap/AssemblyMapCG.cpp b/library/MultiRegions/AssemblyMap/AssemblyMapCG.cpp index 390be6f2f83be8f21795f85841c0aa5a10f1f15a..c465350e5294e9ad7eb59ac40e218efbc405ddcb 100644 --- a/library/MultiRegions/AssemblyMap/AssemblyMapCG.cpp +++ b/library/MultiRegions/AssemblyMap/AssemblyMapCG.cpp @@ -275,7 +275,7 @@ namespace Nektar for(j = 0; j < locExpVector.size(); j++) { - exp = locExpVector[locExp.GetOffset_Elmt_Id(j)]; + exp = locExpVector[j]; for(k = 0; k < exp->GetNverts(); k++) { @@ -580,7 +580,7 @@ namespace Nektar /// - Count verts, edges, face and add up edges and face sizes for(i = 0; i < locExpVector.size(); ++i) { - exp = locExpVector[locExp.GetOffset_Elmt_Id(i)]; + exp = locExpVector[i]; nTotalVerts += exp->GetNverts(); nTotalEdges += exp->GetNedges(); nTotalFaces += exp->GetNfaces(); @@ -1073,7 +1073,7 @@ namespace Nektar // edges respectively which are local to this process. 
for(i = cnt = 0; i < locExpVector.size(); ++i) { - int elmtid = locExp.GetOffset_Elmt_Id(i); + int elmtid = i; exp = locExpVector[elmtid]; for (j = 0; j < exp->GetNverts(); ++j) { @@ -1359,8 +1359,9 @@ namespace Nektar { ASSERTL0( (exp->GetEdgeBasisType(j) == LibUtilities::eModified_A) || (exp->GetEdgeBasisType(j) == LibUtilities::eModified_B) || - (exp->GetEdgeBasisType(j) == LibUtilities::eModified_C), - "CG with variable order only available with modal expansion"); + (exp->GetEdgeBasisType(j) == LibUtilities::eModified_C) || + (exp->GetEdgeBasisType(j) == LibUtilities::eModifiedPyr_C), + "CG with variable order only available with modal expansion"); } dofs[1][exp->GetGeom()->GetEid(j)] = min(dofs[1][exp->GetGeom()->GetEid(j)], @@ -1551,7 +1552,7 @@ namespace Nektar for(i = 0; i < locExpVector.size(); ++i) { - exp = locExpVector[locExp.GetOffset_Elmt_Id(i)]; + exp = locExpVector[i]; for(j = 0; j < exp->GetNverts(); ++j) { @@ -1611,10 +1612,10 @@ namespace Nektar for(i = 0; i < m_numPatches; ++i) { m_numLocalBndCoeffsPerPatch[i] = (unsigned int) - locExpVector[locExp.GetOffset_Elmt_Id(i)]->NumBndryCoeffs(); + locExpVector[i]->NumBndryCoeffs(); m_numLocalIntCoeffsPerPatch[i] = (unsigned int) - locExpVector[locExp.GetOffset_Elmt_Id(i)]->GetNcoeffs() - - locExpVector[locExp.GetOffset_Elmt_Id(i)]->NumBndryCoeffs(); + locExpVector[i]->GetNcoeffs() - + locExpVector[i]->NumBndryCoeffs(); } /** @@ -1986,7 +1987,7 @@ namespace Nektar for (i = 0; i < locExpVector.size(); ++i) { - exp = locExpVector[locExp.GetOffset_Elmt_Id(i)]; + exp = locExpVector[i]; for (j = 0; j < exp->GetNverts(); ++j) { diff --git a/library/MultiRegions/AssemblyMap/AssemblyMapDG.cpp b/library/MultiRegions/AssemblyMap/AssemblyMapDG.cpp index 6fe286e0347a124474289f7a5fd1519e343ba978..57c87328e013e186085902f33902dc0a29c341b4 100644 --- a/library/MultiRegions/AssemblyMap/AssemblyMapDG.cpp +++ b/library/MultiRegions/AssemblyMap/AssemblyMapDG.cpp @@ -178,7 +178,7 @@ namespace Nektar int nbndry = 0; for(i = 
0; i < nel; ++i) // count number of elements in array { - eid = locExp.GetOffset_Elmt_Id(i); + eid = i; nbndry += expList[eid]->NumDGBndryCoeffs(); m_numLocalIntCoeffsPerPatch[i] = 0; m_numLocalBndCoeffsPerPatch[i] = @@ -230,7 +230,7 @@ namespace Nektar // Set up boost Graph for(i = 0; i < nel; ++i) { - eid = locExp.GetOffset_Elmt_Id(i); + eid = i; for(j = 0; j < expList[eid]->GetNtrace(); ++j) { @@ -319,8 +319,7 @@ namespace Nektar cnt = 0; for(i = 0; i < nel; ++i) { - // order list according to m_offset_elmt_id details in expList - eid = locExp.GetOffset_Elmt_Id(i); + eid = i; exp = expList[eid]; for(j = 0; j < exp->GetNtrace(); ++j) @@ -585,9 +584,7 @@ namespace Nektar cnt = 0; for(i = 0; i < locExpVector.size(); ++i) { - // Order list according to m_offset_elmt_id details in Exp - // so that triangules are listed first and then quads - eid = locExp.GetOffset_Elmt_Id(i); + eid = i; locExpansion = locExpVector[eid]; nDim = locExpansion->GetShapeDimension(); diff --git a/library/MultiRegions/CMakeLists.txt b/library/MultiRegions/CMakeLists.txt index d4d0ea0b525ebac667da56028988db2314016ebb..035767d00d2682339fb6092c01821e6de55f2af5 100644 --- a/library/MultiRegions/CMakeLists.txt +++ b/library/MultiRegions/CMakeLists.txt @@ -130,9 +130,12 @@ ENDIF(NEKTAR_USE_PETSC) ADD_DEFINITIONS(-DMULTI_REGIONS_EXPORTS) -ADD_NEKTAR_LIBRARY(MultiRegions lib ${NEKTAR_LIBRARY_TYPE} ${MULTI_REGIONS_SOURCES} ${MULTI_REGIONS_HEADERS} ${ASSEMBLY_MAP_HEADERS}) - -TARGET_LINK_LIBRARIES(MultiRegions LINK_PUBLIC Collections) +ADD_NEKTAR_LIBRARY(MultiRegions + SOURCES ${MULTI_REGIONS_SOURCES} + HEADERS ${MULTI_REGIONS_HEADERS} + DEPENDS Collections + SUMMARY "Nektar++ MultiRegions library" + DESCRIPTION "This library provides global expansions on multi-element domains.") # MultiRegions uses additional routines from Metis TARGET_LINK_LIBRARIES(MultiRegions LINK_PRIVATE ${METIS_LIB}) diff --git a/library/MultiRegions/ContField1D.cpp b/library/MultiRegions/ContField1D.cpp index 
ff2eb88f0f3f66055f6391e966f337272ba684c6..b042806c45f83004194f1c7001569b874ce81fcf 100644 --- a/library/MultiRegions/ContField1D.cpp +++ b/library/MultiRegions/ContField1D.cpp @@ -114,10 +114,12 @@ namespace Nektar * @param variable An optional parameter to indicate for which * variable the field should be constructed. */ - ContField1D::ContField1D(const LibUtilities::SessionReaderSharedPtr &pSession, - const SpatialDomains::MeshGraphSharedPtr &graph1D, - const std::string &variable): - DisContField1D(pSession,graph1D,variable,false), + ContField1D::ContField1D( + const LibUtilities::SessionReaderSharedPtr &pSession, + const SpatialDomains::MeshGraphSharedPtr &graph1D, + const std::string &variable, + const Collections::ImplementationType ImpType): + DisContField1D(pSession,graph1D,variable,false,ImpType), m_locToGloMap(), m_globalLinSysManager( boost::bind(&ContField1D::GenGlobalLinSys, this, _1), diff --git a/library/MultiRegions/ContField1D.h b/library/MultiRegions/ContField1D.h index ee4c4a0877604170fa00f1a95f1c81bb810ac985..0c3844910667507911a502