diff --git a/.github/workflows/idefix-ci-jobs.yml b/.github/workflows/idefix-ci-jobs.yml index 32eef5911..847ba3cbe 100644 --- a/.github/workflows/idefix-ci-jobs.yml +++ b/.github/workflows/idefix-ci-jobs.yml @@ -38,6 +38,8 @@ jobs: run: scripts/ci/run-tests $IDEFIX_DIR/test/HD/sod-iso -all $TESTME_OPTIONS - name: Mach reflection test run: scripts/ci/run-tests $IDEFIX_DIR/test/HD//MachReflection -all $TESTME_OPTIONS + - name: Sedov blast wave + run: scripts/ci/run-tests $IDEFIX_DIR/test/HD/SedovBlastWave -all $TESTME_OPTIONS ParabolicHydro: runs-on: self-hosted @@ -68,8 +70,10 @@ jobs: run: scripts/ci/run-tests $IDEFIX_DIR/test/MHD/sod-iso -all $TESTME_OPTIONS - name: Orszag Tang run: scripts/ci/run-tests $IDEFIX_DIR/test/MHD/OrszagTang -all $TESTME_OPTIONS - - name: Orszag Tang 3D+restart tests + - name: Orszag Tang 3D run: scripts/ci/run-tests $IDEFIX_DIR/test/MHD/OrszagTang3D -all $TESTME_OPTIONS + - name: Linear wave test + run: scripts/ci/run-tests $IDEFIX_DIR/test/MHD/LinearWaveTest -all $TESTME_OPTIONS - name: Axis Flux tube run: scripts/ci/run-tests $IDEFIX_DIR/test/MHD/AxisFluxTube -all $TESTME_OPTIONS @@ -221,3 +225,24 @@ jobs: run: scripts/ci/run-tests $IDEFIX_DIR/test/utils/dumpImage -all $TESTME_OPTIONS - name: Column density run: scripts/ci/run-tests $IDEFIX_DIR/test/utils/columnDensity -all $TESTME_OPTIONS + + IOs: + needs: [Fargo, Dust, Planet, ShearingBox, SelfGravity] + runs-on: self-hosted + steps: + - name: Check out repo + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 + with: + submodules: recursive + persist-credentials: false + - name: Restart dumps + run: scripts/ci/run-tests $IDEFIX_DIR/test/IO/dump -all $TESTME_OPTIONS + - name: Pydefix + run: | + python3 -m venv $IDEFIX_DIR/test/IO/pydefix/env + source $IDEFIX_DIR/test/IO/pydefix/env/bin/activate + python3 -m pip install -r $IDEFIX_DIR/test/IO/pydefix/python_requirements.txt + scripts/ci/run-tests $IDEFIX_DIR/test/IO/pydefix -all $TESTME_OPTIONS + + - name: xdmf 
+ run: scripts/ci/run-tests $IDEFIX_DIR/test/IO/xdmf -all $TESTME_OPTIONS diff --git a/.github/workflows/idefix-ci.yml b/.github/workflows/idefix-ci.yml index b6ed719e9..50fd26eee 100644 --- a/.github/workflows/idefix-ci.yml +++ b/.github/workflows/idefix-ci.yml @@ -32,7 +32,7 @@ jobs: name: CPU Jobs (intel OneApi) uses: ./.github/workflows/idefix-ci-jobs.yml with: - TESTME_OPTIONS: -intel -Werror + TESTME_OPTIONS: -intel -Werror -ccache IDEFIX_COMPILER: icc gcc-jobs: @@ -40,7 +40,7 @@ jobs: name: CPU Jobs (gcc) uses: ./.github/workflows/idefix-ci-jobs.yml with: - TESTME_OPTIONS: -Werror + TESTME_OPTIONS: -Werror -ccache IDEFIX_COMPILER: gcc cuda-jobs: diff --git a/.gitignore b/.gitignore index 4de7fb76e..e3bf696a2 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,9 @@ CMakeFiles CMakeCache.txt cmake_install.cmake Makefile.local +git-state.txt +Kokkos_Version_info.* + # test artifacts test/**/*.o @@ -36,6 +39,9 @@ test/**/KokkosCore* test/**/*.csv test/**/*.pyc test/**/*.dat +test/**/cmake_packages* +test/**/ +idefix-tests.junit.xml # machine specific cache and hidden files .* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f0b285315..8249b7b03 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -45,9 +45,10 @@ repos: - F403 # ignore import * - repo: https://github.com/neutrinoceros/inifix - rev: v5.0.2 + rev: v6.1.2 hooks: - id: inifix-format + files: ^(test/).*\.(ini)$ # want to skip pytest.ini - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 diff --git a/CHANGELOG.md b/CHANGELOG.md index 3778d514e..6bb7f979e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,23 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [2.3.0] 2026-04-21 +### Changed + +- fixed a bug that could lead to diverging results around the spherical axis in non-ideal MHD in 2.5D and 3D (#356) +- refactor of the MPI exchange routines and boundary routines to avoid buildup of roundoff errors at domain faces/edges that could lead to the sudden burst of div(B) or incoherences between MPI sub-domains (#357) +- fixed a bug that could lead to compilations error when targetting AMD APUs (#359) +- fixed a bug that led to the generation of incorrect subviews in 2.5D with vector_potential enabled (#362) +- fixed a bug that could lead to memory corruption when using the UCT_HLLD emf reconstruction scheme and DIMENSIONS < COMPONENTS (#363) +- reorganise the test to separate specific IO tests from physics tests (#367) + +### Added +- magnetic vector potential is now accessible from pydefix when enabled (#361) +- use ccache in the test suite to reduce the runtime of continuous integration (#364) +- automatically detects pybind11 path with cmake when using pydefix (#367) +- skeleton to run the test suite with pytest (#366) + + ## [2.2.02] 2025-10-18 ### Changed @@ -13,6 +30,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - fixed a bug that led to incorrect BX2 reconstruction when axis is not used on both sides of the domain (#345) - fixed a bug that led to incorrect reflective boundary conditions on B when DIMENSIONS < 3 (#345) - fixed a bug that led to incorrect dust stopping time when the adiabatic equation of state is used with "size" drag law (#353) +- fixed div(B) normalisation to avoid "too large div(B)" errors when this is actually due to nulls in |B| +- fixed insecure github actions settings (#373) ### Added diff --git a/CMakeLists.txt b/CMakeLists.txt index 364ee4ef3..6698aaec6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,10 +5,10 @@ endif() set (CMAKE_CXX_STANDARD 17) set(Idefix_VERSION_MAJOR 2) -set(Idefix_VERSION_MINOR 2) -set(Idefix_VERSION_PATCH 02) 
+set(Idefix_VERSION_MINOR 3) +set(Idefix_VERSION_PATCH 0) -project (idefix VERSION 2.2.02) +project (idefix VERSION 2.3.0) option(Idefix_MHD "enable MHD" OFF) option(Idefix_MPI "enable Message Passing Interface parallelisation" OFF) option(Idefix_HIGH_ORDER_FARGO "Force Fargo to use a PPM reconstruction scheme" OFF) @@ -16,6 +16,7 @@ option(Idefix_DEBUG "Enable Idefix debug features (makes the code very slow)" OF option(Idefix_RUNTIME_CHECKS "Enable runtime sanity checks" OFF) option(Idefix_WERROR "Treat compiler warnings as errors" OFF) option(Idefix_PYTHON "Enable python bindings (requires pybind11)" OFF) +set(Idefix_PROBLEM_DIR "${CMAKE_BINARY_DIR}" CACHE STRING "Problem directory to build for.") set(Idefix_CXX_FLAGS "" CACHE STRING "Additional compiler/linker flag") set(Idefix_DEFS "definitions.hpp" CACHE FILEPATH "Problem definition header file") option(Idefix_CUSTOM_EOS "Use custom equation of state" OFF) @@ -34,7 +35,6 @@ set_property(CACHE Idefix_PRECISION PROPERTY STRINGS Double Single) set(Idefix_LOOP_PATTERN "Default" CACHE STRING "Loop pattern for idefix_for") set_property(CACHE Idefix_LOOP_PATTERN PROPERTY STRINGS Default SIMD Range MDRange TeamPolicy TeamPolicyInnerVector) - # load git revision tools list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/") include(GetGitRevisionDescription) @@ -51,7 +51,7 @@ if(Kokkos_ENABLE_CUDA) endif() # Add kokkos CMAKE files (required early since these set compiler options) -add_subdirectory(src/kokkos build/kokkos) +add_subdirectory(src/kokkos ${CMAKE_BINARY_DIR}/build/kokkos) include_directories(${Kokkos_INCLUDE_DIRS_RET}) # Add Idefix CXX Flags @@ -75,20 +75,22 @@ endif() add_executable(idefix) add_subdirectory(src build) -if(EXISTS ${PROJECT_BINARY_DIR}/setup.cpp) - target_sources(idefix PUBLIC ${PROJECT_BINARY_DIR}/setup.cpp) +# make absolute +get_filename_component(Idefix_PROBLEM_DIR_ABS ${Idefix_PROBLEM_DIR} ABSOLUTE BASE_DIR ${PROJECT_BINARY_DIR}) + +if(EXISTS 
${Idefix_PROBLEM_DIR_ABS}/setup.cpp) + target_sources(idefix PUBLIC ${Idefix_PROBLEM_DIR_ABS}/setup.cpp) else() message(WARNING "No specific setup.cpp found in the problem directory (this message can be ignored if using python to define your problem)") endif() # If a CMakeLists.txt is in the problem dir (for problem-specific source files) # then read it -if(EXISTS ${PROJECT_BINARY_DIR}/CMakeLists.txt) - message(STATUS "Including problem-specific CMakeLists in '${PROJECT_BINARY_DIR}'") - add_subdirectory(${PROJECT_BINARY_DIR} build/setup) +if(EXISTS ${Idefix_PROBLEM_DIR_ABS}/CMakeLists.txt) + message(STATUS "Including problem-specific CMakeLists in '${Idefix_PROBLEM_DIR_ABS}'") + add_subdirectory(${Idefix_PROBLEM_DIR_ABS} build/setup) endif() - if(Idefix_MHD) add_compile_definitions("MHD=YES") else() @@ -99,10 +101,7 @@ if(Idefix_MPI) add_compile_definitions("WITH_MPI") find_package(MPI REQUIRED) target_link_libraries(idefix MPI::MPI_CXX) - target_sources(idefix - PUBLIC src/mpi.cpp - PUBLIC src/mpi.hpp - ) + add_subdirectory(src/mpi) endif() if(Idefix_HDF5) @@ -116,7 +115,13 @@ if(Idefix_HDF5) ) find_package(HDF5 REQUIRED) target_link_libraries(idefix "${HDF5_LIBRARIES}") - target_include_directories(idefix "${HDF5_INCLUDE_DIRS}") + message(STATUS "Found HDF5 include directories: ${HDF5_INCLUDE_DIRS}") + target_include_directories(idefix PUBLIC "${HDF5_INCLUDE_DIRS}") + if(Idefix_MPI) + if(NOT HDF5_IS_PARALLEL) + message(FATAL_ERROR "Parallel HDF5 required for Idefix_MPI but the found HDF5 library does not support it") + endif() + endif() message(STATUS "XDMF (hdf5+xmf) dumps enabled") else() set(Idefix_HDF5 OFF) @@ -124,6 +129,16 @@ endif() if(Idefix_PYTHON) add_compile_definitions("WITH_PYTHON") + find_package(Python3 REQUIRED COMPONENTS Interpreter Development) + + execute_process( + COMMAND "${Python3_EXECUTABLE}" -m pybind11 --cmakedir + OUTPUT_VARIABLE PYBIND11_CMAKE_DIR + OUTPUT_STRIP_TRAILING_WHITESPACE + COMMAND_ERROR_IS_FATAL ANY + ) + + list(APPEND 
CMAKE_PREFIX_PATH "${PYBIND11_CMAKE_DIR}") if (NOT DEFINED Python_FIND_FRAMEWORK) set(Python_FIND_FRAMEWORK "LAST") # Use Apple's python only at last resort on Macos endif () @@ -213,7 +228,7 @@ if(${Idefix_PRECISION} STREQUAL "Single") endif() target_include_directories(idefix PUBLIC - "${PROJECT_BINARY_DIR}" + "${Idefix_PROBLEM_DIR_ABS}" ) target_include_directories(idefix PUBLIC src/kokkos/core/src @@ -234,6 +249,7 @@ target_include_directories(idefix PUBLIC src/gravity src/utils src/utils/iterativesolver + src/mpi src ) @@ -251,6 +267,7 @@ message(STATUS " Python: ${Idefix_PYTHON}") message(STATUS " Reconstruction: ${Idefix_RECONSTRUCTION}") message(STATUS " Precision: ${Idefix_PRECISION}") message(STATUS " Version: ${Idefix_VERSION}") +message(STATUS " Problem directory: '${Idefix_PROBLEM_DIR}'") message(STATUS " Problem definitions: '${Idefix_DEFS}'") if(Idefix_CUSTOM_EOS) message(STATUS " EOS: Custom file '${Idefix_CUSTOM_EOS_FILE}'") diff --git a/doc/python_requirements.txt b/doc/python_requirements.txt index c75fa02b7..28715fb71 100644 --- a/doc/python_requirements.txt +++ b/doc/python_requirements.txt @@ -6,12 +6,12 @@ # python -m pip install -r python_requirements.txt wheel>=0.38.4 # help forward compatibility for pip with old sphinx plugins -sphinx==5.3.0 -sphinx_rtd_theme==1.3.0 +sphinx==9.1.0 +sphinx_rtd_theme==3.1.0 sphinx_git==11.0.0 -breathe==4.34.0 +breathe==4.36.0 exhale==0.3.7 -m2r2==0.3.2 +m2r2==0.3.4 sphinx-copybutton==0.5.2 #sphinxcontrib-applehelp==1.0.7 matplotlib==3.10.0 diff --git a/doc/source/conf.py b/doc/source/conf.py index 595672c25..f47f6659c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -23,7 +23,7 @@ author = 'Geoffroy Lesur' # The full version, including alpha/beta/rc tags -release = '2.2.02' +release = '2.3.0' diff --git a/doc/source/modules/pydefix.rst b/doc/source/modules/pydefix.rst index f68284203..0a8daf753 100644 --- a/doc/source/modules/pydefix.rst +++ b/doc/source/modules/pydefix.rst @@ -23,7 +23,8 @@ 
Pydefix usage Idefix Configuration ++++++++++++++++++++ -In order to use Pydefix, you need to switch on ``Idefix_PYTHON`` in cmake. This will auto-detect Python and check that pybind11 can be used effectively. +In order to use Pydefix, you need to switch on ``Idefix_PYTHON`` in cmake. This will auto-detect Python and check that pybind11 can be used effectively. If you are using a python environement manager such as venv or conda, make sure to activate the right environement *before* running cmake, as idefix +now includes the full path to python and pybind11 in its executable file. Run Idefix with Pydefix diff --git a/doc/source/reference/makefile.rst b/doc/source/reference/makefile.rst index 23052d92c..8f58ceedb 100644 --- a/doc/source/reference/makefile.rst +++ b/doc/source/reference/makefile.rst @@ -59,6 +59,12 @@ Several options can be enabled from the command line (or are accessible with ``c The number of ghost cells is automatically adjusted as a function of the order of the reconstruction scheme. *Idefix* uses 2 ghost cells when ``ORDER < 4`` and 3 ghost cells when ``ORDER = 4`` +``-D Idefix_PROBLEM_DIR=.`` + Specify where to find the problem directory to build *Idefix* out of source. + Place yourself in the ``build`` directory you want to build in and call the ``cmake`` by : + + pointing the *Idefix* source directory via ``IDEFIX_DIR`` as usual. + + using ``-D Idefix_PROBLEM_DIR`` to point the problem to build. + ``-D Kokkos_ENABLE_OPENMP=ON`` Enable OpenMP parallelisation on supported compilers. Note that this can be enabled simultaneously with MPI, resulting in a hybrid MPI+OpenMP compilation. 
diff --git a/doc/source/testing.rst b/doc/source/testing.rst index 888ac2057..59bd2177a 100644 --- a/doc/source/testing.rst +++ b/doc/source/testing.rst @@ -71,9 +71,17 @@ How tests are driven (testme scripts) Each test directory contains a small Python "testMe" driver that uses the helper Python class documented in the repository: +- See the test launcher documentation: :doc:`test.py ` - See the test helper documentation: :doc:`idfxTest ` -That helper (idfxTest) is responsible for: +The test launcher (test.py) is responsible for: + +- Loading all the test definitions by search all the ``testme.json`` files in the ``test`` + directory. +- Calling the :doc:`idfxTest ` helper to run the particular test. +- Generate reports about success and failures. + +The helper (idfxTest) is responsible for: - parsing TESTME_OPTIONS-like flags (precision, MPI, CUDA, reconstruction, vector potential, etc.), - calling configure / compile / run, @@ -108,10 +116,12 @@ Relevant files - Workflow entry point: .github/workflows/idefix-ci.yml - Reusable jobs: .github/workflows/idefix-ci-jobs.yml +- Test launcher documentation: :doc:`test launcher ` - Test helper documentation: :doc:`idfxTest ` .. toctree:: :maxdepth: 2 :caption: Contents: + testing/testLauncher.rst testing/idfxTest.rst diff --git a/doc/source/testing/idfxTest.rst b/doc/source/testing/idfxTest.rst index b425d3fe2..96e364465 100644 --- a/doc/source/testing/idfxTest.rst +++ b/doc/source/testing/idfxTest.rst @@ -74,6 +74,9 @@ The constructor parses command-line arguments using ``argparse``. These options * - ``-Werror`` - ``Werror`` - Treat compiler warnings as errors. + * - ``-ccache`` + - ``ccache`` + - Enable usage of ccache to build the tests and reduce the build time. 
Main Methods ------------ diff --git a/doc/source/testing/testLauncher.rst b/doc/source/testing/testLauncher.rst new file mode 100644 index 000000000..aea7f5c3d --- /dev/null +++ b/doc/source/testing/testLauncher.rst @@ -0,0 +1,383 @@ +========================== +Test launcher and reporter +========================== + +Overview +-------- + +The class :doc:`idfxTest ` provides the toolbox to implement an *Idefix* integration test for validation. +In order to ease launching all the tests, the user might prefer to use directly the ``./test.py`` command at the +root of the *Idefix* sources. + +This script will run all the listed variants of *Idefix* and build a report in the terminal. At the +end of the run a standard ``junit.xml`` file is produced. This one can be translated into a browsable +HTML file. + +Depencencies +------------ + +Before using :doc:`idfxTest ` you need to install some Python depencencies (possibly in a ``virtual env``): + +.. code-block:: shell + + pip install -r test/python_requirements.txt + +Running +------- + +To run the test you can basically : + +.. code-block:: shell + + # Run all tests + ./test.py + + # Run all tests in ./tests/HD + ./test.py -subdir=./tests/HD + + # Select in more details the tests containing the "single" keyword + # See pytest documentation for the exact advanced semantic + ./test.py -subdir=./tests/HD -k single + + # Run in verbose + ./test.py -v + +The result of the execution will be an output like : + +.. 
code-block:: text + + ============================================ test session starts ============================================= + collected 52 items / 44 deselected / 8 selected + + test.py::test_idefix_build_run_check[HD/sod-iso-idefix.ini-single-reconstruction-2] PASSED [ 12%] + test.py::test_idefix_build_run_check[HD/sod-iso-idefix-hll.ini-single-reconstruction-2] PASSED [ 25%] + test.py::test_idefix_build_run_check[HD/sod-iso-idefix-hllc.ini-single-reconstruction-2] PASSED [ 37%] + test.py::test_idefix_build_run_check[HD/sod-iso-idefix-tvdlf.ini-single-reconstruction-2] PASSED [ 50%] + test.py::test_idefix_build_run_check[HD/sod-idefix.ini-single-reconstruction-2] PASSED [ 62%] + test.py::test_idefix_build_run_check[HD/sod-idefix-hll.ini-single-reconstruction-2] PASSED [ 75%] + test.py::test_idefix_build_run_check[HD/sod-idefix-hllc.ini-single-reconstruction-2] PASSED [ 87%] + test.py::test_idefix_build_run_check[HD/sod-idefix-tvdlf.ini-single-reconstruction-2] PASSED [100%] + + ---------- generated xml file: idefix-tests.junit.xml --------------------------------------------------------- + =============================== 8 passed, 44 deselected in 73.03s (0:01:13) =================================== + +When an error is detected, the output of the command will be printed at the end. + +HTML report +----------- + +If you want to to generate an HTML page from the report you can proceed by using the Python package ``junit2html`` : + +.. code-block:: shell + + # install junit2html + pip install junit2html + + # convert the report + junit2html ./idefix-tests.junit.xml ./idefix-tests.junit.html + +Advanced usage of the command +----------------------------- + +Here the options supported by the test script : + +.. 
code-block:: text + + usage: test.py [-h] [-noplot] [-ploterr] [-cmake CMAKE [CMAKE ...]] [-definitions DEFINITIONS] + [-dec DEC [DEC ...]] [-check] [-cuda] [-intel] [-hip] [-single] [-vectPot] + [-reconstruction RECONSTRUCTION] [-idefixDir IDEFIXDIR] [-mpi] [-all] [-init] + [-Werror] [-ccache] [-restart] [-v] [--help-pytest] [-fake] [-subdir SUBDIR] + + options: + -h, --help show this help message and exit + -noplot disable plotting in standard tests + -ploterr Enable plotting on error in regression tests + -cmake CMAKE [CMAKE ...] + CMake options + -definitions DEFINITIONS + definitions.hpp file + -dec DEC [DEC ...] MPI domain decomposition + -check Only perform regression tests without compilation + -cuda Test on Nvidia GPU using CUDA + -intel Test compiling with Intel OneAPI + -hip Test on AMD GPU using HIP + -single Enable single precision + -vectPot Enable vector potential formulation + -reconstruction RECONSTRUCTION + set reconstruction scheme (2=PLM, 3=LimO3, 4=PPM) + -idefixDir IDEFIXDIR Set directory for idefix source files (default $IDEFIX_DIR) + -mpi Enable MPI + -all Do all test suite (otherwise, just do the test with the current configuration) + -init Reinit reference files for non-regression tests (dangerous!) + -Werror Consider warnings as errors + -ccache Use ccache to reduce the build time over multiple run of the test suite. + -restart Enable creating a restart from a checkpoint. + -v, --verbose Enable verbose mode, by not capturing the output. + --help-pytest Display the options you can transmit directly to pytest in addition to the specific to idefix tests. + -fake Make a fake run by just logging the actions to validate that we generate same command over refactoring. + -subdir SUBDIR Select the test in the given subdir not to run all. + +The script is built on top of the ``pytest`` command so you automatically get access +to all the advanced option this command provide. Here a few examples : + +.. 
code-block:: shell + + # get the pytest help + ./test.py --help-pytest + + # let pytest filtering the tests + ./test.py -k "single and mpi" + + # stop on first failure + ./test.py -x + + # re-run only the last failed tests + ./test.py --lf + +Definition of the tests +----------------------- + +The script looks for all the files named ``testme.json`` in the ``test`` directory and all its sub-directories. +This file describes the combination of parameters used to produce the list of *Idefix* runs to check. +For a single basic configuration one can use : + +.. code-block:: json + + { + "variants": { + "dumpname": "dump.0001.dmp", + "ini": "idefix.ini", + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": false, + "standardTest": false, + "tolerance": 0 + } + } + +Available parameters +-------------------- + +The parameters in the ``variants`` dictionnary correspond to the options supported by the +:doc:`idfxTest ` script to configure the build and run of *Idefix*. + +In addition there is some extra keys which are dedicated to the json interpretation layer : + +.. list-table:: + :header-rows: 1 + + * - Option + - Default + - Description + * - ``dumname`` + - ``dump.0001.dmp`` + - Dump file to use to check the results after the run. + * - ``ini`` + - ``idefix.ini`` + - The configuration file to use. + * - ``tolerance`` + - ``0`` + - The margins to allow when checking the results. + * - ``standardTest`` + - ``true`` + - Runs any Python-based standard tests (e.g., ``testidefix.py``) present in the test directory for additional validation. + * - ``nonRegressionTest`` + - ``true`` + - Compares the output dump file to a reference file using RMSE; fails if the error exceeds the tolerance. + * - ``nonRegressionTestIni`` + - Same than ``ini`` + - When making restart you might want to make the check using the inirial configuration file. + * - ``multirun`` + - ``{}`` + - See the multi-run section below. 
+ +Looping over parameters +----------------------- + +You might want to explore running Idefix within parameter ranges (configuration files, modes). +For this simply list the values you want as a list. The test script will automatically +generate all combinations. + +.. code-block:: json + + { + "variants": { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-hll.ini"], + "vectPot": [false, true], + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "standardTest": false, + "tolerance": 0 + } + } + +It will automatically produce the tests : + +* HD/sod-iso-idefix.ini +* HD/sod-iso-idefix.ini-vectPot +* HD/sod-iso-idefix.ini-mpi +* HD/sod-iso-idefix.ini-vectPot-mpi +* HD/sod-iso-idefix-hll.ini +* HD/sod-iso-idefix-hll.ini-vectPot +* HD/sod-iso-idefix-hll.ini-mpi +* HD/sod-iso-idefix-hll.ini-vectPot-mpi + +Specific keys +------------- + +There is some keys which are by default some arrays, they will not be considered +as combination rules : + +* ``dec`` +* ``multirun`` +* ``restart_no_overwrite`` +* ``tolerance`` + +Reduce the combinations +----------------------- + +You might not want to see all the combinations but just a few, for this, you +can list several sets as a list. Here using single only on half of the modes. + +.. 
code-block:: json + + { + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "standardTest": false, + "tolerance": 0 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix-hll.ini"], + "vectPot": true, + "single": true, + "reconstruction": 2, + "mpi": [false, true], + "standardTest": false, + "tolerance": 0 + } + ] + } + +* HD/sod-iso-idefix.ini +* HD/sod-iso-idefix.ini-mpi +* HD/sod-iso-idefix-hll.ini-single-vectPot +* HD/sod-iso-idefix-hll.ini-single-vectPot-mpi + +Naming the test +--------------- + +If you prefer to see the options appearing in a specific order in the generated test name, +you can provide the key ``namings`` listing as a comma separated list the variables in +the order you want to see them composing the test name. + +.. code-block:: json + + { + "namings": "ini,single,mpi", + "variants": { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-hll.ini"], + "vectPot": false, + "single": [false, true], + "reconstruction": 2, + "mpi": [false, true], + "standardTest": false, + "tolerance": 0 + } + } + + +By default, the alphabetical order is used. + +When clauses +------------ + +You can also dynamically override a specific value when a parameter value is selected. +It is just like if you used and IF statement. + +.. code-block:: json + + { + "namings": "ini,single,mpi", + "variants": { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-hll.ini"], + "vectPot": false, + "single": [false, true], + "reconstruction": 2, + "mpi": [false, true], + "standardTest": false, + "tolerance": 0 + }, + "when": { + "conditions": { + "single": true + }, + "apply": { + "reconstruction": 1 + } + } + } + +In this case, ``reconstruction`` will be set to 1 when ``single`` is equal to ``true``. + +Note that the ``conditions`` field can contains several values which will be treaded +as and AND logical operator. 
+ +You can provide several ``when`` clauses by using a list of them instead of directly +the dictionnary. + +Making multi-run steps +---------------------- + +In order to validate checkpoint restart, or continuing a simulation with dirfferent +tunnings the script supports decribing multi-run configurations. + +They are described like : + +.. code-block:: json + + { + "variants": { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": false, + "standardTest": false, + "tolerance": 0, + "dec": [2,2,2], + "multirun": [ + { + },{ + "mpi": true, + "restart": true, + "restart_no_overwrite": ["dump.0001.dmp", "data.0005.vtk"] + } + ] + }, + } + +Using the idfxTest options +-------------------------- + +As the ``test.py`` uses the class described in :doc:`idfxTest ` it also supports all the +command line options it offers. + +Most usefull might be the enabling of ``ccache`` to reduce the compilation time from one +run to another. + +.. code-block:: shell + + ./test.py -ccache diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 000000000..b3482664f --- /dev/null +++ b/pytest.ini @@ -0,0 +1,6 @@ +[pytest] +markers= + default: Test to run by default. 
+python_files="test_*.py" +junit_logging="system-out" +junit_log_passing_tests=false diff --git a/pytools/idfx_test.py b/pytools/idfx_test.py index 25bac76b4..fd28ccb61 100644 --- a/pytools/idfx_test.py +++ b/pytools/idfx_test.py @@ -4,6 +4,7 @@ import subprocess import sys import re +import json import numpy as np import matplotlib.pyplot as plt @@ -22,7 +23,7 @@ class bcolors: UNDERLINE = '\033[4m' class idfxTest: - def __init__ (self): + def __init__ (self, current_test_file, name=""): parser = argparse.ArgumentParser() idefix_dir_env = os.getenv("IDEFIX_DIR") @@ -99,19 +100,94 @@ def __init__ (self): help="Consider warnings as errors", action="store_true") + parser.add_argument("-ccache", + help="Use ccache to reduce the build time over multiple run of the test suite.", + action="store_true") + + parser.add_argument("-restart", + help="Enable creating a restart from a checkpoint.", + action='store_true') + + parser.add_argument("-v", "--verbose", + help="Enable verbose mode, by not capturing the output.", + action="store_true") + + parser.add_argument("--help-pytest", + help="Display the options you can transmit directly to pytest in addition to the specific to idefix tests.", + action="store_true") + + parser.add_argument("-fake", + help="Make a fake run by just logging the actions to validate that we generate same command over refactoring.", + action="store_true") + + # this option is not used directly by direct users of idfxTest but by idx_test_gen + parser.add_argument("-subdir", + default="./test", + help="Select the test in the given subdir not to run all.", + type=str) args, unknown=parser.parse_known_args() # transform all arguments from args into attributes of this instance self.__dict__.update(vars(args)) + # store the full path of problem directory + self.currentTestFile = current_test_file + self.currentTestName = name + self.problemDir=os.path.dirname(current_test_file) self.referenceDirectory = os.path.join(idefix_dir_env,"reference") # current 
directory relative to $IDEFIX_DIR/test (used to retrieve the path ot reference files) - self.testDir=os.path.relpath(os.curdir,os.path.join(idefix_dir_env,"test")) - - def configure(self,definitionFile=""): + self.testDir=os.path.relpath(self.problemDir,os.path.join(idefix_dir_env,"test")) + # build directory, currently inside the test named build-test + self.buildDir=os.path.join(self.problemDir,"build-test") + # remind what build we dit last + self.lastCmakeCmd="" + # subdir + self.filterSubdir=args.subdir + # save + self.cmdArgs = vars(args) + self.cmdArgs.update({ + "restart_no_overwrite": [], + }) + self.log=[] + # when making a restart we should not overrite those files (will be checked) + self.restart_no_overwrite=[] + + # forward args for pytest + if args.verbose: + unknown.append("--capture=no") + if args.help_pytest: + unknown.append("--help") + # remaining args + self.remainingArgs=unknown + + def addLog(self, entry): + if self.fake: + self.log.append(entry) + with open(os.path.join(self.problemDir,"testsuite.log.json"), "w+") as fp: + json.dump(self.log, fp, indent='\t') + + def applyConfig(self, config: dict={}): + # check args + for key, value in config.items(): + if key not in ['ini', 'testfile', 'testname', 'dumpname']: + assert key in self.cmdArgs, f"The given configuration overriding try to set an invalid paramater : {key}={value}" + + # override options + newArgs = {} + newArgs.update(self.cmdArgs) + newArgs.update(config) + + # replace in dict + self.__dict__.update(newArgs) + + def _genCmakeCommand(self,definitionFile=""): comm=["cmake"] # add source directory comm.append(self.idefixDir) + + # we will build in ./build-test so problem dir is parent + comm.append("-DIdefix_PROBLEM_DIR="+self.problemDir) + # add specific options for opt in self.cmake: comm.append("-D"+opt) @@ -155,7 +231,7 @@ def configure(self,definitionFile=""): else: self.definitions="definitions.hpp" - comm.append("-DIdefix_DEFS="+self.definitions) + 
comm.append("-DIdefix_DEFS="+os.path.join(self.problemDir, self.definitions)) if(self.mpi): comm.append("-DIdefix_MPI=ON") @@ -169,20 +245,88 @@ def configure(self,definitionFile=""): elif(self.reconstruction==4): comm.append("-DIdefix_RECONSTRUCTION=Parabolic") + # export ccache env + if self.ccache: + comm.append("-DCMAKE_CXX_COMPILER_LAUNCHER=ccache") + + # ok + return comm + + + def configure(self,definitionFile="", reuse_last_same_build=True, override: dict={}): + # log + self.addLog({"call": "configure", "args":{ + 'definitionFile': definitionFile, + 'reuse_last_same_build': reuse_last_same_build, + 'override': override + }}) + + # gen command + comm = self._genCmakeCommand(definitionFile) + # log + print("***************** CALLING CMAKE *******************") + print(f"mkdir -p {self.buildDir}") + print(f"cd {self.buildDir}") + print(' '.join(comm)) + print("***************************************************") + + # not action needed if same command or force no rebuild + if reuse_last_same_build == True and self.lastCmakeCmd == ' '.join(comm): + print("SKIP ALREADY DONE") + return + + # run try: - cmake=subprocess.run(comm) - cmake.check_returncode() + # do cleanup + self.clean() + + # log and in fake mode we do not execute + self.addLog({"command": comm}) + + # call cmake + if not self.fake: + cmake=subprocess.run(comm, cwd=os.path.abspath(self.buildDir)) + cmake.check_returncode() except subprocess.CalledProcessError as e: print(bcolors.FAIL+"***************************************************") print("Cmake failed") print("***************************************************"+bcolors.ENDC) raise e + finally: + # remind for next time + self.lastCmakeCmd = ' '.join(comm) + + def clean(self): + # log and in fake mode we do not execute + self.addLog({"call": "clean", "args":{}}) + if self.fake: + return + + # remove the build directory before re-creating it + if os.path.exists(self.buildDir): + shutil.rmtree(self.buildDir) + + # recreate + 
os.makedirs(self.buildDir, exist_ok=False) def compile(self,jobs=8): + self.addLog({"call": "compile", "args":{ + 'jobs': jobs, + }}) + try: - make=subprocess.run(["make","-j"+str(jobs)]) - make.check_returncode() + comm = ["make","-j"+str(jobs)] + self.addLog({"command": comm}) + + print("***************************************************") + print(f"cd {os.getcwd()}") + print(' '.join(comm)) + print("***************************************************") + + if not self.fake: + make=subprocess.run(comm, cwd=os.path.abspath(self.buildDir)) + make.check_returncode() except subprocess.CalledProcessError as e: print(bcolors.FAIL+"***************************************************") print("Compilation failed") @@ -190,7 +334,14 @@ def compile(self,jobs=8): raise e def run(self, inputFile="", np=2, nowrite=False, restart=-1): - comm=["./idefix"] + # log + self.addLog({"call": "run", "args":{ + 'np': np, + 'nowrite': nowrite, + 'restart': restart, + }}) + + comm=[os.path.join(self.buildDir,"idefix")] if inputFile: comm.append("-i") comm.append(inputFile) @@ -218,9 +369,16 @@ def run(self, inputFile="", np=2, nowrite=False, restart=-1): comm.append("-restart") comm.append(str(restart)) + print("***************************************************") + print(f"cd {os.getcwd()}") + print(' '.join(comm)) + print("***************************************************") + try: - make=subprocess.run(comm) - make.check_returncode() + self.addLog({"command": comm}) + if not self.fake: + make=subprocess.run(comm, cwd=self.problemDir) + make.check_returncode() except subprocess.CalledProcessError as e: print(bcolors.FAIL+"***************************************************") print("Execution failed") @@ -230,6 +388,9 @@ def run(self, inputFile="", np=2, nowrite=False, restart=-1): self._readLog() def _readLog(self): + if self.fake: + return + if not os.path.exists('./idefix.0.log'): # When no idefix file is produced, we leave return @@ -274,6 +435,12 @@ def _readLog(self): 
self.perf=float(line.group(1)) def checkOnly(self, filename, tolerance=0): + # log + self.addLog({"call": "checkOnly", "args":{ + 'filename': filename, + 'tolerance': tolerance, + }}) + # Assumes the code has been run manually using some configuration, so we simply # do the test suite witout configure/compile/run self._readLog() @@ -288,8 +455,14 @@ def checkOnly(self, filename, tolerance=0): self.nonRegressionTest(filename, tolerance) def standardTest(self): - if os.path.exists(os.path.join('python', 'testidefix.py')): - os.chdir("python") + # log and in fake mode do not execute. + self.addLog({"call": "standardTest", "args":{}}) + if self.fake: + return + + if os.path.exists(os.path.join(self.problemDir, 'python', 'testidefix.py')): + oldPwd = os.getcwd() + os.chdir(os.path.join(self.problemDir, "python")) comm = [sys.executable, "testidefix.py"] if self.noplot: comm.append("-noplot") @@ -304,12 +477,19 @@ def standardTest(self): print("***************************************************"+bcolors.ENDC) raise e print(bcolors.OKCYAN+"Standard test succeeded"+bcolors.ENDC) - os.chdir("..") + os.chdir(oldPwd) else: print(bcolors.WARNING+"No standard testidefix.py for this test"+bcolors.ENDC) sys.stdout.flush() def nonRegressionTest(self, filename,tolerance=0): + # log and in fake mode do not execute. 
+ self.addLog({"call": "nonRegressionTest", "args":{ + "filename": filename, + "tolerance": tolerance + }}) + if self.fake: + return fileref=os.path.join(self.referenceDirectory, self.testDir, self._getReferenceFilename()) if not(os.path.exists(fileref)): @@ -333,6 +513,14 @@ def nonRegressionTest(self, filename,tolerance=0): sys.stdout.flush() def compareDump(self, file1, file2,tolerance=0): + self.addLog({"call": "compareDump", "args":{ + "file1": file1, + "file2": file2, + "tolerance": tolerance, + }}) + if self.fake: + return + Vref=readDump(file1) Vtest=readDump(file2) error=self._computeError(Vref,Vtest) @@ -347,6 +535,13 @@ def compareDump(self, file1, file2,tolerance=0): def makeReference(self,filename): + # log and in fake mode do not execute. + self.addLog({"call": "compareDump", "args":{ + "filename": filename, + }}) + if self.fake: + return + self._readLog() targetDir = os.path.join(self.referenceDirectory,self.testDir) if not os.path.exists(targetDir): diff --git a/pytools/idfx_test_gen.py b/pytools/idfx_test_gen.py new file mode 100644 index 000000000..e552c321d --- /dev/null +++ b/pytools/idfx_test_gen.py @@ -0,0 +1,249 @@ +##################################################################################### +# Idefix MHD astrophysical code +# Copyright(C) Sébastien Valat +# and other code contributors +# Licensed under CeCILL 2.1 License, see COPYING for more information +##################################################################################### + +import copy +import pytest + +DO_NOT_LOOP_ON = ['restart_no_overwrite', "dec", "multirun", "check_file_produced"] + +class IdefixDirTestGenerator: + ''' + Class used to generate the various configuration to run by parsing the + files `testme.py` found in the hierarchy of the /test directory of Idefix. + ''' + + def __init__(self, currentTestFile: str, name: str = ""): + ''' + Constructor or the class. + + Args: + currentTestFile (str): + Path the the current python file. 
Normally you + simply pass __file__ to this parameter. + name (str): + Define a name for the test, can be empty. + ''' + + self.currentTestFile = currentTestFile + self.currentTestName = name + + # generate the list of configs to run + def genTestConfigs(self, names:str, params, whenClauses = {}) -> list: + ''' + Generate the the list of configurations as pytest parameters. + It will unpack the configuration set by looping on all combinations defined + by the given sets. + + Args: + names (str): + Comma separated list of variables do consider to build the + name of the file. + params (dict|list): + A configuration set as a dictionnary or a list of + configuration set. + whenCaluses (dict|list): + Provide a set of clauses to apply after unpacking + the configuration so we can patch some values depending on some others. + + Returns: + A list of pytest.param() ready to be fiven to parametrized pytest functions. + ''' + # get name ordering list + nameList = names.split(',') + + # gen list of complete configs + all_configs = [] + if isinstance(params, dict): + all_configs += self._genOneConfigSeries(names, params) + elif isinstance(params, list): + for p in params: + all_configs += self._genOneConfigSeries(names, p) + else: + raise Exception("Should never be called !") + + # convert as parametrize with nice name + result = [] + for config in all_configs: + # append the file + config['testfile'] = self.currentTestFile + config['testname'] = self.currentTestName + # gen name + nameParts = [self.currentTestName] + for name in nameList: + if isinstance(config[name], bool): + if config[name]: + nameParts.append(name) + elif isinstance(config[name], str): + nameParts.append(str(config[name])) + else: + nameParts.append(f"{name}-{config[name]}") + confName = "-".join(nameParts) + + # apply when clause + config = self._applyWhen(config, whenClauses) + + result.append(pytest.param(config, id=confName)) + + # ok + return result + + def extractNamingParameters(self, params) -> 
list: + ''' + Loop on the parameters and check automatically what are the list of variable parameters. + + Args: + params (list|dict): + The list of configuration sets to scan or a single set. + + Returns: + The list of names of the variable parameters. + ''' + + # if not a list make a list + if not isinstance(params, list): + params = [params] + + # see params + seen = {} + variables = [] + + # loop + for param_set in params: + for key, value in param_set.items(): + if key in variables: + pass + elif key in DO_NOT_LOOP_ON: + pass + elif isinstance(value, list): + variables.append(key) + elif key in seen and seen[key] != value: + variables.append(key) + elif key not in seen: + seen[key] = value + + # by default sort by alphabetic order about var names + # TODO make something better by assigning a priority to vars + variables.sort() + + # ok + return ','.join(variables) + + def _genNextLevelCombinations(self, input: list, paramName: str, paramValues: list) -> list: + ''' + Take an input case list and unpack the given parameter to build the new combinations. + + Args: + input (list): + The incoming list of combinations already unpacked before this call. + paramName (str): + Name of the parameter to unpack. + paramValues (list): + The list of values to unpack and to build combinations for. + + Returns: + The updated list of run sets. + ''' + result = [] + for entry in input: + for value in paramValues: + v = copy.deepcopy(entry) + v[paramName] = value + result.append(v) + return result + + def _genOneConfigSeries(self, names: str, config: dict) -> list: + ''' + Generate the the list of configurations as pytest parameters. + It will unpack the configuration set by looping on all combinations defined + by the given sets. + + Args: + names (str): + Comma separated list of variables do consider to build the + name of the file. + config (dict): + A configuration set as a dictionnary. 
+ + Returns: + A list of pytest.param() ready to be fiven to parametrized pytest functions. + ''' + # get name ordering list + nameList = names.split(',') + + # if there is ini in the list we put it at the end + loopOrder = nameList.copy() + if 'ini' in loopOrder: + loopOrder.remove('ini') + loopOrder.append('ini') + + # init core with everything not a list + core = {} + for key, value in config.items(): + if isinstance(value, list) and key not in DO_NOT_LOOP_ON: + assert key in nameList, f"All variable parameteres should be ordered in the names list, '{key}' is not." + else: + core[key] = copy.deepcopy(value) + + # at start we have only default core + result = [core] + + # loop + for key in loopOrder: + value = config[key] + assert isinstance(value, list), f"This parameter is marked as a list but is not a list : {key}={value} !" + result = self._genNextLevelCombinations(result, key, value) + + # ok + return result + + def _matchWhenClause(self, config: dict, when_clause: dict) -> bool: + ''' + Check the matching of a given when clause on the given configuration. + + Args: + config (dict): The configuration. + when_clause (dict): The when clause to check. + + Returns: + True if the clause, match, False otherwise. + ''' + for key, value in when_clause.items(): + if config[key] != value: + return False + return True + + def _applyWhen(self, config: dict, when: dict) -> dict: + ''' + Check if the when clause applies and apply if if any. + + Args: + config (dict): The configuration. + when_clause (dict): The when clause to check and apply. + + Returns: + The fixed config. 
+ ''' + # nothing to do + if when == {}: + return config + + # clone + result = copy.deepcopy(config) + + # loop on when + if isinstance(when, list): + for clause in when: + if self._matchWhenClause(config, clause['conditions']): + result.update(clause['apply']) + elif isinstance(when, dict): + if self._matchWhenClause(config, when['conditions']): + result.update(when['apply']) + else: + raise Exception(f"Invalid type for 'when' : {when}") + + # ok + return result diff --git a/pytools/idfx_test_run.py b/pytools/idfx_test_run.py new file mode 100644 index 000000000..d14de0b41 --- /dev/null +++ b/pytools/idfx_test_run.py @@ -0,0 +1,257 @@ +##################################################################################### +# Idefix MHD astrophysical code +# Copyright(C) Sébastien Valat +# and other code contributors +# Licensed under CeCILL 2.1 License, see COPYING for more information +##################################################################################### + +import os +import sys +import json +import glob +import copy +import pytest +# idefix test class +import pytools.idfx_test as tst +from pytools.idfx_test_gen import IdefixDirTestGenerator +from contextlib import contextmanager + +@contextmanager +def moveInDir(path): + ''' + Change current working dir for the given directoy for what is inside the + with statement. + ''' + oldpwd = os.getcwd() + os.chdir(path) + try: + yield + finally: + os.chdir(oldpwd) + +class IdexPytestRunner: + ''' + Implement the Idefix pytest runner to scan and run all the tests described by the files + testme.json into the /test directory of Idefix sources. 
+ ''' + + def __init__(self, parentScriptFile: str): + self.currentTestFile="" + self.currentTestRunner: tst.idfxTest=None + self.parentScritFile=parentScriptFile + self.filterSubdir = os.environ.get("IDEFIX_TEST_FILTER_SUBDIR", "./test/") + + def _validateNaming(self, namings: str, autoExtracted: str, file: str): + names = namings.split(',') + for n in autoExtracted.split(','): + if not n in names: + raise Exception(f"Naming parameter list not match the auto-detectection, some are missinge. You gave : '{names}', detected '{autoExtracted}' into {file}") + + def _makeVariableArgAsList(self, namings: str, variants) -> list: + # make as a list + if isinstance(variants, list) == False: + variants = [variants] + + # split namings + namings = namings.split(",") + + # loop on each + for variant in variants: + for name in namings: + if name in variant and isinstance(variant[name], list) == False: + variant[name] = [variant[name]] + + def genTests(self) -> list: + sourceDir = os.path.dirname(self.parentScritFile) + + # loop over all tests + result = [] + with moveInDir(sourceDir): + # if missing / + if self.filterSubdir != "" and self.filterSubdir[-1] != "/": + self.filterSubdir += "/" + + # walk in test dir to find the tests & sort by name + testfiles = glob.glob(self.filterSubdir + "**/testme.json", recursive=True) + testfiles.sort() + + # loop over each + for testfile in testfiles: + try: + # calc some paths + testfileRelPath = os.path.relpath(testfile, os.path.join(sourceDir, 'test')) + testfilePath = os.path.abspath(os.path.join('test',testfileRelPath)) + testfileDir = os.path.dirname(testfileRelPath) + + # load json & build the inner test combinations + with open(testfilePath, 'r') as fp: + test = json.load(fp) + idefixTestGenerator=IdefixDirTestGenerator(testfilePath, testfileDir) + if 'namings' in test: + namings = test['namings'] + autoExtracted = idefixTestGenerator.extractNamingParameters(test['variants']) + self._validateNaming(namings, autoExtracted, 
testfilePath) + else: + namings = idefixTestGenerator.extractNamingParameters(test['variants']) + + # required to simplify the algos later, if var is listed as variable, we need to loop over it. + self._makeVariableArgAsList(namings, test['variants']) + + # gen + result += idefixTestGenerator.genTestConfigs(namings, test['variants'], test.get('when', {})) + except Exception as e: + raise Exception(f"Fail to generate tests from {testfileRelPath} : {e}") + + # ok + return result + + def run(self, config: dict) -> None: + # clone before modify to not modity for caller + config = copy.deepcopy(config) + + # print config + print("***************************************************") + print(json.dumps(config, indent='\t')) + print("***************************************************") + + # extract some infos for local usage + testfile = config["testfile"] + dumpname = config['dumpname'] + testname = config["testname"] + tolerance = config.get("tolerance", 0) + definitionFile = config.get("definitionFile", "") + standardTest = config.get("standardTest", True) + nonRegressionTest = config.get("nonRegressionTest", True) + nonRegressionTestIni = config.get("nonRegressionTestIni", None) + check_file_produced = config.get("check_file_produced", []) + problemDir = os.path.dirname(testfile) + + # cleanup some keyword not handled at the + # level of idx_test so we don't perturbate it + del config['dumpname'] + if 'definitionFile' in config: + del config['definitionFile'] + if 'tolerance' in config: + del config['tolerance'] + if 'standardTest' in config: + del config['standardTest'] + if 'nonRegressionTest' in config: + del config['nonRegressionTest'] + if 'nonRegressionTestIni' in config: + del config['nonRegressionTestIni'] + + # if switch from test, rebuild the runner (a runner make for one dir) + if self.currentTestFile != testfile: + self.currentTestRunner = tst.idfxTest(testfile, name=testname) + self.currentTestFile = testfile + + # run + with moveInDir(problemDir): + 
self._runNonRegression(dumpname, config['ini'], config, tolerance=tolerance, definitionFile=definitionFile, standardTest=standardTest, nonReg=nonRegressionTest, nonRegIni=nonRegressionTestIni) + + # check produced + for file in check_file_produced: + if not os.path.exists(file): + raise Exception(f"Don't find expected file to be produced by the run : {file} !") + + def _runNonRegression(self, dumpname, ini, config_override, tolerance=0, definitionFile="", nonReg=True, nonRegIni=None, standardTest=True, first_run_ini=None,first_run_dumpname=None,configure_and_compile=True): + if 'multirun' in config_override: + self._runNonRegMultirun(dumpname, ini, config_override, tolerance=tolerance, nonReg=nonReg, nonRegIni=nonRegIni, standardTest=standardTest, configure_and_compile=configure_and_compile, definitionFile=definitionFile, first_run_ini=first_run_ini, first_run_dumpname=first_run_dumpname) + else: + # single basic run + self._runNonRegSingleRun(dumpname, ini, config_override, tolerance=tolerance, nonReg=nonReg, standardTest=standardTest, configure_and_compile=configure_and_compile, definitionFile=definitionFile, first_run_ini=first_run_ini, first_run_dumpname=first_run_dumpname) + + def _runNonRegMultirun(self, dumpname, ini, config_override, tolerance=0, definitionFile="", nonReg=True, nonRegIni=None, standardTest=True, first_run_ini=None,first_run_dumpname=None,configure_and_compile=True): + # check + assert 'multirun' in config_override + + # loop over runs + for run in config_override['multirun']: + # copy config + run_config = copy.deepcopy(config_override) + + # patch a bit + del run_config['multirun'] + run_config.update(run) + nonReg=run_config.get('nonRegressionTest', nonReg) + dumpname=run_config.get('dumpname', dumpname) + if 'nonRegressionTest' in run_config: + del run_config['nonRegressionTest'] + standardTest=run_config.get('standardTest', standardTest) + if 'standardTest' in run_config: + del run_config['standardTest'] + + # make single run + 
self._runNonRegSingleRun(dumpname, run_config['ini'], run_config, definitionFile=definitionFile, tolerance=tolerance, nonReg=nonReg, nonRegIni=nonRegIni, standardTest=standardTest) + + def _runNonRegSingleRun(self, dumpname, ini, config_override, tolerance=0, definitionFile="", nonReg=True, nonRegIni=None, standardTest=True, first_run_ini=None,first_run_dumpname=None,configure_and_compile=True): + # build the runner + idefixTest = self.currentTestRunner + + # handle special override which should not go into + # idefixTest because it is not supported by the idfx_test layer. + config_override = copy.deepcopy(config_override) + nonRegIni = config_override.get("nonRegressionTestIni", nonRegIni) + if 'nonRegressionTestIni' in config_override: + del config_override['nonRegressionTestIni'] + + # apply config + idefixTest.applyConfig(config_override) + + # recompile if needed + if configure_and_compile: + idefixTest.configure(override=config_override, definitionFile=definitionFile) + idefixTest.compile() + + if first_run_ini: + idefixTest.run(inputFile=first_run_ini) + if nonReg: + idefixTest.nonRegressionTest(filename=first_run_dumpname, tolerance=tolerance) + + # Test the restart option + file_mtime={} + if not idefixTest.fake: + for file in idefixTest.restart_no_overwrite: + file_mtime[file] = os.path.getmtime(file) + + # restart + if idefixTest.restart: + restart = 1 + else: + restart = -1 + + # run + idefixTest.run(inputFile=ini, restart=restart) + + # regen ref if needed + if idefixTest.init: + idefixTest.makeReference(filename=dumpname) + + # check outputs + if standardTest: + idefixTest.standardTest() + if nonReg: + if nonRegIni: + idefixTest.inifile = nonRegIni + idefixTest.nonRegressionTest(filename=dumpname, tolerance=tolerance) + + # check that we didn't overrite the file during the restart + if not idefixTest.fake: + for file in idefixTest.restart_no_overwrite: + assert file_mtime[file] == os.path.getmtime(file), f"Dump file {file} was overwritten on restart" 
+ + def main(self, all: bool = False): + if all: + sys.argv.append("-all") + idefixTest = tst.idfxTest(self.parentScritFile, name="main") + os.environ["IDEFIX_TEST_FILTER_SUBDIR"] = idefixTest.filterSubdir + + if idefixTest.all: + pytest.main(['-v', '--no-header', '--junit-xml=idefix-tests.junit.xml', '--tb=short'] + idefixTest.remainingArgs + [self.parentScritFile]) + else: + assert False, "Not yet supported !" + #elif self.check: + # idefixTest.checkOnly(filename=dumpname, tolerance=tolerance) + #else: + # for ini in ini_list: + # self.runNonRegression(dumpname, ini, {}, tolerance=tolerance) diff --git a/pytools/pytest.ini b/pytools/pytest.ini new file mode 100644 index 000000000..d1f826516 --- /dev/null +++ b/pytools/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +python_files="test_*.py" diff --git a/pytools/tests/__init__.py b/pytools/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pytools/tests/test/pb1/testme.json b/pytools/tests/test/pb1/testme.json new file mode 100644 index 000000000..d8ddad44f --- /dev/null +++ b/pytools/tests/test/pb1/testme.json @@ -0,0 +1,11 @@ +{ + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-implicit.ini"], + "noplot": true, + "reconstruction": 2, + "tolerance": 1e-14 + } + ] +} diff --git a/pytools/tests/test/pb2/testme.json b/pytools/tests/test/pb2/testme.json new file mode 100644 index 000000000..96c1753ab --- /dev/null +++ b/pytools/tests/test/pb2/testme.json @@ -0,0 +1,18 @@ +{ + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-implicit.ini"], + "noplot": true, + "reconstruction": 2, + "tolerance": 1e-14 + }, + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-implicit.ini"], + "noplot": false, + "reconstruction": 2, + "tolerance": 1e-14 + } + ] +} diff --git a/pytools/tests/test_idfx_test_gen.py b/pytools/tests/test_idfx_test_gen.py new file mode 100644 index 000000000..fc4f6261e --- /dev/null +++ 
b/pytools/tests/test_idfx_test_gen.py @@ -0,0 +1,208 @@ +##################################################################################### +# Idefix MHD astrophysical code +# Copyright(C) Sébastien Valat +# and other code contributors +# Licensed under CeCILL 2.1 License, see COPYING for more information +##################################################################################### + +from ..idfx_test_gen import IdefixDirTestGenerator +import pytest + +def test_extractNamingParameters(): + # build generator + gen = IdefixDirTestGenerator(__file__, "unit-test") + + lst = gen.extractNamingParameters([ + { + "dec": [1,2,3], + "a": 10, + "b": 11, + "c": [True, False] + }, + { + "dec": [1,2,4], + "a": 10, + "b": 12, + "d": [True, False] + }, + ]) + + # valid + assert lst == 'b,c,d' + +def test_genNextLevelCombinations(): + # build generator + gen = IdefixDirTestGenerator(__file__, "unit-test") + + # init with single element + core = {} + init = [core] + + # gen first set of combinations + lst1 = gen._genNextLevelCombinations(init, "mpi", [True, False]) + assert lst1 == [ + {"mpi": True}, + {"mpi": False}, + ] + + # gen first second set of combinations + lst3 = gen._genNextLevelCombinations(lst1, "name", ["a", "b"]) + assert lst3 == [ + {"mpi": True, "name": "a"}, + {"mpi": True, "name": "b"}, + {"mpi": False, "name": "a"}, + {"mpi": False, "name": "b"}, + ] + +def test_genOneConfigSeries(): + # build generator + gen = IdefixDirTestGenerator(__file__, "unit-test") + + # gen + result = gen._genOneConfigSeries("mpi,name", { + "mpi": [True, False], + "name": ["a", "b"] + }) + + # check + assert result == [ + { + 'mpi': True, + 'name': 'a', + },{ + 'mpi': True, + 'name': 'b', + },{ + 'mpi': False, + 'name': 'a', + },{ + 'mpi': False, + 'name': 'b', + }, + ] + +def test_matchWhenClause_single(): + # build generator + gen = IdefixDirTestGenerator(__file__, "unit-test") + + # calls + assert gen._matchWhenClause({"a":10, "b": 11}, {"a":10}) == True + assert 
gen._matchWhenClause({"a":10, "b": 11}, {"a":11}) == False + assert gen._matchWhenClause({"a":10, "b": 11}, {"b":11}) == True + assert gen._matchWhenClause({"a":10, "b": 11}, {"b":10}) == False + +def test_matchWhenClause_and(): + # build generator + gen = IdefixDirTestGenerator(__file__, "unit-test") + + # calls + assert gen._matchWhenClause({"a":10, "b": 11}, {"a":10, "b":11}) == True + assert gen._matchWhenClause({"a":10, "b": 11}, {"a":11, "b":11}) == False + +def test_applyWhen_none(): + # build generator + gen = IdefixDirTestGenerator(__file__, "unit-test") + + # call when clause + res = gen._applyWhen({ + "a": 10, + "b": 11 + }, when={}) + + # check result + assert res == {"a": 10, "b": 11} + +def test_applyWhen_single_apply_yes(): + # build generator + gen = IdefixDirTestGenerator(__file__, "unit-test") + + # call when clause + res = gen._applyWhen({ + "a": 10, + "b": 11 + }, when={ + "conditions": { + "a": 10, + }, + "apply": { + "b": 12 + } + }) + + # check result + assert res == {"a": 10, "b": 12} + +def test_applyWhen_single_apply_no(): + # build generator + gen = IdefixDirTestGenerator(__file__, "unit-test") + + # call when clause + res = gen._applyWhen({ + "a": 10, + "b": 11 + }, when={ + "conditions": { + "a": 9, + }, + "apply": { + "b": 12 + } + }) + + # check result + assert res == {"a": 10, "b": 11} + +def test_applyWhen_list_apply_yes(): + # build generator + gen = IdefixDirTestGenerator(__file__, "unit-test") + + # call when clause + res = gen._applyWhen({ + "a": 10, + "b": 11 + }, when=[ + { + "conditions": { + "a": 10, + }, + "apply": { + "b": 12 + } + }, + { + "conditions": { + "a": 13, + }, + "apply": { + "b": 13 + } + } + ]) + + # check result + assert res == {"a": 10, "b": 12} + +def test_gen_full(): + # build generator + gen = IdefixDirTestGenerator(__file__, "unit-test") + + # gen + result = gen.genTestConfigs("mpi,name", { + "mpi": [True, False], + "name": ["a", "b"] + }, { + "conditions": { + "mpi": True + }, + "apply": { + "extra": 
10, + } + }) + + # check + assert result == [ + pytest.param({'mpi': True, 'name': 'a', 'testfile': __file__, 'testname': 'unit-test', 'extra': 10}, id='unit-test-mpi-a'), + pytest.param({'mpi': True, 'name': 'b', 'testfile': __file__, 'testname': 'unit-test', 'extra': 10}, id='unit-test-mpi-b'), + pytest.param({'mpi': False, 'name': 'a', 'testfile': __file__, 'testname': 'unit-test'}, id='unit-test-a'), + pytest.param({'mpi': False, 'name': 'b', 'testfile': __file__, 'testname': 'unit-test'}, id='unit-test-b'), + ] diff --git a/pytools/tests/test_idfx_test_run.py b/pytools/tests/test_idfx_test_run.py new file mode 100644 index 000000000..8843705ee --- /dev/null +++ b/pytools/tests/test_idfx_test_run.py @@ -0,0 +1,88 @@ +##################################################################################### +# Idefix MHD astrophysical code +# Copyright(C) Sébastien Valat +# and other code contributors +# Licensed under CeCILL 2.1 License, see COPYING for more information +##################################################################################### + +from ..idfx_test_run import IdexPytestRunner +import pytest +import os + +def test_genTests(): + # build runner + runner = IdexPytestRunner(__file__) + + # dir + dir = os.path.dirname(__file__) + + # generate + result = runner.genTests() + assert result == [ + pytest.param( + { + 'dumpname': 'dump.0001.dmp', + 'noplot': True, + 'reconstruction': 2, + 'tolerance': 1e-14, + 'ini': 'idefix.ini', + 'testfile': dir + '/test/pb1/testme.json', + 'testname': 'pb1' + }, marks=(), id='pb1-idefix.ini' + ), + pytest.param( + { + 'dumpname': 'dump.0001.dmp', + 'noplot': True, + 'reconstruction': 2, + 'tolerance': 1e-14, + 'ini': 'idefix-implicit.ini', + 'testfile': dir + '/test/pb1/testme.json', + 'testname': 'pb1' + }, marks=(), id='pb1-idefix-implicit.ini' + ), + pytest.param( + { + 'dumpname': 'dump.0001.dmp', + 'noplot': True, + 'reconstruction': 2, + 'tolerance': 1e-14, + 'ini': 'idefix.ini', + 'testfile': dir + 
'/test/pb2/testme.json', + 'testname': 'pb2' + }, marks=(), id='pb2-idefix.ini-noplot' + ), + pytest.param( + { + 'dumpname': 'dump.0001.dmp', + 'noplot': True, + 'reconstruction': 2, + 'tolerance': 1e-14, + 'ini': 'idefix-implicit.ini', + 'testfile': dir + '/test/pb2/testme.json', + 'testname': 'pb2' + }, marks=(), id='pb2-idefix-implicit.ini-noplot' + ), + pytest.param( + { + 'dumpname': 'dump.0001.dmp', + 'noplot': False, + 'reconstruction': 2, + 'tolerance': 1e-14, + 'ini': 'idefix.ini', + 'testfile': dir + '/test/pb2/testme.json', + 'testname': 'pb2' + }, marks=(), id='pb2-idefix.ini' + ), + pytest.param( + { + 'dumpname': 'dump.0001.dmp', + 'noplot': False, + 'reconstruction': 2, + 'tolerance': 1e-14, + 'ini': 'idefix-implicit.ini', + 'testfile': dir + '/test/pb2/testme.json', + 'testname': 'pb2' + }, marks=(), id='pb2-idefix-implicit.ini' + ), + ] diff --git a/reference b/reference index c4082b99a..32499c14c 160000 --- a/reference +++ b/reference @@ -1 +1 @@ -Subproject commit c4082b99a4c542def3177c96cb35b1c9d9002f18 +Subproject commit 32499c14cfacd05e1c9499bd23198a53c416c3c2 diff --git a/src/dataBlock/dataBlock.cpp b/src/dataBlock/dataBlock.cpp index 81874cef6..2877a8346 100644 --- a/src/dataBlock/dataBlock.cpp +++ b/src/dataBlock/dataBlock.cpp @@ -367,6 +367,7 @@ real DataBlock::ComputeTimestep() { void DataBlock::DeriveVectorPotential() { if constexpr(DefaultPhysics::mhd) { #ifdef EVOLVE_VECTOR_POTENTIAL + hydro->emf->EnforceVectorPotentialBoundary(hydro->Ve); hydro->emf->ComputeMagFieldFromA(hydro->Ve, hydro->Vs); #endif } diff --git a/src/dataBlock/dataBlockHost.cpp b/src/dataBlock/dataBlockHost.cpp index d570648fa..c7811b391 100644 --- a/src/dataBlock/dataBlockHost.cpp +++ b/src/dataBlock/dataBlockHost.cpp @@ -21,11 +21,11 @@ DataBlockHost::DataBlockHost(DataBlock& datain) { // Create mirrors (should be mirror_view) for(int dir = 0 ; dir < 3 ; dir++) { - x[dir] = Kokkos::create_mirror_view(data->x[dir]); - xr[dir] = 
Kokkos::create_mirror_view(data->xr[dir]); - xl[dir] = Kokkos::create_mirror_view(data->xl[dir]); - dx[dir] = Kokkos::create_mirror_view(data->dx[dir]); - A[dir] = Kokkos::create_mirror_view(data->A[dir]); + x[dir] = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->x[dir]); + xr[dir] = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->xr[dir]); + xl[dir] = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->xl[dir]); + dx[dir] = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->dx[dir]); + A[dir] = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->A[dir]); } np_tot = data->np_tot; @@ -47,30 +47,30 @@ DataBlockHost::DataBlockHost(DataBlock& datain) { // TO BE COMPLETED... - dV = Kokkos::create_mirror_view(data->dV); - Vc = Kokkos::create_mirror_view(data->hydro->Vc); - Uc = Kokkos::create_mirror_view(data->hydro->Uc); - InvDt = Kokkos::create_mirror_view(data->hydro->InvDt); + dV = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->dV); + Vc = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->hydro->Vc); + Uc = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->hydro->Uc); + InvDt = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->hydro->InvDt); #if MHD == YES - Vs = Kokkos::create_mirror_view(data->hydro->Vs); + Vs = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->hydro->Vs); this->haveCurrent = data->hydro->haveCurrent; if(data->hydro->haveCurrent) { - J = Kokkos::create_mirror_view(data->hydro->J); + J = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->hydro->J); } #ifdef EVOLVE_VECTOR_POTENTIAL - Ve = Kokkos::create_mirror_view(data->hydro->Ve); + Ve = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->hydro->Ve); #endif - D_EXPAND( Ex3 = Kokkos::create_mirror_view(data->hydro->emf->ez); , + D_EXPAND( Ex3 = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->hydro->emf->ez); , , - Ex1 = Kokkos::create_mirror_view(data->hydro->emf->ex); - Ex2 = Kokkos::create_mirror_view(data->hydro->emf->ey); ) + Ex1 = 
Kokkos::create_mirror_view(Kokkos::HostSpace(), data->hydro->emf->ex); + Ex2 = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->hydro->emf->ey); ) #endif if(haveDust) { dustVc = std::vector>(data->dust.size()); for(int i = 0 ; i < data->dust.size() ; i++) { - dustVc[i] = Kokkos::create_mirror_view(data->dust[i]->Vc); + dustVc[i] = Kokkos::create_mirror_view(Kokkos::HostSpace(), data->dust[i]->Vc); } } @@ -80,7 +80,8 @@ DataBlockHost::DataBlockHost(DataBlock& datain) { this->coarseningDirection = data->coarseningDirection; for(int dir = 0 ; dir < 3 ; dir++) { if(coarseningDirection[dir]) { - coarseningLevel[dir] = Kokkos::create_mirror_view(data->coarseningLevel[dir]); + coarseningLevel[dir] = Kokkos::create_mirror_view(Kokkos::HostSpace(), + data->coarseningLevel[dir]); } } } diff --git a/src/dataBlock/dumpToFile.cpp b/src/dataBlock/dumpToFile.cpp index 35b60b81d..51347e6ed 100644 --- a/src/dataBlock/dumpToFile.cpp +++ b/src/dataBlock/dumpToFile.cpp @@ -45,14 +45,15 @@ void DataBlock::DumpToFile(std::string filebase) { // TODO(lesurg) Make datablock a friend of hydro to get the Riemann flux? 
- //IdefixArray4D::HostMirror locFlux = Kokkos::create_mirror_view(this->hydro->FluxRiemann); + //IdefixArray4D::HostMirror locFlux = Kokkos::create_mirror_view(Kokkos::HostSpace(), + // this->hydro->FluxRiemann); //Kokkos::deep_copy(locFlux, this->FluxRiemann); #if MHD == YES IdefixArray4D::HostMirror locJ; if(hydro->haveCurrent) { - locJ = Kokkos::create_mirror_view(this->hydro->J); + locJ = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->hydro->J); Kokkos::deep_copy(locJ, this->hydro->J); } #endif @@ -120,7 +121,8 @@ void DataBlock::DumpToFile(std::string filebase) { // Write Vs #if MHD == YES // Write Vs - IdefixArray4D::HostMirror locVs = Kokkos::create_mirror_view(this->hydro->Vs); + IdefixArray4D::HostMirror locVs = Kokkos::create_mirror_view(Kokkos::HostSpace(), + this->hydro->Vs); Kokkos::deep_copy(locVs,this->hydro->Vs); dims[0] = this->np_tot[IDIR]+IOFFSET; dims[1] = this->np_tot[JDIR]+JOFFSET; @@ -137,7 +139,8 @@ void DataBlock::DumpToFile(std::string filebase) { dims[2] = this->np_tot[KDIR]; std::snprintf(fieldName,NAMESIZE,"Ex3"); - IdefixArray3D::HostMirror locE = Kokkos::create_mirror_view(this->hydro->emf->ez); + IdefixArray3D::HostMirror locE = Kokkos::create_mirror_view(Kokkos::HostSpace(), + this->hydro->emf->ez); Kokkos::deep_copy(locE,this->hydro->emf->ez); WriteVariable(fileHdl, 3, dims, fieldName, locE.data()); @@ -152,7 +155,8 @@ void DataBlock::DumpToFile(std::string filebase) { if(hydro->haveCurrent) { - IdefixArray4D::HostMirror locJ = Kokkos::create_mirror_view(this->hydro->J); + IdefixArray4D::HostMirror locJ = Kokkos::create_mirror_view(Kokkos::HostSpace(), + this->hydro->J); Kokkos::deep_copy(locJ,this->hydro->J); dims[0] = this->np_tot[IDIR]; dims[1] = this->np_tot[JDIR]; diff --git a/src/dataBlock/fargo.cpp b/src/dataBlock/fargo.cpp index a509ef886..7aa2d26a5 100644 --- a/src/dataBlock/fargo.cpp +++ b/src/dataBlock/fargo.cpp @@ -138,10 +138,15 @@ Fargo::Fargo(Input &input, int nmax, DataBlock *data) { for(int i=0 ; i < nvar ; 
i++) { vars.push_back(i); } + #if GEOMETRY == CARTESIAN || GEOMETRY == POLAR + const int dirShift = JDIR; + #elif GEOMETRY == SPHERICAL + const int dirShift = KDIR; + #endif #if MHD == YES - this->mpi.Init(data->mygrid, vars, this->nghost.data(), data->np_int.data(), true); + this->mpiExchanger.Init(data->mygrid, dirShift, vars, this->nghost, data->np_int, true); #else - this->mpi.Init(data->mygrid, vars, this->nghost.data(), data->np_int.data()); + this->mpiExchanger.Init(data->mygrid, dirShift, vars, this->nghost, data->np_int, false); #endif } #endif diff --git a/src/dataBlock/fargo.hpp b/src/dataBlock/fargo.hpp index 099c99350..a1efcb2b6 100644 --- a/src/dataBlock/fargo.hpp +++ b/src/dataBlock/fargo.hpp @@ -11,7 +11,7 @@ #include #include "idefix.hpp" #ifdef WITH_MPI - #include "mpi.hpp" + #include "exchanger.hpp" #endif #include "physics.hpp" @@ -63,7 +63,7 @@ class Fargo { IdefixArray4D scrhVs; #ifdef WITH_MPI - Mpi mpi; // Fargo-specific MPI layer + Exchanger mpiExchanger; // Fargo-specific MPI layer #endif std::array beg; @@ -290,11 +290,7 @@ void Fargo::StoreToScratch(Fluid* hydro) { } #if WITH_MPI if(haveDomainDecomposition) { - #if GEOMETRY == CARTESIAN || GEOMETRY == POLAR - this->mpi.ExchangeX2(scrhUc, scrhVs); - #elif GEOMETRY == SPHERICAL - this->mpi.ExchangeX3(scrhUc, scrhVs); - #endif + this->mpiExchanger.Exchange(scrhUc, scrhVs); } #endif } diff --git a/src/fluid/RiemannSolver/MHDsolvers/storeFlux.hpp b/src/fluid/RiemannSolver/MHDsolvers/storeFlux.hpp index c50cadb83..921a312ac 100644 --- a/src/fluid/RiemannSolver/MHDsolvers/storeFlux.hpp +++ b/src/fluid/RiemannSolver/MHDsolvers/storeFlux.hpp @@ -87,9 +87,9 @@ KOKKOS_FORCEINLINE_FUNCTION void K_StoreHLLD( const int i, const int j, const in const IdefixArray3D &aR, const IdefixArray3D &dL, const IdefixArray3D &dR) { - EXPAND( const int Xn = DIR+MX1; , - const int Xt = (DIR == IDIR ? MX2 : MX1); , - const int Xb = (DIR == KDIR ? 
MX2 : MX3); ) + EXPAND( const int Xn = DIR+MX1; , + const int Xt = (DIR == IDIR ? MX2 : MX1); , + [[maybe_unused]] const int Xb = (DIR == KDIR ? MX2 : MX3); ) // Compute magnetic pressure [[maybe_unused]] real ptR, ptL; @@ -201,7 +201,7 @@ KOKKOS_FORCEINLINE_FUNCTION void K_StoreHLLD( const int i, const int j, const in } #if COMPONENTS > 1 - EXPAND( Et(k,j,i) = -st*(ar*vL[Xt] - al*vR[Xt])*scrh; , + D_EXPAND( Et(k,j,i) = -st*(ar*vL[Xt] - al*vR[Xt])*scrh; , , Eb(k,j,i) = -sb*(ar*vL[Xb] - al*vR[Xb])*scrh; ); #endif diff --git a/src/fluid/boundary/axis.cpp b/src/fluid/boundary/axis.cpp index e447aefc8..47f0c2954 100644 --- a/src/fluid/boundary/axis.cpp +++ b/src/fluid/boundary/axis.cpp @@ -22,45 +22,36 @@ void Axis::ShowConfig() { } -void Axis::SymmetrizeEx1Side(int jref) { +void Axis::SymmetrizeEx1Side(int jref, IdefixArray3D Ex1) { #if DIMENSIONS == 3 - IdefixArray3D Ex1 = this->ex; IdefixArray1D Ex1Avg = this->Ex1Avg; - if(isTwoPi) { - idefix_for("Ex1_ini",0,data->np_tot[IDIR], - KOKKOS_LAMBDA(int i) { - Ex1Avg(i) = ZERO_F; - }); - - idefix_for("Ex1_Symmetrize",data->beg[KDIR],data->end[KDIR],0,data->np_tot[IDIR], - KOKKOS_LAMBDA(int k,int i) { - Kokkos::atomic_add(&Ex1Avg(i), Ex1(k,jref,i)); + idefix_for("Ex1_ini",0,data->np_tot[IDIR], + KOKKOS_LAMBDA(int i) { + Ex1Avg(i) = ZERO_F; }); - if(needMPIExchange) { - #ifdef WITH_MPI - Kokkos::fence(); - // sum along all of the processes on the same r - MPI_Allreduce(MPI_IN_PLACE, Ex1Avg.data(), data->np_tot[IDIR], realMPI, - MPI_SUM, data->mygrid->AxisComm); - #endif - } - int ncells=data->mygrid->np_int[KDIR]; - - idefix_for("Ex1_Store",0,data->np_tot[KDIR],0,data->np_tot[IDIR], - KOKKOS_LAMBDA(int k,int i) { - Ex1(k,jref,i) = Ex1Avg(i)/((real) ncells); - }); - } else { - // if we're not doing full two pi, the flow is symmetric with respect to the axis, and the axis - // EMF is simply zero - idefix_for("Ex1_Store",0,data->np_tot[KDIR],0,data->np_tot[IDIR], + 
idefix_for("Ex1_Symmetrize",data->beg[KDIR],data->end[KDIR],0,data->np_tot[IDIR], KOKKOS_LAMBDA(int k,int i) { - Ex1(k,jref,i) = ZERO_F; + Kokkos::atomic_add(&Ex1Avg(i), Ex1(k,jref,i)); }); + if(needMPIExchange) { + #ifdef WITH_MPI + Kokkos::fence(); + // sum along all of the processes on the same r + MPI_Allreduce(MPI_IN_PLACE, Ex1Avg.data(), data->np_tot[IDIR], realMPI, + MPI_SUM, data->mygrid->AxisComm); + #endif } + + int ncells=data->mygrid->np_int[KDIR]; + + idefix_for("Ex1_Store",0,data->np_tot[KDIR],0,data->np_tot[IDIR], + KOKKOS_LAMBDA(int k,int i) { + Ex1(k,jref,i) = Ex1Avg(i)/((real) ncells); + }); + #endif } @@ -71,9 +62,7 @@ void Axis::SymmetrizeEx1Side(int jref) { // Hence, we enforce a regularisation of Ex3 for consistancy. -void Axis::RegularizeEx3side(int jref) { - IdefixArray3D Ex3 = this->ez; - +void Axis::RegularizeEx3side(int jref, IdefixArray3D Ex3) { idefix_for("Ex3_Regularise",0,data->np_tot[KDIR],0,data->np_tot[IDIR], KOKKOS_LAMBDA(int k,int i) { Ex3(k,jref,i) = 0.0; @@ -100,9 +89,9 @@ void Axis::RegularizeCurrentSide(int side) { sign = -1; } IdefixArray1D BAvg = this->Ex1Avg; - IdefixArray1D x2 = data->x[JDIR]; IdefixArray1D x1 = data->x[IDIR]; IdefixArray1D dx3 = data->dx[KDIR]; + IdefixArray1D dx2 = data->dx[JDIR]; idefix_for("B_ini",0,data->np_tot[IDIR], KOKKOS_LAMBDA(int i) { @@ -132,8 +121,7 @@ void Axis::RegularizeCurrentSide(int side) { idefix_for("fixJ",0,data->np_tot[KDIR],0,data->np_tot[IDIR], KOKKOS_LAMBDA(int k,int i) { - real th = x2(jc); - real fact = 2*sign/(deltaPhi*x1(i)*sin(th)); + real fact = 2*sign/(deltaPhi*x1(i)*dx2(jc)); J(IDIR, k,js,i) = BAvg(i)*fact; }); @@ -142,18 +130,20 @@ void Axis::RegularizeCurrentSide(int side) { // Average the Emf component along the axis -void Axis::RegularizeEMFs() { +void Axis::RegularizeEMFs(IdefixArray3D ex, + IdefixArray3D ey, + IdefixArray3D ez) { idfx::pushRegion("Axis::RegularizeEMFs"); if(this->axisLeft) { int jref = data->beg[JDIR]; - SymmetrizeEx1Side(jref); - 
RegularizeEx3side(jref); + SymmetrizeEx1Side(jref, ex); + RegularizeEx3side(jref, ez); } if(this->axisRight) { int jref = data->end[JDIR]; - SymmetrizeEx1Side(jref); - RegularizeEx3side(jref); + SymmetrizeEx1Side(jref, ex); + RegularizeEx3side(jref, ez); } idfx::popRegion(); diff --git a/src/fluid/boundary/axis.hpp b/src/fluid/boundary/axis.hpp index b74697862..1a1227b61 100644 --- a/src/fluid/boundary/axis.hpp +++ b/src/fluid/boundary/axis.hpp @@ -26,15 +26,16 @@ class Axis { public: template explicit Axis(Boundary *); // Initialisation - void RegularizeEMFs(); // Regularize the EMF sitting on the axis + void RegularizeEMFs(IdefixArray3D, IdefixArray3D, IdefixArray3D); + // Regularize the EMF sitting on the axis void RegularizeCurrent(); // Regularize the currents along the axis void EnforceAxisBoundary(int side); // Enforce the boundary conditions (along X2) void RegularizeBX2s(); // Regularize BX2s on the axis void ShowConfig(); - - void SymmetrizeEx1Side(int); // Symmetrize on a specific side (internal method) - void RegularizeEx3side(int); // Regularize Ex3 along the axis (internal method) + // Internal methods + void SymmetrizeEx1Side(int, IdefixArray3D); // Symmetrize on a specific side + void RegularizeEx3side(int side, IdefixArray3D ex3); // Regularize Ex3 along the axis void RegularizeCurrentSide(int); // Regularize J along the axis (internal method) void FixBx2sAxis(int side); // Fix BX2s on the axis using the field around it (internal) void FixBx2sAxisGhostAverage(int side); //Fix BX2s on the axis using the average of neighbouring @@ -76,9 +77,6 @@ class Axis { IdefixArray1D symmetryVc; IdefixArray1D symmetryVs; - IdefixArray3D ex; - IdefixArray3D ey; - IdefixArray3D ez; IdefixArray4D J; IdefixArray4D Vc; @@ -94,11 +92,6 @@ Axis::Axis(Boundary *boundary) { Vc = boundary->Vc; Vs = boundary->Vs; J = boundary->fluid->J; - if constexpr(Phys::mhd) { - ex = boundary->fluid->emf->ex; - ey = boundary->fluid->emf->ey; - ez = boundary->fluid->emf->ez; - } data = 
boundary->data; haveMHD = Phys::mhd; diff --git a/src/fluid/boundary/boundary.hpp b/src/fluid/boundary/boundary.hpp index 897c1ce10..4065cc60d 100644 --- a/src/fluid/boundary/boundary.hpp +++ b/src/fluid/boundary/boundary.hpp @@ -34,6 +34,8 @@ template using InternalBoundaryFunc = void (*) (Fluid *, const real t); using InternalBoundaryFuncOld = void (*) (DataBlock &, const real t); // DEPRECATED +using BoundingBox = std::array,3>; + template class Boundary { public: @@ -83,12 +85,22 @@ class Boundary { const BoundarySide &, Function ); + template + void BoundaryFor(const std::string &, + BoundingBox box, + Function ); + template void BoundaryForAll(const std::string &, const int &, const BoundarySide &, Function ); + template + void BoundaryForAll(const std::string &, + BoundingBox box, + Function ); + template void BoundaryForX1s(const std::string &, const int &, @@ -115,6 +127,10 @@ class Boundary { bool haveLeftAxis{false}; ///< True if the left boundary is an axis bool haveRightAxis{false}; ///< True if the right boundary is an axis + std::array,3> GhostBoxVc; ///< A bounding box for each ghost regions + std::array,3>,3> + GhostBoxVs; ///< A bounding box each Vs component + private: friend class Axis; Fluid *fluid; // pointer to parent hydro object @@ -154,6 +170,49 @@ Boundary::Boundary(Fluid* fluid) { data->nghost[IDIR]); } + // Initialise the Bounding Boxes for cell-centered variables + for(int dir = 0 ; dir < 3 ; dir++) { + // dir=direction along which we plan to apply the boundary conditions + for(int side = 0; side < 2 ; side++) { + // Side on which we apply the boundaries + for(int dim = 0 ; dim < 3 ; dim++) { + // Dimension of the datacube + if(dim != dir) { + GhostBoxVc[dir][side][dim][0] = 0; + GhostBoxVc[dir][side][dim][1] = data->np_tot[dim]; + } else { + GhostBoxVc[dir][side][dim][0] = side*(data->end[dim]); + GhostBoxVc[dir][side][dim][1] = side*(data->end[dim])+data->nghost[dim]; + } + } + } + } + + // Initialise the Bounding Boxes for 
face-centered variables (NB: we need one for each component) + for(int component = 0 ; component < DIMENSIONS ; component++) { + // Initialise the boxes for face-centered variables with the same bounding box + GhostBoxVs[component] = GhostBoxVc; + for(int dir = 0 ; dir < 3 ; dir++) { + for(int side = 0 ; side < 2 ; side++) { + // Add one element in the normal direction since we're staggered + GhostBoxVs[component][dir][side][component][1] += 1; + // Do not overwrite last active BXs normal if not serial+periodic + if(dir == component) { + if(side==left) { + if(data->mygrid->nproc[dir] > 1 || data->lbound[dir] != BoundaryType::periodic) { + GhostBoxVs[component][dir][side][component][1] -= 1; + } + } + if(side==right) { + if(data->mygrid->nproc[dir] > 1 || data->rbound[dir] != BoundaryType::periodic) { + GhostBoxVs[component][dir][side][component][0] += 1; + } + } + } + } + } + } + // Init MPI stack when needed #ifdef WITH_MPI //////////////////////////////////////////////////////////////////////////// @@ -185,7 +244,8 @@ Boundary::Boundary(Fluid* fluid) { } } - mpi.Init(data->mygrid, mapVars, data->nghost.data(), data->np_int.data(), Phys::mhd); + mpi.Init(data->mygrid, mapVars, data->nghost, data->np_int, + data->lbound, data->rbound, Phys::mhd); #endif // MPI idfx::popRegion(); @@ -970,54 +1030,47 @@ template template inline void Boundary::BoundaryForAll( const std::string & name, - const int &dir, - const BoundarySide &side, + BoundingBox box, Function function) { - const int nxi = data->np_int[IDIR]; - const int nxj = data->np_int[JDIR]; - const int nxk = data->np_int[KDIR]; - - const int ighost = data->nghost[IDIR]; - const int jghost = data->nghost[JDIR]; - const int kghost = data->nghost[KDIR]; - - // Boundaries of the loop - const int ibeg = (dir == IDIR) ? side*(ighost+nxi) : 0; - const int iend = (dir == IDIR) ? ighost + side*(ighost+nxi) : data->np_tot[IDIR]; - const int jbeg = (dir == JDIR) ? side*(jghost+nxj) : 0; - const int jend = (dir == JDIR) ? 
jghost + side*(jghost+nxj) : data->np_tot[JDIR]; - const int kbeg = (dir == KDIR) ? side*(kghost+nxk) : 0; - const int kend = (dir == KDIR) ? kghost + side*(kghost+nxk) : data->np_tot[KDIR]; - - idefix_for(name, 0, this->nVar, kbeg, kend, jbeg, jend, ibeg, iend, function); + idefix_for(name, 0, this->nVar, + box[KDIR][0], box[KDIR][1], + box[JDIR][0], box[JDIR][1], + box[IDIR][0], box[IDIR][1], + function); } template template -inline void Boundary::BoundaryFor( +inline void Boundary::BoundaryForAll( const std::string & name, const int &dir, const BoundarySide &side, Function function) { - const int nxi = data->np_int[IDIR]; - const int nxj = data->np_int[JDIR]; - const int nxk = data->np_int[KDIR]; - - const int ighost = data->nghost[IDIR]; - const int jghost = data->nghost[JDIR]; - const int kghost = data->nghost[KDIR]; - - // Boundaries of the loop - const int ibeg = (dir == IDIR) ? side*(ighost+nxi) : 0; - const int iend = (dir == IDIR) ? ighost + side*(ighost+nxi) : data->np_tot[IDIR]; - const int jbeg = (dir == JDIR) ? side*(jghost+nxj) : 0; - const int jend = (dir == JDIR) ? jghost + side*(jghost+nxj) : data->np_tot[JDIR]; - const int kbeg = (dir == KDIR) ? side*(kghost+nxk) : 0; - const int kend = (dir == KDIR) ? 
kghost + side*(kghost+nxk) : data->np_tot[KDIR]; + BoundaryForAll(name,GhostBoxVc[dir][side],function); +} +template +template +inline void Boundary::BoundaryFor( + const std::string & name, + BoundingBox box, + Function function) { + idefix_for(name, + box[KDIR][0], box[KDIR][1], + box[JDIR][0], box[JDIR][1], + box[IDIR][0], box[IDIR][1], + function); +} - idefix_for(name, kbeg, kend, jbeg, jend, ibeg, iend, function); +template +template +inline void Boundary::BoundaryFor( + const std::string & name, + const int &dir, + const BoundarySide &side, + Function function) { + BoundaryFor(name,GhostBoxVc[dir][side],function); } template @@ -1027,23 +1080,7 @@ inline void Boundary::BoundaryForX1s( const int &dir, const BoundarySide &side, Function function) { - const int nxi = data->np_int[IDIR]+1; - const int nxj = data->np_int[JDIR]; - const int nxk = data->np_int[KDIR]; - - const int ighost = data->nghost[IDIR]; - const int jghost = data->nghost[JDIR]; - const int kghost = data->nghost[KDIR]; - - // Boundaries of the loop - const int ibeg = (dir == IDIR) ? side*(ighost+nxi) : 0; - const int iend = (dir == IDIR) ? ighost + side*(ighost+nxi) : data->np_tot[IDIR]+1; - const int jbeg = (dir == JDIR) ? side*(jghost+nxj) : 0; - const int jend = (dir == JDIR) ? jghost + side*(jghost+nxj) : data->np_tot[JDIR]; - const int kbeg = (dir == KDIR) ? side*(kghost+nxk) : 0; - const int kend = (dir == KDIR) ? 
kghost + side*(kghost+nxk) : data->np_tot[KDIR]; - - idefix_for(name, kbeg, kend, jbeg, jend, ibeg, iend, function); + BoundaryFor(name,GhostBoxVs[BX1s][dir][side],function); } template @@ -1053,23 +1090,7 @@ inline void Boundary::BoundaryForX2s( const int &dir, const BoundarySide &side, Function function) { - const int nxi = data->np_int[IDIR]; - const int nxj = data->np_int[JDIR]+1; - const int nxk = data->np_int[KDIR]; - - const int ighost = data->nghost[IDIR]; - const int jghost = data->nghost[JDIR]; - const int kghost = data->nghost[KDIR]; - - // Boundaries of the loop - const int ibeg = (dir == IDIR) ? side*(ighost+nxi) : 0; - const int iend = (dir == IDIR) ? ighost + side*(ighost+nxi) : data->np_tot[IDIR]; - const int jbeg = (dir == JDIR) ? side*(jghost+nxj) : 0; - const int jend = (dir == JDIR) ? jghost + side*(jghost+nxj) : data->np_tot[JDIR]+1; - const int kbeg = (dir == KDIR) ? side*(kghost+nxk) : 0; - const int kend = (dir == KDIR) ? kghost + side*(kghost+nxk) : data->np_tot[KDIR]; - - idefix_for(name, kbeg, kend, jbeg, jend, ibeg, iend, function); + BoundaryFor(name,GhostBoxVs[BX2s][dir][side],function); } template @@ -1079,23 +1100,7 @@ inline void Boundary::BoundaryForX3s( const int &dir, const BoundarySide &side, Function function) { - const int nxi = data->np_int[IDIR]; - const int nxj = data->np_int[JDIR]; - const int nxk = data->np_int[KDIR]+1; - - const int ighost = data->nghost[IDIR]; - const int jghost = data->nghost[JDIR]; - const int kghost = data->nghost[KDIR]; - - // Boundaries of the loop - const int ibeg = (dir == IDIR) ? side*(ighost+nxi) : 0; - const int iend = (dir == IDIR) ? ighost + side*(ighost+nxi) : data->np_tot[IDIR]; - const int jbeg = (dir == JDIR) ? side*(jghost+nxj) : 0; - const int jend = (dir == JDIR) ? jghost + side*(jghost+nxj) : data->np_tot[JDIR]; - const int kbeg = (dir == KDIR) ? side*(kghost+nxk) : 0; - const int kend = (dir == KDIR) ? 
kghost + side*(kghost+nxk) : data->np_tot[KDIR]+1; - - idefix_for(name, kbeg, kend, jbeg, jend, ibeg, iend, function); + BoundaryFor(name,GhostBoxVs[BX3s][dir][side],function); } diff --git a/src/fluid/calcRightHandSide.hpp b/src/fluid/calcRightHandSide.hpp index 7777d1c22..0a0d3e194 100644 --- a/src/fluid/calcRightHandSide.hpp +++ b/src/fluid/calcRightHandSide.hpp @@ -363,7 +363,7 @@ struct Fluid_CalcRHSFunctor { #if (GEOMETRY == SPHERICAL) && (COMPONENTS == 3) rhs[iMPHI] /= FABS(sinx2(j)); if constexpr(Phys::mhd) { - rhs[iBPHI] = -dt / (rt(i)*dx(j)) * (Flux(iBPHI, k, j+1, i) - Flux(iBPHI, k, j, i)); + rhs[iBPHI] = -dt / (x1(i)*dx(j)) * (Flux(iBPHI, k, j+1, i) - Flux(iBPHI, k, j, i)); } // MHD #endif // GEOMETRY } diff --git a/src/fluid/checkDivB.hpp b/src/fluid/checkDivB.hpp index c78f6b3e0..6e47d4c8c 100644 --- a/src/fluid/checkDivB.hpp +++ b/src/fluid/checkDivB.hpp @@ -31,12 +31,9 @@ real Fluid::CheckDivB() { KOKKOS_LAMBDA (int k, int j, int i, real &divBmax) { [[maybe_unused]] real dB1,dB2,dB3; [[maybe_unused]] real d1, d2, d3; - [[maybe_unused]] real B1,B2,B3; dB1=dB2=dB3=ZERO_F; d1=d2=d3=ZERO_F; - B1=B2=B3=ZERO_F; - D_EXPAND( dB1=(Ax1(k,j,i+1)*Vs(BX1s,k,j,i+1)-Ax1(k,j,i)*Vs(BX1s,k,j,i)); , dB2=(Ax2(k,j+1,i)*Vs(BX2s,k,j+1,i)-Ax2(k,j,i)*Vs(BX2s,k,j,i)); , @@ -46,12 +43,8 @@ real Fluid::CheckDivB() { d2=0.5*(Ax2(k,j+1,i) + Ax2(k,j,i)); , d3=0.5*(Ax3(k+1,j,i) + Ax3(k,j,i)); ) - D_EXPAND( B1=0.5*(Vs(BX1s,k,j,i+1) + Vs(BX1s,k,j,i)); , - B2=0.5*(Vs(BX2s,k,j+1,i) + Vs(BX2s,k,j,i)); , - B3=0.5*(Vs(BX3s,k+1,j,i) + Vs(BX3s,k,j,i)); ) - real amplitude = 1e-40; - amplitude += D_EXPAND( std::fabs(B1)*d1, + std::fabs(B2)*d2, + std::fabs(B3)*d3 ); + const real amplitude = D_EXPAND( d1, + d2, + d3 ); divBmax=FMAX(FABS(D_EXPAND(dB1, +dB2, +dB3))/amplitude,divBmax); }, diff --git a/src/fluid/checkNan.hpp b/src/fluid/checkNan.hpp index 88e206e5e..0e8890725 100644 --- a/src/fluid/checkNan.hpp +++ b/src/fluid/checkNan.hpp @@ -67,7 +67,7 @@ int Fluid::CheckNan() { DataBlockHost 
dataHost(*data); - IdefixHostArray4D VcHost = Kokkos::create_mirror_view(this->Vc); + IdefixHostArray4D VcHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->Vc); Kokkos::deep_copy(VcHost,Vc); int nerrormax=10; @@ -95,7 +95,7 @@ int Fluid::CheckNan() { } if constexpr(Phys::mhd) { - IdefixHostArray4D VsHost = Kokkos::create_mirror_view(this->Vs); + IdefixHostArray4D VsHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->Vs); Kokkos::deep_copy(VsHost,Vs); for(int k = data->beg[KDIR] ; k < data->end[KDIR]+KOFFSET ; k++) { for(int j = data->beg[JDIR] ; j < data->end[JDIR]+JOFFSET ; j++) { diff --git a/src/fluid/constrainedTransport/CMakeLists.txt b/src/fluid/constrainedTransport/CMakeLists.txt index 06efc782b..7d99b805c 100644 --- a/src/fluid/constrainedTransport/CMakeLists.txt +++ b/src/fluid/constrainedTransport/CMakeLists.txt @@ -5,6 +5,7 @@ target_sources(idefix PUBLIC ${CMAKE_CURRENT_LIST_DIR}/constrainedTransport.hpp PUBLIC ${CMAKE_CURRENT_LIST_DIR}/EMFexchange.hpp PUBLIC ${CMAKE_CURRENT_LIST_DIR}/enforceEMFBoundary.hpp + PUBLIC ${CMAKE_CURRENT_LIST_DIR}/enforceVectorPotentialBoundary.hpp PUBLIC ${CMAKE_CURRENT_LIST_DIR}/evolveMagField.hpp PUBLIC ${CMAKE_CURRENT_LIST_DIR}/evolveVectorPotential.hpp ) diff --git a/src/fluid/constrainedTransport/EMFexchange.hpp b/src/fluid/constrainedTransport/EMFexchange.hpp index 187186271..f6e90f75a 100644 --- a/src/fluid/constrainedTransport/EMFexchange.hpp +++ b/src/fluid/constrainedTransport/EMFexchange.hpp @@ -14,16 +14,18 @@ #ifdef WITH_MPI template -void ConstrainedTransport::ExchangeAll() { - if(data->mygrid->nproc[IDIR]>1) this->ExchangeX1(); - if(data->mygrid->nproc[JDIR]>1) this->ExchangeX2(); - if(data->mygrid->nproc[KDIR]>1) this->ExchangeX3(); +void ConstrainedTransport::ExchangeAll(IdefixArray3D ex, + IdefixArray3D ey, + IdefixArray3D ez) { + if(data->mygrid->nproc[IDIR]>1) this->ExchangeX1(ey,ez); + if(data->mygrid->nproc[JDIR]>1) this->ExchangeX2(ex,ez); + if(data->mygrid->nproc[KDIR]>1) 
this->ExchangeX3(ex,ey); } // Exchange EMFs in X1 template -void ConstrainedTransport::ExchangeX1() { +void ConstrainedTransport::ExchangeX1(IdefixArray3D ey, IdefixArray3D ez) { idfx::pushRegion("Emf::ExchangeX1"); @@ -34,8 +36,6 @@ void ConstrainedTransport::ExchangeX1() { IdefixArray1D BufferLeft=BufferSendX1[faceLeft]; IdefixArray1D BufferRight=BufferSendX1[faceRight]; - IdefixArray3D ey=this->ey; - IdefixArray3D ez=this->ez; // If MPI Persistent, start receiving even before the buffers are filled @@ -61,7 +61,6 @@ void ConstrainedTransport::ExchangeX1() { idefix_for("LoadBufferX1Emfz",kbeg,kend,jbeg,jend+1, KOKKOS_LAMBDA (int k, int j) { - BufferLeft( (j-jbeg) + (k-kbeg)*(ny+1) ) = ez(k,j,ileft); BufferRight( (j-jbeg) + (k-kbeg)*(ny+1) ) = ez(k,j,iright); } ); @@ -70,7 +69,6 @@ void ConstrainedTransport::ExchangeX1() { idefix_for("LoadBufferX1Emfy",kbeg,kend+1,jbeg,jend, KOKKOS_LAMBDA (int k, int j) { - BufferLeft( (j-jbeg) + (k-kbeg)*ny + Vsindex ) = ey(k,j,ileft); BufferRight( (j-jbeg) + (k-kbeg)*ny + Vsindex ) = ey(k,j,iright); } ); @@ -91,29 +89,21 @@ void ConstrainedTransport::ExchangeX1() { BufferLeft=BufferRecvX1[faceLeft]; BufferRight=BufferRecvX1[faceRight]; - // We average the edge emfs zones + // Erase the emf with the one coming from the left process + idefix_for("StoreBufferX1Emfz",kbeg,kend,jbeg,jend+1, KOKKOS_LAMBDA (int k, int j) { if(lbound == internal || lbound == periodic) { - ez(k,j,ileft) = HALF_F*( - BufferLeft( (j-jbeg) + (k-kbeg)*(ny+1) ) + ez(k,j,ileft) ); - } - if(rbound == internal || rbound == periodic) { - ez(k,j,iright) = HALF_F*( - BufferRight( (j-jbeg) + (k-kbeg)*(ny+1) ) + ez(k,j,iright) ); + ez(k,j,ileft) = BufferLeft( (j-jbeg) + (k-kbeg)*(ny+1)); } }); + #if DIMENSIONS == 3 Vsindex = (ny+1)*nz; idefix_for("StoreBufferX1Emfy",kbeg,kend+1,jbeg,jend, KOKKOS_LAMBDA (int k, int j) { if(lbound == internal || lbound == periodic) { - ey(k,j,ileft) = HALF_F*( - BufferLeft( (j-jbeg) + (k-kbeg)*ny +Vsindex) + ey(k,j,ileft) ); - } - 
if(rbound == internal || rbound == periodic) { - ey(k,j,iright) = HALF_F*( - BufferRight( (j-jbeg) + (k-kbeg)*ny +Vsindex) + ey(k,j,iright) ); + ey(k,j,ileft) = BufferLeft( (j-jbeg) + (k-kbeg)*ny +Vsindex); } }); #endif @@ -124,7 +114,7 @@ void ConstrainedTransport::ExchangeX1() { // Exchange EMFs in X2 template -void ConstrainedTransport::ExchangeX2() { +void ConstrainedTransport::ExchangeX2(IdefixArray3D ex, IdefixArray3D ez) { idfx::pushRegion("Emf::ExchangeX2"); // Load the buffers with data @@ -133,8 +123,6 @@ void ConstrainedTransport::ExchangeX2() { [[maybe_unused]] int nz; IdefixArray1D BufferLeft=BufferSendX2[faceLeft]; IdefixArray1D BufferRight=BufferSendX2[faceRight]; - IdefixArray3D ex=this->ex; - IdefixArray3D ez=this->ez; // If MPI Persistent, start receiving even before the buffers are filled double tStart = MPI_Wtime(); @@ -158,7 +146,6 @@ void ConstrainedTransport::ExchangeX2() { idefix_for("LoadBufferX2Emfz",kbeg,kend,ibeg,iend+1, KOKKOS_LAMBDA (int k, int i) { - BufferLeft( (i-ibeg) + (k-kbeg)*(nx+1) ) = ez(k,jleft,i); BufferRight( (i-ibeg) + (k-kbeg)*(nx+1) ) = ez(k,jright,i); } ); @@ -167,7 +154,6 @@ void ConstrainedTransport::ExchangeX2() { idefix_for("LoadBufferX1Emfx",kbeg,kend+1,ibeg,iend, KOKKOS_LAMBDA (int k, int i) { - BufferLeft( (i-ibeg) + (k-kbeg)*nx + Vsindex ) = ex(k,jleft,i); BufferRight( (i-ibeg) + (k-kbeg)*nx + Vsindex ) = ex(k,jright,i); } ); @@ -191,12 +177,7 @@ void ConstrainedTransport::ExchangeX2() { idefix_for("StoreBufferX2Emfz",kbeg,kend,ibeg,iend+1, KOKKOS_LAMBDA (int k, int i) { if(lbound == internal || lbound == periodic) { - ez(k,jleft,i) = HALF_F*( - BufferLeft( (i-ibeg) + (k-kbeg)*(nx+1) ) + ez(k,jleft,i) ); - } - if(rbound == internal || rbound == periodic) { - ez(k,jright,i) = HALF_F*( - BufferRight( (i-ibeg) + (k-kbeg)*(nx+1) ) + ez(k,jright,i) ); + ez(k,jleft,i) = BufferLeft( (i-ibeg) + (k-kbeg)*(nx+1) ); } }); #if DIMENSIONS == 3 @@ -204,12 +185,7 @@ void ConstrainedTransport::ExchangeX2() { 
idefix_for("StoreBufferX1Emfy",kbeg,kend+1,ibeg,iend, KOKKOS_LAMBDA (int k, int i) { if(lbound == internal || lbound == periodic) { - ex(k,jleft,i) = HALF_F*( - BufferLeft( (i-ibeg) + (k-kbeg)*nx +Vsindex) + ex(k,jleft,i) ); - } - if(rbound == internal || rbound == periodic) { - ex(k,jright,i) = HALF_F*( - BufferRight( (i-ibeg) + (k-kbeg)*nx +Vsindex) + ex(k,jright,i) ); + ex(k,jleft,i) = BufferLeft( (i-ibeg) + (k-kbeg)*nx +Vsindex); } }); #endif @@ -220,7 +196,7 @@ void ConstrainedTransport::ExchangeX2() { // Exchange EMFs in X3 template -void ConstrainedTransport::ExchangeX3() { +void ConstrainedTransport::ExchangeX3(IdefixArray3D ex, IdefixArray3D ey) { idfx::pushRegion("Emf::ExchangeX3"); @@ -229,8 +205,6 @@ void ConstrainedTransport::ExchangeX3() { int nx,ny; IdefixArray1D BufferLeft=BufferSendX3[faceLeft]; IdefixArray1D BufferRight=BufferSendX3[faceRight]; - IdefixArray3D ex=this->ex; - IdefixArray3D ey=this->ey; int Vsindex = 0; @@ -259,7 +233,6 @@ void ConstrainedTransport::ExchangeX3() { idefix_for("LoadBufferX3Emfx",jbeg,jend+1,ibeg,iend, KOKKOS_LAMBDA (int j, int i) { - BufferLeft( (i-ibeg) + (j-jbeg)*nx ) = ex(kleft,j,i); BufferRight( (i-ibeg) + (j-jbeg)*nx ) = ex(kright,j,i); } ); @@ -267,7 +240,6 @@ void ConstrainedTransport::ExchangeX3() { idefix_for("LoadBufferX3Emfy",jbeg,jend,ibeg,iend+1, KOKKOS_LAMBDA (int j, int i) { - BufferLeft( (i-ibeg) + (j-jbeg)*(nx+1) + Vsindex ) = ey(kleft,j,i); BufferRight( (i-ibeg) + (j-jbeg)*(nx+1) + Vsindex ) = ey(kright,j,i); } ); @@ -290,12 +262,7 @@ void ConstrainedTransport::ExchangeX3() { idefix_for("StoreBufferX3Emfx",jbeg,jend+1,ibeg,iend, KOKKOS_LAMBDA (int j, int i) { if(lbound == internal || lbound == periodic) { - ex(kleft,j,i) = HALF_F*( - BufferLeft( (i-ibeg) + (j-jbeg)*nx ) + ex(kleft,j,i) ); - } - if(rbound == internal || rbound == periodic) { - ex(kright,j,i) = HALF_F*( - BufferRight( (i-ibeg) + (j-jbeg)*nx ) + ex(kright,j,i) ); + ex(kleft,j,i) = BufferLeft( (i-ibeg) + (j-jbeg)*nx ); } }); @@ -303,16 
+270,9 @@ void ConstrainedTransport::ExchangeX3() { idefix_for("StoreBufferX3Emfy",jbeg,jend,ibeg,iend+1, KOKKOS_LAMBDA (int j, int i) { if(lbound == internal || lbound == periodic) { - ey(kleft,j,i) = HALF_F*( - BufferLeft( (i-ibeg) + (j-jbeg)*(nx+1) + Vsindex ) + ey(kleft,j,i) ); - } - if(rbound == internal || rbound == periodic) { - ey(kright,j,i) = HALF_F*( - BufferRight( (i-ibeg) + (j-jbeg)*(nx+1) + Vsindex ) + ey(kright,j,i) ); + ey(kleft,j,i) = BufferLeft( (i-ibeg) + (j-jbeg)*(nx+1) + Vsindex ); } }); - - idfx::popRegion(); } diff --git a/src/fluid/constrainedTransport/constrainedTransport.hpp b/src/fluid/constrainedTransport/constrainedTransport.hpp index c32e4ec96..24c317c09 100644 --- a/src/fluid/constrainedTransport/constrainedTransport.hpp +++ b/src/fluid/constrainedTransport/constrainedTransport.hpp @@ -101,13 +101,17 @@ class ConstrainedTransport { // Routines for evolving the magnetic potential (only available when EVOLVE_VECTOR_POTENTIAL) void EvolveVectorPotential(real, IdefixArray4D &); void ComputeMagFieldFromA(IdefixArray4D &Vein, IdefixArray4D &Vsout); + void EnforceVectorPotentialBoundary(IdefixArray4D &Vein); // Enforce BCs on A + void EnforceEMFBoundaryPeriodic(IdefixArray3D ex, + IdefixArray3D ey, + IdefixArray3D ez); #ifdef WITH_MPI // Exchange surface EMFs to remove interprocess round off errors - void ExchangeAll(); - void ExchangeX1(); - void ExchangeX2(); - void ExchangeX3(); + void ExchangeAll(IdefixArray3D ex, IdefixArray3D ey, IdefixArray3D ez); + void ExchangeX1(IdefixArray3D ey, IdefixArray3D ez); + void ExchangeX2(IdefixArray3D ex, IdefixArray3D ez); + void ExchangeX3(IdefixArray3D ex, IdefixArray3D ey); #endif private: @@ -449,6 +453,7 @@ void ConstrainedTransport::ShowConfig() { #include "calcRiemannEmf.hpp" #include "EMFexchange.hpp" #include "enforceEMFBoundary.hpp" +#include "enforceVectorPotentialBoundary.hpp" #include "evolveMagField.hpp" #include "evolveVectorPotential.hpp" diff --git 
a/src/fluid/constrainedTransport/enforceEMFBoundary.hpp b/src/fluid/constrainedTransport/enforceEMFBoundary.hpp index 192ec12e4..9d3d6913f 100644 --- a/src/fluid/constrainedTransport/enforceEMFBoundary.hpp +++ b/src/fluid/constrainedTransport/enforceEMFBoundary.hpp @@ -19,8 +19,9 @@ // This is because in some specific cases involving curvilinear coordinates, roundoff errors // can accumulate, leading to a small drift of face-values that should be strictly equal. // This behaviour is enabled using the flag below. - -//#define ENFORCE_EMF_CONSISTENCY +#ifdef EVOLVE_VECTOR_POTENTIAL + #define ENFORCE_EMF_CONSISTENCY +#endif template void ConstrainedTransport::EnforceEMFBoundary() { @@ -30,81 +31,73 @@ void ConstrainedTransport::EnforceEMFBoundary() { this->data->hydro->emfBoundaryFunc(*data, data->t); if(this->data->hydro->haveAxis) { - this->data->hydro->boundary->axis->RegularizeEMFs(); + this->data->hydro->boundary->axis->RegularizeEMFs(this->ex, this->ey, this->ez); } #ifdef ENFORCE_EMF_CONSISTENCY #ifdef WITH_MPI // This average the EMFs at the domain surface with immediate neighbours // to ensure the EMFs exactly match - this->ExchangeAll(); + this->ExchangeAll(this->ex, this->ey, this->ez); #endif #endif - IdefixArray3D ex = this->ex; - IdefixArray3D ey = this->ey; - IdefixArray3D ez = this->ez; - // Enforce specific EMF regularisation for(int dir=0 ; dir < DIMENSIONS ; dir++ ) { if(data->lbound[dir] == shearingbox || data->rbound[dir] == shearingbox) { SymmetrizeEMFShearingBox(); } - #ifdef ENFORCE_EMF_CONSISTENCY - if(data->lbound[dir] == periodic && data->rbound[dir] == periodic) { - // If domain decomposed, periodicity is already enforced by ExchangeAll - if(data->mygrid->nproc[dir] == 1) { - int ioffset = (dir == IDIR) ? data->np_int[IDIR] : 0; - int joffset = (dir == JDIR) ? data->np_int[JDIR] : 0; - int koffset = (dir == KDIR) ? data->np_int[KDIR] : 0; - - int ibeg = (dir == IDIR) ? data->beg[IDIR] : 0; - int iend = (dir == IDIR) ? 
data->beg[IDIR]+1 : data->np_tot[IDIR]; - int jbeg = (dir == JDIR) ? data->beg[JDIR] : 0; - int jend = (dir == JDIR) ? data->beg[JDIR]+1 : data->np_tot[JDIR]; - int kbeg = (dir == KDIR) ? data->beg[KDIR] : 0; - int kend = (dir == KDIR) ? data->beg[KDIR]+1 : data->np_tot[KDIR]; - idefix_for("BoundaryEMFPeriodic",kbeg,kend,jbeg,jend,ibeg,iend, - KOKKOS_LAMBDA (int k, int j, int i) { - real em; - - if(dir==IDIR) { - em = HALF_F*(ez(k,j,i)+ez(k,j,i+ioffset)); - ez(k,j,i) = em; - ez(k,j,i+ioffset) = em; - - #if DIMENSIONS == 3 - em = HALF_F*(ey(k,j,i)+ey(k,j,i+ioffset)); - ey(k,j,i) = em; - ey(k,j,i+ioffset) = em; - #endif - } - - if(dir==JDIR) { - em = HALF_F*(ez(k,j,i)+ez(k,j+joffset,i)); - ez(k,j,i) = em; - ez(k,j+joffset,i) = em; - - #if DIMENSIONS == 3 - em = HALF_F*(ex(k,j,i)+ex(k,j+joffset,i)); - ex(k,j,i) = em; - ex(k,j+joffset,i) = em; - #endif - } - - if(dir==KDIR) { - em = HALF_F*(ex(k,j,i)+ex(k+koffset,j,i)); - ex(k,j,i) = em; - ex(k+koffset,j,i) = em; - - em = HALF_F*(ey(k,j,i)+ey(k+koffset,j,i)); - ey(k,j,i) = em; - ey(k+koffset,j,i) = em; - } - }); - } + } + #ifdef ENFORCE_EMF_CONSISTENCY + EnforceEMFBoundaryPeriodic(this->ex, this->ey, this->ez); + #endif //ENFORCE_EMF_CONSISTENCY +#endif // MHD==YES + idfx::popRegion(); +} + +template +void ConstrainedTransport::EnforceEMFBoundaryPeriodic(IdefixArray3D ex, + IdefixArray3D ey, + IdefixArray3D ez) { + idfx::pushRegion("Emf::EnforceEMFBoundaryPeriodic"); + #if MHD == YES + for(int dir=0 ; dir < DIMENSIONS ; dir++ ) { + if(data->lbound[dir] == periodic && data->rbound[dir] == periodic) { + // If domain decomposed, periodicity is already enforced by ExchangeAll + if(data->mygrid->nproc[dir] == 1) { + int ioffset = (dir == IDIR) ? data->np_int[IDIR] : 0; + int joffset = (dir == JDIR) ? data->np_int[JDIR] : 0; + int koffset = (dir == KDIR) ? data->np_int[KDIR] : 0; + + int ibeg = (dir == IDIR) ? data->beg[IDIR] : 0; + int iend = (dir == IDIR) ? 
data->beg[IDIR]+1 : data->np_tot[IDIR]; + int jbeg = (dir == JDIR) ? data->beg[JDIR] : 0; + int jend = (dir == JDIR) ? data->beg[JDIR]+1 : data->np_tot[JDIR]; + int kbeg = (dir == KDIR) ? data->beg[KDIR] : 0; + int kend = (dir == KDIR) ? data->beg[KDIR]+1 : data->np_tot[KDIR]; + idefix_for("BoundaryEMFPeriodic",kbeg,kend,jbeg,jend,ibeg,iend, + KOKKOS_LAMBDA (int k, int j, int i) { + if(dir==IDIR) { + ez(k,j,i+ioffset) = ez(k,j,i); + #if DIMENSIONS == 3 + ey(k,j,i+ioffset) = ey(k,j,i); + #endif + } + + if(dir==JDIR) { + ez(k,j+joffset,i) = ez(k,j,i); + #if DIMENSIONS == 3 + ex(k,j+joffset,i) = ex(k,j,i); + #endif + } + + if(dir==KDIR) { + ex(k+koffset,j,i) = ex(k,j,i); + ey(k+koffset,j,i) = ey(k,j,i); + } + }); } - #endif //ENFORCE_EMF_CONSISTENCY + } } #endif // MHD==YES idfx::popRegion(); @@ -148,11 +141,13 @@ void ConstrainedTransport::SymmetrizeEMFShearingBox() { if(data->lbound[IDIR]==shearingbox) { // We send to our left (which, by periodicity, is the right end of the domain) // our value of sbEyL and get + Kokkos::fence(); MPI_Sendrecv(sbEyL.data(), size, realMPI, procLeft, 2001, sbEyR.data(), size, realMPI, procLeft, 2002, data->mygrid->CartComm, &status ); } if(data->rbound[IDIR]==shearingbox) { + Kokkos::fence(); // We send to our right (which, by periodicity, is the left end (=beginning) // of the domain) our value of sbEyR and get sbEyL MPI_Sendrecv(sbEyR.data(), size, realMPI, procRight, 2002, diff --git a/src/fluid/constrainedTransport/enforceVectorPotentialBoundary.hpp b/src/fluid/constrainedTransport/enforceVectorPotentialBoundary.hpp new file mode 100644 index 000000000..3019edfd8 --- /dev/null +++ b/src/fluid/constrainedTransport/enforceVectorPotentialBoundary.hpp @@ -0,0 +1,44 @@ +// *********************************************************************************** +// Idefix MHD astrophysical code +// Copyright(C) Geoffroy R. J. 
Lesur +// and other code contributors +// Licensed under CeCILL 2.1 License, see COPYING for more information +// *********************************************************************************** + +#ifndef FLUID_CONSTRAINEDTRANSPORT_ENFORCEVECTORPOTENTIALBOUNDARY_HPP_ +#define FLUID_CONSTRAINEDTRANSPORT_ENFORCEVECTORPOTENTIALBOUNDARY_HPP_ +#include "constrainedTransport.hpp" + +template +void ConstrainedTransport::EnforceVectorPotentialBoundary(IdefixArray4D &Vein) { + idfx::pushRegion("Emf::EnforceVectorPotentialBoundary"); + + + IdefixArray3D Ax1, Ax2, Ax3; + + #ifdef EVOLVE_VECTOR_POTENTIAL + #if DIMENSIONS == 3 + Ax1 = Kokkos::subview(Vein, AX1e, Kokkos::ALL(), Kokkos::ALL(), Kokkos::ALL()); + Ax2 = Kokkos::subview(Vein, AX2e, Kokkos::ALL(), Kokkos::ALL(), Kokkos::ALL()); + #endif + Ax3 = Kokkos::subview(Vein, AX3e, Kokkos::ALL(), Kokkos::ALL(), Kokkos::ALL()); + + + if(this->hydro->haveAxis) { + this->hydro->boundary->axis->RegularizeEMFs(Ax1, Ax2, Ax3); + } + + #ifdef ENFORCE_EMF_CONSISTENCY + #ifdef WITH_MPI + // This average the vector potential at the domain surface with immediate neighbours + // to ensure the vector potentials exactly match + + this->ExchangeAll(Ax1, Ax2, Ax3); + #endif + EnforceEMFBoundaryPeriodic(Ax1, Ax2, Ax3); + #endif + #endif // EVOLVE_VECTOR_POTENTIAL + + idfx::popRegion(); +} +#endif // FLUID_CONSTRAINEDTRANSPORT_ENFORCEVECTORPOTENTIALBOUNDARY_HPP_ diff --git a/src/global.hpp b/src/global.hpp index 5a9ea70fb..c4de3926e 100644 --- a/src/global.hpp +++ b/src/global.hpp @@ -39,7 +39,7 @@ template IdefixArray1D ConvertVectorToIdefixArray(std::vector &inputVector) { IdefixArray1D outArr = IdefixArray1D("Vector",inputVector.size()); IdefixHostArray1D outArrHost; - outArrHost = Kokkos::create_mirror_view(outArr); + outArrHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), outArr); for(int i = 0; i < inputVector.size() ; i++) { outArrHost(i) = inputVector[i]; } diff --git a/src/gravity/laplacian.cpp b/src/gravity/laplacian.cpp index 
2300db82d..7ac329ea3 100644 --- a/src/gravity/laplacian.cpp +++ b/src/gravity/laplacian.cpp @@ -103,7 +103,7 @@ Laplacian::Laplacian(DataBlock *datain, std::array left std::vector mapVars; mapVars.push_back(ntarget); - this->mpi.Init(data->mygrid, mapVars, this->nghost.data(), this->np_int.data()); + this->mpi.Init(data->mygrid, mapVars, nghost, np_int, datain->lbound, datain->rbound, false); #endif idfx::popRegion(); diff --git a/src/grid.hpp b/src/grid.hpp index 0ffdb3a58..947d05d12 100644 --- a/src/grid.hpp +++ b/src/grid.hpp @@ -81,7 +81,7 @@ class SubGrid { parentGrid(grid), type(type), direction(d) { idfx::pushRegion("SubGrid::SubGrid()"); // Find the index of the current subgrid. - auto x = Kokkos::create_mirror_view(parentGrid->x[direction]); + auto x = Kokkos::create_mirror_view(Kokkos::HostSpace(), parentGrid->x[direction]); Kokkos::deep_copy(x,parentGrid->x[direction]); int iref = -1; for(int i = 0 ; i < x.extent(0) - 1 ; i++) { diff --git a/src/gridHost.cpp b/src/gridHost.cpp index ad61f0436..0c85d8e41 100644 --- a/src/gridHost.cpp +++ b/src/gridHost.cpp @@ -32,10 +32,10 @@ GridHost::GridHost(Grid &grid) { // Create mirrors on host for(int dir = 0 ; dir < 3 ; dir++) { - x[dir] = Kokkos::create_mirror_view(grid.x[dir]); - xr[dir] = Kokkos::create_mirror_view(grid.xr[dir]); - xl[dir] = Kokkos::create_mirror_view(grid.xl[dir]); - dx[dir] = Kokkos::create_mirror_view(grid.dx[dir]); + x[dir] = Kokkos::create_mirror_view(Kokkos::HostSpace(), grid.x[dir]); + xr[dir] = Kokkos::create_mirror_view(Kokkos::HostSpace(), grid.xr[dir]); + xl[dir] = Kokkos::create_mirror_view(Kokkos::HostSpace(), grid.xl[dir]); + dx[dir] = Kokkos::create_mirror_view(Kokkos::HostSpace(), grid.dx[dir]); } idfx::popRegion(); diff --git a/src/macros.hpp b/src/macros.hpp index 2c7bda127..0df5f9430 100644 --- a/src/macros.hpp +++ b/src/macros.hpp @@ -25,7 +25,6 @@ #if COMPONENTS == 3 #define EXPAND(a,b,c) a b c #define SELECT(a,b,c) c - #endif #if DIMENSIONS == 1 diff --git a/src/main.cpp 
b/src/main.cpp index 164c134ee..a2dcd630a 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -9,7 +9,7 @@ //@HEADER // ************************************************************************ // -// IDEFIX v 2.2.01 +// IDEFIX v 2.3.0 // // ************************************************************************ //@HEADER @@ -192,7 +192,9 @@ int main( int argc, char* argv[] ) { try { Tint.Cycle(data); } catch(std::exception &e) { - idfx::cout << "Main: WARNING! Caught an exception in TimeIntegrator." << std::endl; + idfx::cout << std::endl + << "Main: WARNING! Caught an exception in TimeIntegrator." + << std::endl; #ifdef WITH_MPI if(!Mpi::CheckSync(5)) { std::stringstream message; diff --git a/src/mpi.cpp b/src/mpi.cpp deleted file mode 100644 index 4de7739cb..000000000 --- a/src/mpi.cpp +++ /dev/null @@ -1,992 +0,0 @@ -// *********************************************************************************** -// Idefix MHD astrophysical code -// Copyright(C) Geoffroy R. J. Lesur -// and other code contributors -// Licensed under CeCILL 2.1 License, see COPYING for more information -// *********************************************************************************** - - -#include "mpi.hpp" -#include -#include -#include // NOLINT [build/c++11] -#include // NOLINT [build/c++11] -#include -#include -#include "idefix.hpp" -#include "dataBlock.hpp" - - -#if defined(OPEN_MPI) && OPEN_MPI -#include "mpi-ext.h" // Needed for CUDA-aware check */ -#endif - - -//#define MPI_NON_BLOCKING -#define MPI_PERSISTENT - -// init the number of instances -int Mpi::nInstances = 0; - -// MPI Routines exchange -void Mpi::ExchangeAll() { - IDEFIX_ERROR("Not Implemented"); -} - -/// -/// Initialise an instance of the MPI class. 
-/// @param grid: pointer to the grid object (needed to get the MPI neighbours) -/// @param inputMap: 1st indices of inputVc which are to be exchanged (i.e, the list of variables) -/// @param nghost: size of the ghost region in each direction -/// @param nint: size of the internal region in each direction -/// @param inputHaveVs: whether the instance should also treat face-centered variable -/// (optional, default false) -/// - -void Mpi::Init(Grid *grid, std::vector inputMap, - int nghost[3], int nint[3], - bool inputHaveVs) { - idfx::pushRegion("Mpi::Init"); - this->mygrid = grid; - - // increase the number of instances - nInstances++; - thisInstance=nInstances; - - // Transfer the vector of indices as an IdefixArray on the target - - // Allocate mapVars on target and copy it from the input argument list - this->mapVars = idfx::ConvertVectorToIdefixArray(inputMap); - this->mapNVars = inputMap.size(); - this->haveVs = inputHaveVs; - - // Compute indices of arrays we will be working with - for(int dir = 0 ; dir < 3 ; dir++) { - this->nghost[dir] = nghost[dir]; - this->nint[dir] = nint[dir]; - this->ntot[dir] = nint[dir]+2*nghost[dir]; - this->beg[dir] = nghost[dir]; - this->end[dir] = nghost[dir]+nint[dir]; - } - - ///////////////////////////////////////////////////////////////////////////// - // Init exchange datasets - bufferSizeX1 = 0; - bufferSizeX2 = 0; - bufferSizeX3 = 0; - - // Number of cells in X1 boundary condition: - bufferSizeX1 = nghost[IDIR] * nint[JDIR] * nint[KDIR] * mapNVars; - - if(haveVs) { - bufferSizeX1 += nghost[IDIR] * nint[JDIR] * nint[KDIR]; - #if DIMENSIONS>=2 - bufferSizeX1 += nghost[IDIR] * (nint[JDIR]+1) * nint[KDIR]; - #endif - - #if DIMENSIONS==3 - bufferSizeX1 += nghost[IDIR] * nint[JDIR] * (nint[KDIR]+1); - #endif // DIMENSIONS - } - - - BufferRecvX1[faceLeft ] = Buffer(bufferSizeX1); - BufferRecvX1[faceRight] = Buffer(bufferSizeX1); - BufferSendX1[faceLeft ] = Buffer(bufferSizeX1); - BufferSendX1[faceRight] = Buffer(bufferSizeX1); 
- - // Number of cells in X2 boundary condition (only required when problem >2D): -#if DIMENSIONS >= 2 - bufferSizeX2 = ntot[IDIR] * nghost[JDIR] * nint[KDIR] * mapNVars; - if(haveVs) { - // IDIR - bufferSizeX2 += (ntot[IDIR]+1) * nghost[JDIR] * nint[KDIR]; - #if DIMENSIONS>=2 - bufferSizeX2 += ntot[IDIR] * nghost[JDIR] * nint[KDIR]; - #endif - #if DIMENSIONS==3 - bufferSizeX2 += ntot[IDIR] * nghost[JDIR] * (nint[KDIR]+1); - #endif // DIMENSIONS - } - - BufferRecvX2[faceLeft ] = Buffer(bufferSizeX2); - BufferRecvX2[faceRight] = Buffer(bufferSizeX2); - BufferSendX2[faceLeft ] = Buffer(bufferSizeX2); - BufferSendX2[faceRight] = Buffer(bufferSizeX2); - -#endif -// Number of cells in X3 boundary condition (only required when problem is 3D): -#if DIMENSIONS ==3 - bufferSizeX3 = ntot[IDIR] * ntot[JDIR] * nghost[KDIR] * mapNVars; - - if(haveVs) { - // IDIR - bufferSizeX3 += (ntot[IDIR]+1) * ntot[JDIR] * nghost[KDIR]; - // JDIR - bufferSizeX3 += ntot[IDIR] * (ntot[JDIR]+1) * nghost[KDIR]; - // KDIR - bufferSizeX3 += ntot[IDIR] * ntot[JDIR] * nghost[KDIR]; - } - - BufferRecvX3[faceLeft ] = Buffer(bufferSizeX3); - BufferRecvX3[faceRight] = Buffer(bufferSizeX3); - BufferSendX3[faceLeft ] = Buffer(bufferSizeX3); - BufferSendX3[faceRight] = Buffer(bufferSizeX3); -#endif // DIMENSIONS - -#ifdef MPI_PERSISTENT - // Init persistent MPI communications - int procSend, procRecv; - - // X1-dir exchanges - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,0,1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Send_init(BufferSendX1[faceRight].data(), bufferSizeX1, realMPI, procSend, - thisInstance*1000, mygrid->CartComm, &sendRequestX1[faceRight])); - - MPI_SAFE_CALL(MPI_Recv_init(BufferRecvX1[faceLeft].data(), bufferSizeX1, realMPI, procRecv, - thisInstance*1000, mygrid->CartComm, &recvRequestX1[faceLeft])); - - // Send to the left - // We receive from procRecv, and we send to procSend - 
MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,0,-1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Send_init(BufferSendX1[faceLeft].data(), bufferSizeX1, realMPI, procSend, - thisInstance*1000+1,mygrid->CartComm, &sendRequestX1[faceLeft])); - - MPI_SAFE_CALL(MPI_Recv_init(BufferRecvX1[faceRight].data(), bufferSizeX1, realMPI, procRecv, - thisInstance*1000+1,mygrid->CartComm, &recvRequestX1[faceRight])); - - #if DIMENSIONS >= 2 - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,1,1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Send_init(BufferSendX2[faceRight].data(), bufferSizeX2, realMPI, procSend, - thisInstance*1000+10, mygrid->CartComm, &sendRequestX2[faceRight])); - - MPI_SAFE_CALL(MPI_Recv_init(BufferRecvX2[faceLeft].data(), bufferSizeX2, realMPI, procRecv, - thisInstance*1000+10, mygrid->CartComm, &recvRequestX2[faceLeft])); - - // Send to the left - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,1,-1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Send_init(BufferSendX2[faceLeft].data(), bufferSizeX2, realMPI, procSend, - thisInstance*1000+11, mygrid->CartComm, &sendRequestX2[faceLeft])); - - MPI_SAFE_CALL(MPI_Recv_init(BufferRecvX2[faceRight].data(), bufferSizeX2, realMPI, procRecv, - thisInstance*1000+11, mygrid->CartComm, &recvRequestX2[faceRight])); - #endif - - #if DIMENSIONS == 3 - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,2,1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Send_init(BufferSendX3[faceRight].data(), bufferSizeX3, realMPI, procSend, - thisInstance*1000+20, mygrid->CartComm, &sendRequestX3[faceRight])); - - MPI_SAFE_CALL(MPI_Recv_init(BufferRecvX3[faceLeft].data(), bufferSizeX3, realMPI, procRecv, - thisInstance*1000+20, mygrid->CartComm, &recvRequestX3[faceLeft])); - - // Send to the left - // We receive from procRecv, and we send to procSend - 
MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,2,-1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Send_init(BufferSendX3[faceLeft].data(), bufferSizeX3, realMPI, procSend, - thisInstance*1000+21, mygrid->CartComm, &sendRequestX3[faceLeft])); - - MPI_SAFE_CALL(MPI_Recv_init(BufferRecvX3[faceRight].data(), bufferSizeX3, realMPI, procRecv, - thisInstance*1000+21, mygrid->CartComm, &recvRequestX3[faceRight])); - #endif - -#endif // MPI_Persistent - - // say this instance is initialized. - isInitialized = true; - - idfx::popRegion(); -} - -// Destructor (clean up persistent communication channels) -Mpi::~Mpi() { - idfx::pushRegion("Mpi::~Mpi"); - if(isInitialized) { - // Properly clean up the mess - #ifdef MPI_PERSISTENT - idfx::cout << "Mpi(" << thisInstance - << "): Cleaning up MPI persistent communication channels" << std::endl; - for(int i=0 ; i< 2; i++) { - MPI_Request_free( &sendRequestX1[i]); - MPI_Request_free( &recvRequestX1[i]); - - #if DIMENSIONS >= 2 - MPI_Request_free( &sendRequestX2[i]); - MPI_Request_free( &recvRequestX2[i]); - #endif - - #if DIMENSIONS == 3 - MPI_Request_free( &sendRequestX3[i]); - MPI_Request_free( &recvRequestX3[i]); - #endif - } - #endif - if(thisInstance==1) { - idfx::cout << "Mpi(" << thisInstance << "): measured throughput is " - << bytesSentOrReceived/myTimer/1024.0/1024.0 << " MB/s" << std::endl; - idfx::cout << "Mpi(" << thisInstance << "): message sizes were " << std::endl; - idfx::cout << " X1: " << bufferSizeX1*sizeof(real)/1024.0/1024.0 << " MB" << std::endl; - idfx::cout << " X2: " << bufferSizeX2*sizeof(real)/1024.0/1024.0 << " MB" << std::endl; - idfx::cout << " X3: " << bufferSizeX3*sizeof(real)/1024.0/1024.0 << " MB" << std::endl; - } - isInitialized = false; - } - idfx::popRegion(); -} - -void Mpi::ExchangeX1(IdefixArray4D Vc, IdefixArray4D Vs) { - idfx::pushRegion("Mpi::ExchangeX1"); - - // Load the buffers with data - int ibeg,iend,jbeg,jend,kbeg,kend,offset,nx; - Buffer BufferLeft = BufferSendX1[faceLeft]; - Buffer 
BufferRight = BufferSendX1[faceRight]; - IdefixArray1D map = this->mapVars; - - // If MPI Persistent, start receiving even before the buffers are filled - myTimer -= MPI_Wtime(); - double tStart = MPI_Wtime(); -#ifdef MPI_PERSISTENT - MPI_Status sendStatus[2]; - MPI_Status recvStatus[2]; - - MPI_SAFE_CALL(MPI_Startall(2, recvRequestX1)); - idfx::mpiCallsTimer += MPI_Wtime() - tStart; -#endif - myTimer += MPI_Wtime(); - - // Coordinates of the ghost region which needs to be transfered - ibeg = 0; - iend = nghost[IDIR]; - nx = nghost[IDIR]; // Number of points in x - offset = end[IDIR]; // Distance between beginning of left and right ghosts - jbeg = beg[JDIR]; - jend = end[JDIR]; - - kbeg = beg[KDIR]; - kend = end[KDIR]; - - - BufferLeft.ResetPointer(); - BufferRight.ResetPointer(); - - BufferLeft.Pack(Vc, map, std::make_pair(ibeg+nx, iend+nx), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - BufferRight.Pack(Vc, map, std::make_pair(ibeg+offset-nx, iend+offset-nx), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - // Load face-centered field in the buffer - if(haveVs) { - BufferLeft.Pack(Vs, BX1s,std::make_pair(ibeg+nx+1, iend+nx+1), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - BufferRight.Pack(Vs, BX1s, std::make_pair(ibeg+offset-nx, iend+offset-nx), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - #if DIMENSIONS >= 2 - - BufferLeft.Pack(Vs, BX2s,std::make_pair(ibeg+nx, iend+nx), - std::make_pair(jbeg , jend+1), - std::make_pair(kbeg , kend)); - - BufferRight.Pack(Vs, BX2s, std::make_pair(ibeg+offset-nx, iend+offset-nx), - std::make_pair(jbeg , jend+1), - std::make_pair(kbeg , kend)); - - #endif - - #if DIMENSIONS == 3 - - BufferLeft.Pack(Vs, BX3s,std::make_pair(ibeg+nx, iend+nx), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend+1)); - - BufferRight.Pack(Vs, BX3s, std::make_pair(ibeg+offset-nx, iend+offset-nx), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend+1)); - - 
#endif - } - - // Wait for completion before sending out everything - Kokkos::fence(); - myTimer -= MPI_Wtime(); - tStart = MPI_Wtime(); -#ifdef MPI_PERSISTENT - MPI_SAFE_CALL(MPI_Startall(2, sendRequestX1)); - // Wait for buffers to be received - MPI_Waitall(2,recvRequestX1,recvStatus); - -#else - int procSend, procRecv; - - #ifdef MPI_NON_BLOCKING - MPI_Status sendStatus[2]; - MPI_Status recvStatus[2]; - MPI_Request sendRequest[2]; - MPI_Request recvRequest[2]; - - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,0,1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Isend(BufferSendX1[faceRight].data(), bufferSizeX1, realMPI, procSend, 100, - mygrid->CartComm, &sendRequest[0])); - - MPI_SAFE_CALL(MPI_Irecv(BufferRecvX1[faceLeft].data(), bufferSizeX1, realMPI, procRecv, 100, - mygrid->CartComm, &recvRequest[0])); - - // Send to the left - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,0,-1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Isend(BufferSendX1[faceLeft].data(), bufferSizeX1, realMPI, procSend, 101, - mygrid->CartComm, &sendRequest[1])); - - MPI_SAFE_CALL(MPI_Irecv(BufferRecvX1[faceRight].data(), bufferSizeX1, realMPI, procRecv, 101, - mygrid->CartComm, &recvRequest[1])); - - // Wait for recv to complete (we don't care about the sends) - MPI_Waitall(2, recvRequest, recvStatus); - - #else - MPI_Status status; - // Send to the right - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,0,1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Sendrecv(BufferSendX1[faceRight].data(), bufferSizeX1, realMPI, procSend, 100, - BufferRecvX1[faceLeft].data(), bufferSizeX1, realMPI, procRecv, 100, - mygrid->CartComm, &status)); - - // Send to the left - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,0,-1,&procRecv,&procSend )); - - 
MPI_SAFE_CALL(MPI_Sendrecv(BufferSendX1[faceLeft].data(), bufferSizeX1, realMPI, procSend, 101, - BufferRecvX1[faceRight].data(), bufferSizeX1, realMPI, procRecv, 101, - mygrid->CartComm, &status)); - #endif -#endif - myTimer += MPI_Wtime(); - idfx::mpiCallsTimer += MPI_Wtime() - tStart; - // Unpack - BufferLeft=BufferRecvX1[faceLeft]; - BufferRight=BufferRecvX1[faceRight]; - - BufferLeft.ResetPointer(); - BufferRight.ResetPointer(); - - BufferLeft.Unpack(Vc, map,std::make_pair(ibeg, iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - BufferRight.Unpack(Vc, map,std::make_pair(ibeg+offset, iend+offset), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - // We fill the ghost zones - - if(haveVs) { - BufferLeft.Unpack(Vs, BX1s, std::make_pair(ibeg, iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - BufferRight.Unpack(Vs, BX1s, std::make_pair(ibeg+offset+1, iend+offset+1), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - #if DIMENSIONS >= 2 - BufferLeft.Unpack(Vs, BX2s, std::make_pair(ibeg, iend), - std::make_pair(jbeg , jend+1), - std::make_pair(kbeg , kend)); - - BufferRight.Unpack(Vs, BX2s, std::make_pair(ibeg+offset, iend+offset), - std::make_pair(jbeg , jend+1), - std::make_pair(kbeg , kend)); - #endif - - #if DIMENSIONS == 3 - BufferLeft.Unpack(Vs, BX3s, std::make_pair(ibeg, iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend+1)); - - BufferRight.Unpack(Vs, BX3s, std::make_pair(ibeg+offset, iend+offset), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend+1)); - #endif - } - -myTimer -= MPI_Wtime(); -#ifdef MPI_NON_BLOCKING - // Wait for the sends if they have not yet completed - MPI_Waitall(2, sendRequest, sendStatus); -#endif - -#ifdef MPI_PERSISTENT - MPI_Waitall(2, sendRequestX1, sendStatus); -#endif - myTimer += MPI_Wtime(); - bytesSentOrReceived += 4*bufferSizeX1*sizeof(real); - - idfx::popRegion(); -} - - -void Mpi::ExchangeX2(IdefixArray4D Vc, IdefixArray4D 
Vs) { - idfx::pushRegion("Mpi::ExchangeX2"); - - // Load the buffers with data - int ibeg,iend,jbeg,jend,kbeg,kend,offset,ny; - Buffer BufferLeft=BufferSendX2[faceLeft]; - Buffer BufferRight=BufferSendX2[faceRight]; - IdefixArray1D map = this->mapVars; - -// If MPI Persistent, start receiving even before the buffers are filled - myTimer -= MPI_Wtime(); - double tStart = MPI_Wtime(); -#ifdef MPI_PERSISTENT - MPI_Status sendStatus[2]; - MPI_Status recvStatus[2]; - - MPI_SAFE_CALL(MPI_Startall(2, recvRequestX2)); - idfx::mpiCallsTimer += MPI_Wtime() - tStart; -#endif - myTimer += MPI_Wtime(); - - // Coordinates of the ghost region which needs to be transfered - ibeg = 0; - iend = ntot[IDIR]; - - jbeg = 0; - jend = nghost[JDIR]; - offset = end[JDIR]; // Distance between beginning of left and right ghosts - ny = nghost[JDIR]; - - kbeg = beg[KDIR]; - kend = end[KDIR]; - - BufferLeft.ResetPointer(); - BufferRight.ResetPointer(); - - BufferLeft.Pack(Vc, map, std::make_pair(ibeg , iend), - std::make_pair(jbeg+ny , jend+ny), - std::make_pair(kbeg , kend)); - - BufferRight.Pack(Vc, map, std::make_pair(ibeg , iend), - std::make_pair(jbeg+offset-ny , jend+offset-ny), - std::make_pair(kbeg , kend)); - - // Load face-centered field in the buffer - if(haveVs) { - BufferLeft.Pack(Vs, BX1s,std::make_pair(ibeg , iend+1), - std::make_pair(jbeg+ny , jend+ny), - std::make_pair(kbeg , kend)); - - BufferRight.Pack(Vs, BX1s, std::make_pair(ibeg , iend+1), - std::make_pair(jbeg+offset-ny , jend+offset-ny), - std::make_pair(kbeg , kend)); - #if DIMENSIONS >= 2 - BufferLeft.Pack(Vs, BX2s,std::make_pair(ibeg , iend), - std::make_pair(jbeg+ny+1 , jend+ny+1), - std::make_pair(kbeg , kend)); - - BufferRight.Pack(Vs, BX2s, std::make_pair(ibeg , iend), - std::make_pair(jbeg+offset-ny , jend+offset-ny), - std::make_pair(kbeg , kend)); - #endif - #if DIMENSIONS == 3 - - BufferLeft.Pack(Vs, BX3s,std::make_pair(ibeg , iend), - std::make_pair(jbeg+ny , jend+ny), - std::make_pair(kbeg , kend+1)); - - 
BufferRight.Pack(Vs, BX3s, std::make_pair(ibeg , iend), - std::make_pair(jbeg+offset-ny , jend+offset-ny), - std::make_pair(kbeg , kend+1)); - - #endif - } - - // Send to the right - Kokkos::fence(); - - myTimer -= MPI_Wtime(); - tStart = MPI_Wtime(); -#ifdef MPI_PERSISTENT - MPI_SAFE_CALL(MPI_Startall(2, sendRequestX2)); - MPI_Waitall(2,recvRequestX2,recvStatus); - -#else - int procSend, procRecv; - - #ifdef MPI_NON_BLOCKING - MPI_Status sendStatus[2]; - MPI_Status recvStatus[2]; - MPI_Request sendRequest[2]; - MPI_Request recvRequest[2]; - - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,1,1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Isend(BufferSendX2[faceRight].data(), bufferSizeX2, realMPI, procSend, 100, - mygrid->CartComm, &sendRequest[0])); - - MPI_SAFE_CALL(MPI_Irecv(BufferRecvX2[faceLeft].data(), bufferSizeX2, realMPI, procRecv, 100, - mygrid->CartComm, &recvRequest[0])); - - // Send to the left - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,1,-1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Isend(BufferSendX2[faceLeft].data(), bufferSizeX2, realMPI, procSend, 101, - mygrid->CartComm, &sendRequest[1])); - - MPI_SAFE_CALL(MPI_Irecv(BufferRecvX2[faceRight].data(), bufferSizeX2, realMPI, procRecv, 101, - mygrid->CartComm, &recvRequest[1])); - - // Wait for recv to complete (we don't care about the sends) - MPI_Waitall(2, recvRequest, recvStatus); - - #else - MPI_Status status; - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,1,1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Sendrecv(BufferSendX2[faceRight].data(), bufferSizeX2, realMPI, procSend, 200, - BufferRecvX2[faceLeft].data(), bufferSizeX2, realMPI, procRecv, 200, - mygrid->CartComm, &status)); - - - // Send to the left - // We receive from procRecv, and we send to procSend - 
MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,1,-1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Sendrecv(BufferSendX2[faceLeft].data(), bufferSizeX2, realMPI, procSend, 201, - BufferRecvX2[faceRight].data(), bufferSizeX2, realMPI, procRecv, 201, - mygrid->CartComm, &status)); - #endif -#endif - myTimer += MPI_Wtime(); - idfx::mpiCallsTimer += MPI_Wtime() - tStart; - // Unpack - BufferLeft=BufferRecvX2[faceLeft]; - BufferRight=BufferRecvX2[faceRight]; - - BufferLeft.ResetPointer(); - BufferRight.ResetPointer(); - - // We fill the ghost zones - BufferLeft.Unpack(Vc, map,std::make_pair(ibeg, iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - BufferRight.Unpack(Vc, map,std::make_pair(ibeg , iend), - std::make_pair(jbeg+offset , jend+offset), - std::make_pair(kbeg , kend)); - // We fill the ghost zones - - if(haveVs) { - BufferLeft.Unpack(Vs, BX1s, std::make_pair(ibeg, iend+1), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - BufferRight.Unpack(Vs, BX1s, std::make_pair(ibeg, iend+1), - std::make_pair(jbeg+offset , jend+offset), - std::make_pair(kbeg , kend)); - #if DIMENSIONS >= 2 - BufferLeft.Unpack(Vs, BX2s, std::make_pair(ibeg, iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - BufferRight.Unpack(Vs, BX2s, std::make_pair(ibeg, iend), - std::make_pair(jbeg+offset+1, jend+offset+1), - std::make_pair(kbeg , kend)); - #endif - #if DIMENSIONS == 3 - BufferLeft.Unpack(Vs, BX3s, std::make_pair(ibeg, iend), - std::make_pair(jbeg, jend), - std::make_pair(kbeg, kend+1)); - - BufferRight.Unpack(Vs, BX3s,std::make_pair(ibeg , iend), - std::make_pair(jbeg+offset, jend+offset), - std::make_pair(kbeg , kend+1)); - #endif - } - - myTimer -= MPI_Wtime(); -#ifdef MPI_NON_BLOCKING - // Wait for the sends if they have not yet completed - MPI_Waitall(2, sendRequest, sendStatus); -#endif - -#ifdef MPI_PERSISTENT - MPI_Waitall(2, sendRequestX2, sendStatus); -#endif - myTimer += MPI_Wtime(); - bytesSentOrReceived += 
4*bufferSizeX2*sizeof(real); - - idfx::popRegion(); -} - - -void Mpi::ExchangeX3(IdefixArray4D Vc, IdefixArray4D Vs) { - idfx::pushRegion("Mpi::ExchangeX3"); - - - // Load the buffers with data - int ibeg,iend,jbeg,jend,kbeg,kend,offset,nz; - Buffer BufferLeft=BufferSendX3[faceLeft]; - Buffer BufferRight=BufferSendX3[faceRight]; - IdefixArray1D map = this->mapVars; - - // If MPI Persistent, start receiving even before the buffers are filled - myTimer -= MPI_Wtime(); - - double tStart = MPI_Wtime(); -#ifdef MPI_PERSISTENT - MPI_Status sendStatus[2]; - MPI_Status recvStatus[2]; - - MPI_SAFE_CALL(MPI_Startall(2, recvRequestX3)); - idfx::mpiCallsTimer += MPI_Wtime() - tStart; -#endif - myTimer += MPI_Wtime(); - // Coordinates of the ghost region which needs to be transfered - ibeg = 0; - iend = ntot[IDIR]; - - jbeg = 0; - jend = ntot[JDIR]; - - kbeg = 0; - kend = nghost[KDIR]; - offset = end[KDIR]; // Distance between beginning of left and right ghosts - nz = nghost[KDIR]; - - BufferLeft.ResetPointer(); - BufferRight.ResetPointer(); - - BufferLeft.Pack(Vc, map, std::make_pair(ibeg , iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg+nz, kend+nz)); - - BufferRight.Pack(Vc, map, std::make_pair(ibeg , iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg + offset-nz, kend+ offset-nz)); - - // Load face-centered field in the buffer - if(haveVs) { - BufferLeft.Pack(Vs, BX1s,std::make_pair(ibeg , iend+1), - std::make_pair(jbeg , jend), - std::make_pair(kbeg+nz , kend+nz)); - - BufferRight.Pack(Vs, BX1s, std::make_pair(ibeg , iend+1), - std::make_pair(jbeg , jend), - std::make_pair(kbeg + offset-nz, kend+ offset-nz)); - - #if DIMENSIONS >= 2 - - BufferLeft.Pack(Vs, BX2s,std::make_pair(ibeg , iend), - std::make_pair(jbeg , jend+1), - std::make_pair(kbeg+nz , kend+nz)); - - BufferRight.Pack(Vs, BX2s, std::make_pair(ibeg , iend), - std::make_pair(jbeg , jend+1), - std::make_pair(kbeg + offset-nz, kend+ offset-nz)); - - #endif - - #if DIMENSIONS == 3 - 
BufferLeft.Pack(Vs, BX3s,std::make_pair(ibeg , iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg+nz+1 , kend+nz+1)); - - BufferRight.Pack(Vs, BX3s, std::make_pair(ibeg , iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg + offset-nz, kend+ offset-nz)); - #endif - } - - // Send to the right - Kokkos::fence(); - - myTimer -= MPI_Wtime(); - tStart = MPI_Wtime(); -#ifdef MPI_PERSISTENT - MPI_SAFE_CALL(MPI_Startall(2, sendRequestX3)); - MPI_Waitall(2,recvRequestX3,recvStatus); - idfx::mpiCallsTimer += MPI_Wtime() - tStart; - -#else - int procSend, procRecv; - - #ifdef MPI_NON_BLOCKING - MPI_Status sendStatus[2]; - MPI_Status recvStatus[2]; - MPI_Request sendRequest[2]; - MPI_Request recvRequest[2]; - - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,2,1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Isend(BufferSendX3[faceRight].data(), bufferSizeX3, realMPI, procSend, 100, - mygrid->CartComm, &sendRequest[0])); - - MPI_SAFE_CALL(MPI_Irecv(BufferRecvX3[faceLeft].data(), bufferSizeX3, realMPI, procRecv, 100, - mygrid->CartComm, &recvRequest[0])); - - // Send to the left - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,2,-1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Isend(BufferSendX3[faceLeft].data(), bufferSizeX3, realMPI, procSend, 101, - mygrid->CartComm, &sendRequest[1])); - - MPI_SAFE_CALL(MPI_Irecv(BufferRecvX3[faceRight].data(), bufferSizeX3, realMPI, procRecv, 101, - mygrid->CartComm, &recvRequest[1])); - - // Wait for recv to complete (we don't care about the sends) - MPI_Waitall(2, recvRequest, recvStatus); - - #else - MPI_Status status; - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,2,1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Sendrecv(BufferSendX3[faceRight].data(), bufferSizeX3, realMPI, procSend, 300, - BufferRecvX3[faceLeft].data(), bufferSizeX3, realMPI, procRecv, 300, 
- mygrid->CartComm, &status)); - - // Send to the left - // We receive from procRecv, and we send to procSend - MPI_SAFE_CALL(MPI_Cart_shift(mygrid->CartComm,2,-1,&procRecv,&procSend )); - - MPI_SAFE_CALL(MPI_Sendrecv(BufferSendX3[faceLeft].data(), bufferSizeX3, realMPI, procSend, 301, - BufferRecvX3[faceRight].data(), bufferSizeX3, realMPI, procRecv, 301, - mygrid->CartComm, &status)); - #endif -#endif - myTimer += MPI_Wtime(); - idfx::mpiCallsTimer += MPI_Wtime() - tStart; - // Unpack - BufferLeft=BufferRecvX3[faceLeft]; - BufferRight=BufferRecvX3[faceRight]; - - BufferLeft.ResetPointer(); - BufferRight.ResetPointer(); - - - // We fill the ghost zones - BufferLeft.Unpack(Vc, map,std::make_pair(ibeg, iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - BufferRight.Unpack(Vc, map,std::make_pair(ibeg , iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg+offset , kend+offset)); - // We fill the ghost zones - - if(haveVs) { - BufferLeft.Unpack(Vs, BX1s, std::make_pair(ibeg, iend+1), - std::make_pair(jbeg , jend), - std::make_pair(kbeg , kend)); - - BufferRight.Unpack(Vs, BX1s, std::make_pair(ibeg, iend+1), - std::make_pair(jbeg , jend), - std::make_pair(kbeg+offset , kend+offset)); - - #if DIMENSIONS >=2 - BufferLeft.Unpack(Vs, BX2s, std::make_pair(ibeg, iend), - std::make_pair(jbeg, jend+1), - std::make_pair(kbeg, kend)); - - BufferRight.Unpack(Vs, BX2s,std::make_pair(ibeg , iend), - std::make_pair(jbeg , jend+1), - std::make_pair(kbeg+offset, kend+offset)); - #endif - - #if DIMENSIONS == 3 - BufferLeft.Unpack(Vs, BX3s, std::make_pair(ibeg, iend), - std::make_pair(jbeg, jend), - std::make_pair(kbeg, kend)); - - BufferRight.Unpack(Vs, BX3s,std::make_pair(ibeg , iend), - std::make_pair(jbeg , jend), - std::make_pair(kbeg+offset+1, kend+offset+1)); - #endif - } - - myTimer -= MPI_Wtime(); -#ifdef MPI_NON_BLOCKING - // Wait for the sends if they have not yet completed - MPI_Waitall(2, sendRequest, sendStatus); -#endif - -#ifdef MPI_PERSISTENT 
- MPI_Waitall(2, sendRequestX3, sendStatus); -#endif - myTimer += MPI_Wtime(); - bytesSentOrReceived += 4*bufferSizeX3*sizeof(real); - - idfx::popRegion(); -} - - - -void Mpi::CheckConfig() { - idfx::pushRegion("Mpi::CheckConfig"); - // compile time check - #ifdef KOKKOS_ENABLE_CUDA - #if defined(MPIX_CUDA_AWARE_SUPPORT) && !MPIX_CUDA_AWARE_SUPPORT - #error Your MPI library is not CUDA Aware (check Idefix requirements). - #endif - #endif /* MPIX_CUDA_AWARE_SUPPORT */ - - // Run-time check that we can do a reduce on device arrays - IdefixArray1D src("MPIChecksrc",1); - IdefixArray1D::HostMirror srcHost = Kokkos::create_mirror_view(src); - - if(idfx::prank == 0) { - srcHost(0) = 0; - Kokkos::deep_copy(src, srcHost); - } - - if(idfx::psize > 1) { - MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN); - - // Capture segfaults - struct sigaction newHandler; - struct sigaction oldHandler; - memset(&newHandler, 0, sizeof(newHandler)); - newHandler.sa_flags = SA_SIGINFO; - newHandler.sa_sigaction = Mpi::SigErrorHandler; - sigaction(SIGSEGV, &newHandler, &oldHandler); - try { - // We next circulate the info round-robin accross all the nodes to check that - // MPI can exchange buffers in idefix arrays - - MPI_Status status; - int ierrSend, ierrRecv; - if(idfx::prank == 0) { - ierrSend = MPI_Send(src.data(), 1, MPI_INT64_T, idfx::prank+1, 1, MPI_COMM_WORLD); - ierrRecv = MPI_Recv(src.data(), 1, MPI_INT64_T, idfx::psize-1, 1, MPI_COMM_WORLD, &status); - } else { - ierrRecv = MPI_Recv(src.data(), 1, MPI_INT64_T, idfx::prank-1, 1, MPI_COMM_WORLD, &status); - // Add our own rank to the data - Kokkos::deep_copy(srcHost, src); - srcHost(0) += idfx::prank; - Kokkos::deep_copy(src, srcHost); - ierrSend = MPI_Send(src.data(), 1, MPI_INT64_T, (idfx::prank+1)%idfx::psize, 1, - MPI_COMM_WORLD); - } - - if(ierrSend != 0) { - char MPImsg[MPI_MAX_ERROR_STRING]; - int MPImsgLen; - MPI_Error_string(ierrSend, MPImsg, &MPImsgLen); - throw std::runtime_error(std::string(MPImsg, 
MPImsgLen)); - } - if(ierrRecv != 0) { - char MPImsg[MPI_MAX_ERROR_STRING]; - int MPImsgLen; - MPI_Error_string(ierrSend, MPImsg, &MPImsgLen); - throw std::runtime_error(std::string(MPImsg, MPImsgLen)); - } - } catch(std::exception &e) { - std::stringstream errmsg; - errmsg << "Your MPI library is unable to perform Send/Recv on Idefix arrays."; - errmsg << std::endl; - #ifdef KOKKOS_ENABLE_CUDA - errmsg << "Check that your MPI library is CUDA aware." << std::endl; - #elif defined(KOKKOS_ENABLE_HIP) - errmsg << "Check that your MPI library is RocM aware." << std::endl; - #else - errmsg << "Check your MPI library configuration." << std::endl; - #endif - errmsg << "Error: " << e.what() << std::endl; - IDEFIX_ERROR(errmsg); - } - // Restore old handlers - sigaction(SIGSEGV, &oldHandler, NULL ); - MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL); - } - - // Check that we have the proper end result - Kokkos::deep_copy(srcHost, src); - int64_t size = static_cast(idfx::psize); - int64_t rank = static_cast(idfx::prank); - int64_t result = rank == 0 ? size*(size-1)/2 : rank*(rank+1)/2; - - if(srcHost(0) != result) { - idfx::cout << "got " << srcHost(0) << " expected " << result << std::endl; - std::stringstream errmsg; - errmsg << "Your MPI library managed to perform MPI exchanges on Idefix Arrays, but the result "; - errmsg << "is incorrect. " << std::endl; - errmsg << "Check your MPI library configuration." << std::endl; - IDEFIX_ERROR(errmsg); - } - idfx::popRegion(); -} - -void Mpi::SigErrorHandler(int nSignum, siginfo_t* si, void* vcontext) { - std::stringstream errmsg; - errmsg << "A segmentation fault was triggered while attempting to test your MPI library."; - errmsg << std::endl; - errmsg << "Your MPI library is unable to perform reductions on Idefix arrays."; - errmsg << std::endl; - #ifdef KOKKOS_ENABLE_CUDA - errmsg << "Check that your MPI library is CUDA aware." 
<< std::endl; - #elif defined(KOKKOS_ENABLE_HIP) - errmsg << "Check that your MPI library is RocM aware." << std::endl; - #else - errmsg << "Check your MPI library configuration." << std::endl; - #endif - IDEFIX_ERROR(errmsg); -} - -// This routine check that all of the processes are synced. -// Returns true if this is the case, false otherwise - -bool Mpi::CheckSync(real timeout) { - // If no parallelism, then we're in sync! - if(idfx::psize == 1) return(true); - - int send = idfx::prank; - int recv = 0; - MPI_Request request; - - MPI_Iallreduce(&send, &recv, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &request); - - double start = MPI_Wtime(); - int flag = 0; - MPI_Status status; - - while((MPI_Wtime()-start < timeout) && !flag) { - MPI_Test(&request, &flag, &status); - // sleep for 10 ms - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - } - if(!flag) { - // We did not managed to do an allreduce, so this is a failure. - return(false); - } - if(recv != idfx::psize*(idfx::psize-1)/2) { - IDEFIX_ERROR("wrong result for synchronisation"); - } - - return(true); -} diff --git a/src/mpi.hpp b/src/mpi.hpp deleted file mode 100644 index a4250a17c..000000000 --- a/src/mpi.hpp +++ /dev/null @@ -1,269 +0,0 @@ -// *********************************************************************************** -// Idefix MHD astrophysical code -// Copyright(C) Geoffroy R. J. 
Lesur -// and other code contributors -// Licensed under CeCILL 2.1 License, see COPYING for more information -// *********************************************************************************** - -#ifndef MPI_HPP_ -#define MPI_HPP_ - -#include -#include -#include -#include "idefix.hpp" -#include "grid.hpp" - - -class DataBlock; -class Buffer { - public: - Buffer() = default; - explicit Buffer(size_t size): pointer{0}, array{IdefixArray1D("BufferArray",size)} { }; - - void* data() { - return(array.data()); - } - - int Size() { - return(array.size()); - } - - void ResetPointer() { - this->pointer = 0; - } - - void Pack(IdefixArray3D& in, - std::pair ib, - std::pair jb, - std::pair kb) { - const int ni = ib.second-ib.first; - const int ninj = (jb.second-jb.first)*ni; - const int ninjnk = (kb.second-kb.first)*ninj; - const int ibeg = ib.first; - const int jbeg = jb.first; - const int kbeg = kb.first; - const int offset = this->pointer; - - auto arr = this->array; - idefix_for("LoadBuffer3D",kb.first,kb.second,jb.first,jb.second,ib.first,ib.second, - KOKKOS_LAMBDA (int k, int j, int i) { - arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + offset ) = in(k,j,i); - }); - - // Update pointer - this->pointer += ninjnk; - } - - void Pack(IdefixArray4D& in, - const int var, - std::pair ib, - std::pair jb, - std::pair kb) { - const int ni = ib.second-ib.first; - const int ninj = (jb.second-jb.first)*ni; - const int ninjnk = (kb.second-kb.first)*ninj; - const int ibeg = ib.first; - const int jbeg = jb.first; - const int kbeg = kb.first; - const int offset = this->pointer; - - auto arr = this->array; - idefix_for("LoadBuffer4D",kb.first,kb.second,jb.first,jb.second,ib.first,ib.second, - KOKKOS_LAMBDA (int k, int j, int i) { - arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + offset ) = in(var, k,j,i); - }); - - // Update pointer - this->pointer += ninjnk; - } - - void Pack(IdefixArray4D& in, - IdefixArray1D& map, - std::pair ib, - std::pair jb, - std::pair kb) { - const int ni = 
ib.second-ib.first; - const int ninj = (jb.second-jb.first)*ni; - const int ninjnk = (kb.second-kb.first)*ninj; - const int ibeg = ib.first; - const int jbeg = jb.first; - const int kbeg = kb.first; - const int offset = this->pointer; - auto arr = this->array; - - idefix_for("LoadBuffer4D",0,map.size(), - kb.first,kb.second, - jb.first,jb.second, - ib.first,ib.second, - KOKKOS_LAMBDA (int n, int k, int j, int i) { - arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + n*ninjnk + offset ) = in(map(n), k,j,i); - }); - - // Update pointer - this->pointer += ninjnk*map.size(); - } - - void Unpack(IdefixArray3D& out, - std::pair ib, - std::pair jb, - std::pair kb) { - const int ni = ib.second-ib.first; - const int ninj = (jb.second-jb.first)*ni; - const int ninjnk = (kb.second-kb.first)*ninj; - const int ibeg = ib.first; - const int jbeg = jb.first; - const int kbeg = kb.first; - const int offset = this->pointer; - auto arr = this->array; - - idefix_for("LoadBuffer3D",kb.first,kb.second,jb.first,jb.second,ib.first,ib.second, - KOKKOS_LAMBDA (int k, int j, int i) { - out(k,j,i) = arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + offset ); - }); - - // Update pointer - this->pointer += ninjnk; - } - - void Unpack(IdefixArray4D& out, - const int var, - std::pair ib, - std::pair jb, - std::pair kb) { - const int ni = ib.second-ib.first; - const int ninj = (jb.second-jb.first)*ni; - const int ninjnk = (kb.second-kb.first)*ninj; - const int ibeg = ib.first; - const int jbeg = jb.first; - const int kbeg = kb.first; - const int offset = this->pointer; - - auto arr = this->array; - idefix_for("LoadBuffer3D",kb.first,kb.second,jb.first,jb.second,ib.first,ib.second, - KOKKOS_LAMBDA (int k, int j, int i) { - out(var,k,j,i) = arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + offset ); - }); - - // Update pointer - this->pointer += ninjnk; - } - - void Unpack(IdefixArray4D& out, - IdefixArray1D& map, - std::pair ib, - std::pair jb, - std::pair kb) { - const int ni = ib.second-ib.first; - const int ninj = 
(jb.second-jb.first)*ni; - const int ninjnk = (kb.second-kb.first)*ninj; - const int ibeg = ib.first; - const int jbeg = jb.first; - const int kbeg = kb.first; - const int offset = this->pointer; - - auto arr = this->array; - idefix_for("LoadBuffer4D",0,map.size(), - kb.first,kb.second, - jb.first,jb.second, - ib.first,ib.second, - KOKKOS_LAMBDA (int n, int k, int j, int i) { - out(map(n),k,j,i) = arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + n*ninjnk + offset ); - }); - - // Update pointer - this->pointer += ninjnk*map.size(); - } - - - private: - size_t pointer; - IdefixArray1D array; -}; - -class Mpi { - public: - Mpi() = default; - // MPI Exchange functions - void ExchangeAll(); ///< Exchange boundary elements in all directions (todo) - void ExchangeX1(IdefixArray4D inputVc, - IdefixArray4D inputVs = IdefixArray4D()); - ///< Exchange boundary elements in the X1 direction - void ExchangeX2(IdefixArray4D inputVc, - IdefixArray4D inputVs = IdefixArray4D()); - ///< Exchange boundary elements in the X2 direction - void ExchangeX3(IdefixArray4D inputVc, - IdefixArray4D inputVs = IdefixArray4D()); - ///< Exchange boundary elements in the X3 direction - - // Init from datablock - void Init(Grid *grid, std::vector inputMap, - int nghost[3], int nint[3], bool inputHaveVs = false ); - - // Check that MPI will work with the designated target (in particular GPU Direct) - static void CheckConfig(); - - // Check that MPI processes are synced - static bool CheckSync(real); - - - // Destructor - ~Mpi(); - - private: - // Because the MPI class initialise internal pointers, we do not allow copies of this class - // These lines should not be removed as they constitute a safeguard - Mpi(const Mpi&); - Mpi operator=(const Mpi&); - - static int nInstances; // total number of mpi instances in the code - int thisInstance; // unique number of the current instance - int nReferences; // # of references to this instance - bool isInitialized{false}; - - DataBlock *data; // pointer to datablock 
object - - enum {faceRight, faceLeft}; - - // Buffers for MPI calls - Buffer BufferSendX1[2]; - Buffer BufferSendX2[2]; - Buffer BufferSendX3[2]; - Buffer BufferRecvX1[2]; - Buffer BufferRecvX2[2]; - Buffer BufferRecvX3[2]; - - IdefixArray1D mapVars; - int mapNVars{0}; - - int nint[3]; //< number of internal elements of the arrays we treat - int nghost[3]; //< number of ghost zone of the arrays we treat - int ntot[3]; //< total number of cells of the arrays we treat - int beg[3]; //< begining index of the active zone - int end[3]; //< end index of the active zone - - int bufferSizeX1; - int bufferSizeX2; - int bufferSizeX3; - - bool haveVs{false}; - - // Requests for MPI persistent communications - MPI_Request sendRequestX1[2]; - MPI_Request sendRequestX2[2]; - MPI_Request sendRequestX3[2]; - MPI_Request recvRequestX1[2]; - MPI_Request recvRequestX2[2]; - MPI_Request recvRequestX3[2]; - - Grid *mygrid; - - // MPI throughput timer specific to this object - double myTimer{0}; - int64_t bytesSentOrReceived{0}; - - // Error handler used by CheckConfig - static void SigErrorHandler(int, siginfo_t* , void* ); -}; - -#endif // MPI_HPP_ diff --git a/src/mpi/CMakeLists.txt b/src/mpi/CMakeLists.txt new file mode 100644 index 000000000..6042da654 --- /dev/null +++ b/src/mpi/CMakeLists.txt @@ -0,0 +1,7 @@ +target_sources(idefix + PUBLIC ${CMAKE_CURRENT_LIST_DIR}/buffer.hpp + PUBLIC ${CMAKE_CURRENT_LIST_DIR}/exchanger.hpp + PUBLIC ${CMAKE_CURRENT_LIST_DIR}/exchanger.cpp + PUBLIC ${CMAKE_CURRENT_LIST_DIR}/mpi.hpp + PUBLIC ${CMAKE_CURRENT_LIST_DIR}/mpi.cpp +) diff --git a/src/mpi/buffer.hpp b/src/mpi/buffer.hpp new file mode 100644 index 000000000..ff4dff146 --- /dev/null +++ b/src/mpi/buffer.hpp @@ -0,0 +1,195 @@ +// *********************************************************************************** +// Idefix MHD astrophysical code +// Copyright(C) Geoffroy R. J. 
Lesur +// and other code contributors +// Licensed under CeCILL 2.1 License, see COPYING for more information +// *********************************************************************************** + +#ifndef MPI_BUFFER_HPP_ +#define MPI_BUFFER_HPP_ + +#include "idefix.hpp" +#include "arrays.hpp" + +using BoundingBox = std::array,3>; + + +class Buffer { + public: + Buffer() = default; + explicit Buffer(size_t size): pointer{0}, array{IdefixArray1D("BufferArray",size)} {}; + + // Compute the size of a bounding box + static size_t ComputeBoxSize(BoundingBox box) { + const int ni = box[IDIR][1]-box[IDIR][0]; + const int ninj = (box[JDIR][1]-box[JDIR][0])*ni; + const int ninjnk = (box[KDIR][1]-box[KDIR][0])*ninj; + return(ninjnk); + } + + void* data() { + return(array.data()); + } + + int Size() { + return(array.size()); + } + + void ResetPointer() { + this->pointer = 0; + } + + void Pack(IdefixArray3D& in, BoundingBox box) { + const int ni = box[IDIR][1]-box[IDIR][0]; + const int ninj = (box[JDIR][1]-box[JDIR][0])*ni; + const int ninjnk = (box[KDIR][1]-box[KDIR][0])*ninj; + const int ibeg = box[IDIR][0]; + const int jbeg = box[JDIR][0]; + const int kbeg = box[KDIR][0]; + const int iend = box[IDIR][1]; + const int jend = box[JDIR][1]; + const int kend = box[KDIR][1]; + const int offset = this->pointer; + + auto arr = this->array; + idefix_for("LoadBuffer3D",kbeg,kend,jbeg,jend,ibeg,iend, + KOKKOS_LAMBDA (int k, int j, int i) { + arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + offset ) = in(k,j,i); + }); + + // Update pointer + this->pointer += ninjnk; + } + + void Pack(IdefixArray4D& in, + const int var, + BoundingBox box) { + const int ni = box[IDIR][1]-box[IDIR][0]; + const int ninj = (box[JDIR][1]-box[JDIR][0])*ni; + const int ninjnk = (box[KDIR][1]-box[KDIR][0])*ninj; + const int ibeg = box[IDIR][0]; + const int jbeg = box[JDIR][0]; + const int kbeg = box[KDIR][0]; + const int iend = box[IDIR][1]; + const int jend = box[JDIR][1]; + const int kend = box[KDIR][1]; + const 
int offset = this->pointer; + + auto arr = this->array; + idefix_for("LoadBuffer4D_var",kbeg,kend,jbeg,jend,ibeg,iend, + KOKKOS_LAMBDA (int k, int j, int i) { + arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + offset ) = in(var, k,j,i); + }); + + // Update pointer + this->pointer += ninjnk; + } + + void Pack(IdefixArray4D& in, + IdefixArray1D& map, + BoundingBox box) { + const int ni = box[IDIR][1]-box[IDIR][0]; + const int ninj = (box[JDIR][1]-box[JDIR][0])*ni; + const int ninjnk = (box[KDIR][1]-box[KDIR][0])*ninj; + const int ibeg = box[IDIR][0]; + const int jbeg = box[JDIR][0]; + const int kbeg = box[KDIR][0]; + const int iend = box[IDIR][1]; + const int jend = box[JDIR][1]; + const int kend = box[KDIR][1]; + const int offset = this->pointer; + auto arr = this->array; + + idefix_for("LoadBuffer4D_map",0,map.size(), + kbeg,kend, + jbeg,jend, + ibeg,iend, + KOKKOS_LAMBDA (int n, int k, int j, int i) { + arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + n*ninjnk + offset ) = in(map(n), k,j,i); + }); + + // Update pointer + this->pointer += ninjnk*map.size(); + } + + void Unpack(IdefixArray3D& out, + BoundingBox box) { + const int ni = box[IDIR][1]-box[IDIR][0]; + const int ninj = (box[JDIR][1]-box[JDIR][0])*ni; + const int ninjnk = (box[KDIR][1]-box[KDIR][0])*ninj; + const int ibeg = box[IDIR][0]; + const int jbeg = box[JDIR][0]; + const int kbeg = box[KDIR][0]; + const int iend = box[IDIR][1]; + const int jend = box[JDIR][1]; + const int kend = box[KDIR][1]; + const int offset = this->pointer; + auto arr = this->array; + + idefix_for("UnLoadBuffer3D",kbeg,kend,jbeg,jend,ibeg,iend, + KOKKOS_LAMBDA (int k, int j, int i) { + out(k,j,i) = arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + offset ); + }); + + // Update pointer + this->pointer += ninjnk; + } + + void Unpack(IdefixArray4D& out, + const int var, + BoundingBox box) { + const int ni = box[IDIR][1]-box[IDIR][0]; + const int ninj = (box[JDIR][1]-box[JDIR][0])*ni; + const int ninjnk = (box[KDIR][1]-box[KDIR][0])*ninj; + const int 
ibeg = box[IDIR][0]; + const int jbeg = box[JDIR][0]; + const int kbeg = box[KDIR][0]; + const int iend = box[IDIR][1]; + const int jend = box[JDIR][1]; + const int kend = box[KDIR][1]; + const int offset = this->pointer; + + auto arr = this->array; + idefix_for("UnLoadBuffer4D_var",kbeg,kend,jbeg,jend,ibeg,iend, + KOKKOS_LAMBDA (int k, int j, int i) { + out(var,k,j,i) = arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + offset ); + }); + + // Update pointer + this->pointer += ninjnk; + } + + void Unpack(IdefixArray4D& out, + IdefixArray1D& map, + BoundingBox box) { + const int ni = box[IDIR][1]-box[IDIR][0]; + const int ninj = (box[JDIR][1]-box[JDIR][0])*ni; + const int ninjnk = (box[KDIR][1]-box[KDIR][0])*ninj; + const int ibeg = box[IDIR][0]; + const int jbeg = box[JDIR][0]; + const int kbeg = box[KDIR][0]; + const int iend = box[IDIR][1]; + const int jend = box[JDIR][1]; + const int kend = box[KDIR][1]; + const int offset = this->pointer; + + auto arr = this->array; + idefix_for("UnLoadBuffer4D_map",0,map.size(), + kbeg,kend, + jbeg,jend, + ibeg,iend, + KOKKOS_LAMBDA (int n, int k, int j, int i) { + out(map(n),k,j,i) = arr(i-ibeg + (j-jbeg)*ni + (k-kbeg)*ninj + n*ninjnk + offset ); + }); + + // Update pointer + this->pointer += ninjnk*map.size(); + } + + + private: + size_t pointer; + IdefixArray1D array; +}; + +#endif // MPI_BUFFER_HPP_ diff --git a/src/mpi/exchanger.cpp b/src/mpi/exchanger.cpp new file mode 100644 index 000000000..033ce4041 --- /dev/null +++ b/src/mpi/exchanger.cpp @@ -0,0 +1,308 @@ +// *********************************************************************************** +// Idefix MHD astrophysical code +// Copyright(C) Geoffroy R. J. 
Lesur +// and other code contributors +// Licensed under CeCILL 2.1 License, see COPYING for more information +// *********************************************************************************** + +#include +#include "exchanger.hpp" +#include "idefix.hpp" +#include "buffer.hpp" +#include "grid.hpp" +#include "arrays.hpp" + +//#define MPI_NON_BLOCKING +#define MPI_PERSISTENT + +int Exchanger::nInstances = 0; + +void Exchanger::Init( + Grid *grid, + int direction, + std::vector inputMap, + std::array nghost, + std::array nint, + bool inputHaveVs, + std::array overwriteBXn) { + idfx::pushRegion("Exchanger::Init"); + this->grid = grid; + this->direction = direction; + // Allocate mapVars on target and copy it from the input argument list + this->mapVars = idfx::ConvertVectorToIdefixArray(inputMap); + this->mapNVars = inputMap.size(); + this->haveVs = inputHaveVs; + + // increase the number of instances + this->thisInstance = nInstances; + + // Compute indices of arrays we will be working with + for(int dir = 0 ; dir < 3 ; dir++) { + this->nghost[dir] = nghost[dir]; + this->nint[dir] = nint[dir]; + this->ntot[dir] = nint[dir]+2*nghost[dir]; + this->beg[dir] = nghost[dir]; + this->end[dir] = nghost[dir]+nint[dir]; + } + + ///////////////////////////////////////////////////////////////////////////// + // Init exchange datasets + // Buffer size direction are for sends (i.e. 
buffer left is for a send from the left side) + + // Make left buffer + // Init zone to the full domain + for(int dir = 0 ; dir < 3 ; dir++) { + if(dir < direction) { + boxRecv[faceLeft][dir][0] = 0; + boxRecv[faceLeft][dir][1] = ntot[dir]; + } else if(dir > direction) { + boxRecv[faceLeft][dir][0] = beg[dir]; + boxRecv[faceLeft][dir][1] = end[dir]; + } else { + // dir == direction + boxRecv[faceLeft][dir][0] = 0; + boxRecv[faceLeft][dir][1] = nghost[dir]; + } + } + // Copy all the boxes from boxRecvLeft + boxRecv[faceRight] = boxRecv[faceLeft]; + boxSend = boxRecv; + + // Adjust the indices for send and receive in the direction of interest + boxRecv[faceRight][direction][0] = end[direction]; + boxRecv[faceRight][direction][1] = ntot[direction]; + + boxSend[faceLeft][direction][0] = beg[direction]; + boxSend[faceLeft][direction][1] = beg[direction]+nghost[direction]; + + boxSend[faceRight][direction][0] = end[direction] - nghost[direction]; + boxSend[faceRight][direction][1] = end[direction]; + + // Face-centered boxes + + // Add one element in the field direction + for(int component = 0 ; component < 3 ; component++) { + // Init as centered boxes + boxSendVs[component] = boxSend; + boxRecvVs[component] = boxRecv; + const int normalDir = component; + if(component != direction) { + for(int face = 0 ; face <2 ; face++) { + boxSendVs[component][face][normalDir][1] += 1; + boxRecvVs[component][face][normalDir][1] += 1; + } + } else { + // component == direction + if(!overwriteBXn[faceLeft]) boxSendVs[component][faceLeft][normalDir][0] += 1; + boxSendVs[component][faceLeft][normalDir][1] += 1; + + if(!overwriteBXn[faceRight]) boxRecvVs[component][faceRight][normalDir][0] += 1; + boxRecvVs[component][faceRight][normalDir][1] += 1; + } + } + + // Compute buffer sizes + for(int face=0 ; face < 2 ; face++) { + bufferSizeSend[face] = mapNVars * Buffer::ComputeBoxSize(boxSend[face]); + bufferSizeRecv[face] = mapNVars * Buffer::ComputeBoxSize(boxRecv[face]); + if(haveVs) { + 
for(int component = 0 ; component CartComm,direction,1,&procRecv[faceLeft],&procSend[faceRight]); + MPI_Cart_shift(grid->CartComm,direction,-1,&procRecv[faceRight],&procSend[faceLeft]); + + #ifdef MPI_PERSISTENT + + // X1-dir exchanges + // We receive from procRecv, and we send to procSend + + MPI_Send_init(BufferSend[faceRight].data(), bufferSizeSend[faceRight], realMPI, + procSend[faceRight], thisInstance*2, + grid->CartComm, &sendRequest[faceRight]); + + MPI_Recv_init(BufferRecv[faceLeft].data(), bufferSizeRecv[faceLeft], realMPI, + procRecv[faceLeft],thisInstance*2, + grid->CartComm, &recvRequest[faceLeft]); + + // Send to the left + // We receive from procRecv, and we send to procSend + + MPI_Send_init(BufferSend[faceLeft].data(), bufferSizeSend[faceLeft], realMPI, + procSend[faceLeft],thisInstance*2+1, + grid->CartComm, &sendRequest[faceLeft]); + + MPI_Recv_init(BufferRecv[faceRight].data(), bufferSizeRecv[faceRight], realMPI, + procRecv[faceRight], thisInstance*2+1, + grid->CartComm, &recvRequest[faceRight]); + + #endif // MPI_PERSISTENT + + // say this instance is initialized. 
+ isInitialized = true; + nInstances++; + + idfx::popRegion(); +} + +Exchanger::~Exchanger() { + idfx::pushRegion("Exchanger::~Exchanger"); + if(isInitialized) { + // Properly clean up the mess + #ifdef MPI_PERSISTENT + for(int i=0 ; i< 2; i++) { + MPI_Request_free( &sendRequest[i]); + MPI_Request_free( &recvRequest[i]); + } + #endif + isInitialized = false; + } + idfx::popRegion(); +} + +void Exchanger::Exchange(IdefixArray4D Vc, IdefixArray4D Vs) { + idfx::pushRegion("Mpi::ExchangeX1"); + // Load the buffers with data + Buffer BufferLeft = BufferSend[faceLeft]; + Buffer BufferRight = BufferSend[faceRight]; + IdefixArray1D map = this->mapVars; + + bool recvRight = (procRecv[faceRight] != MPI_PROC_NULL); + bool recvLeft = (procRecv[faceLeft] != MPI_PROC_NULL); + + // If MPI Persistent, start receiving even before the buffers are filled + myTimer -= MPI_Wtime(); + double tStart = MPI_Wtime(); +#ifdef MPI_PERSISTENT + MPI_Status sendStatus[2]; + MPI_Status recvStatus[2]; + + MPI_Startall(2, recvRequest); + idfx::mpiCallsTimer += MPI_Wtime() - tStart; +#endif + myTimer += MPI_Wtime(); + + BufferLeft.ResetPointer(); + BufferRight.ResetPointer(); + + BufferLeft.Pack(Vc, map, boxSend[faceLeft]); + BufferRight.Pack(Vc, map, boxSend[faceRight]); + // Load face-centered field in the buffer + if(haveVs) { + for(int component = 0 ; component < DIMENSIONS ; component++) { + BufferLeft.Pack(Vs, component, boxSendVs[component][faceLeft]); + BufferRight.Pack(Vs, component, boxSendVs[component][faceRight]); + } + } + + // Wait for completion before sending out everything + Kokkos::fence(); + myTimer -= MPI_Wtime(); + tStart = MPI_Wtime(); +#ifdef MPI_PERSISTENT + MPI_Startall(2, sendRequest); + // Wait for buffers to be received + MPI_Waitall(2,recvRequest,recvStatus); + +#else + + #ifdef MPI_NON_BLOCKING + MPI_Status sendStatus[2]; + MPI_Status recvStatus[2]; + MPI_Request sendRequest[2]; + MPI_Request recvRequest[2]; + + // We receive from procRecv, and we send to procSend + + 
MPI_Isend(BufferSend[faceRight].data(), bufferSizeSend[faceRight], realMPI, + procSend[faceRight], 100, mygrid->CartComm, &sendRequest[0]); + + MPI_Irecv(BufferRecv[faceLeft].data(), bufferSizeRecv[faceLeft], realMPI, + procRecv[faceLeft], 100, mygrid->CartComm, &recvRequest[0]); + // Send to the left + // We receive from procRecv, and we send to procSend + + MPI_Isend(BufferSend[faceLeft].data(), bufferSizeSend[faceLeft], realMPI, + procSend[faceLeft], 101, mygrid->CartComm, &sendRequest[1]); + + MPI_Irecv(BufferRecv[faceRight].data(), bufferSizeRecv[faceRight], realMPI, + procRecv[faceRight], 101, mygrid->CartComm, &recvRequest[1]); + + // Wait for recv to complete (we don't care about the sends) + MPI_Waitall(2, recvRequest, recvStatus); + + #else + MPI_Status status; + // Send to the right + // We receive from procRecv, and we send to procSend + + MPI_Sendrecv(BufferSend[faceRight].data(), bufferSizeSend[faceRight], realMPI, + procSend[faceRight], 100, + BufferRecv[faceLeft].data(), bufferSizeRecv[faceLeft], realMPI, + procRecv[faceLeft], 100, + grid->CartComm, &status); + + // Send to the left + // We receive from procRecv, and we send to procSend + + MPI_Sendrecv(BufferSend[faceLeft].data(), bufferSizeSend[faceLeft], realMPI, + procSend[faceLeft], 101, + BufferRecv[faceRight].data(), bufferSizeRecv[faceRight], realMPI, + procRecv[faceRight], 101, + grid->CartComm, &status); + #endif +#endif +myTimer += MPI_Wtime(); +idfx::mpiCallsTimer += MPI_Wtime() - tStart; +// Unpack +BufferLeft=BufferRecv[faceLeft]; +BufferRight=BufferRecv[faceRight]; + +BufferLeft.ResetPointer(); +BufferRight.ResetPointer(); + +if(recvLeft) { + BufferLeft.Unpack(Vc, map, boxRecv[faceLeft]); + if(haveVs) { + for(int component = 0 ; component < DIMENSIONS ; component++) { + BufferLeft.Unpack(Vs, component, boxRecvVs[component][faceLeft]); + } + } +} +if(recvRight) { + BufferRight.Unpack(Vc, map, boxRecv[faceRight]); + if(haveVs) { + for(int component = 0 ; component < DIMENSIONS ; 
component++) { + BufferRight.Unpack(Vs, component, boxRecvVs[component][faceRight]); + } + } +} +myTimer -= MPI_Wtime(); +#ifdef MPI_NON_BLOCKING + // Wait for the sends if they have not yet completed + MPI_Waitall(2, sendRequest, sendStatus); +#endif + +#ifdef MPI_PERSISTENT + MPI_Waitall(2, sendRequest, sendStatus); +#endif + myTimer += MPI_Wtime(); + bytesSentOrReceived += (bufferSizeRecv[faceLeft] + +bufferSizeSend[faceLeft] + +bufferSizeRecv[faceRight] + +bufferSizeSend[faceRight])*sizeof(real); + + idfx::popRegion(); +} diff --git a/src/mpi/exchanger.hpp b/src/mpi/exchanger.hpp new file mode 100644 index 000000000..2e1affcb0 --- /dev/null +++ b/src/mpi/exchanger.hpp @@ -0,0 +1,82 @@ +// *********************************************************************************** +// Idefix MHD astrophysical code +// Copyright(C) Geoffroy R. J. Lesur +// and other code contributors +// Licensed under CeCILL 2.1 License, see COPYING for more information +// *********************************************************************************** + +#ifndef MPI_EXCHANGER_HPP_ +#define MPI_EXCHANGER_HPP_ + +#include + +#include +#include +#include +#include "idefix.hpp" +#include "grid.hpp" +#include "buffer.hpp" +#include "arrays.hpp" + +class Grid; + +class Exchanger { + public: + Exchanger() = default; + void Init( Grid* grid, + int direction, + std::vector inputMap, + std::array nghost, + std::array nint, + bool inputHaveVs = false, + std::array overwriteBXn = {true, true}); + + void Exchange(IdefixArray4D Vc, IdefixArray4D Vs); + ~Exchanger(); + + static int nInstances; // total number of mpi instances in the code + int thisInstance; // unique number of the current instance + bool isInitialized{false}; + + // MPI throughput timer specific to this object + double myTimer{0}; + int64_t bytesSentOrReceived{0}; + + // Buffer sizes for throughput calculations + std::array bufferSizeSend; + std::array bufferSizeRecv; + + private: + enum {faceRight, faceLeft}; + + std::array 
boxSend, boxRecv; // bounding boxes for each face + std::array,3> boxSendVs, boxRecvVs; // 3= 3 field components + + // Buffers for MPI calls + Buffer BufferSend[2]; + Buffer BufferRecv[2]; + + int procSend[2]; // MPI process to send to in X1 direction + int procRecv[2]; // MPI process to receive from in X1 direction + + int direction; + IdefixArray1D mapVars; + int mapNVars{0}; + + int nint[3]; //< number of internal elements of the arrays we treat + int nghost[3]; //< number of ghost zone of the arrays we treat + int ntot[3]; //< total number of cells of the arrays we treat + int beg[3]; //< begining index of the active zone + int end[3]; //< end index of the active zone + + bool haveVs{false}; + + // Requests for MPI persistent communications + MPI_Request sendRequest[2]; + MPI_Request recvRequest[2]; + + Grid *grid; +}; + + +#endif // MPI_EXCHANGER_HPP_ diff --git a/src/mpi/mpi.cpp b/src/mpi/mpi.cpp new file mode 100644 index 000000000..36b4eb9a3 --- /dev/null +++ b/src/mpi/mpi.cpp @@ -0,0 +1,265 @@ +// *********************************************************************************** +// Idefix MHD astrophysical code +// Copyright(C) Geoffroy R. J. Lesur +// and other code contributors +// Licensed under CeCILL 2.1 License, see COPYING for more information +// *********************************************************************************** + + +#include "mpi.hpp" +#include +#include +#include // NOLINT [build/c++11] +#include // NOLINT [build/c++11] +#include +#include +#include "idefix.hpp" +#include "dataBlock.hpp" + + +#if defined(OPEN_MPI) && OPEN_MPI +#include "mpi-ext.h" // Needed for CUDA-aware check */ +#endif + + + + +// init the number of instances +int Mpi::nInstances = 0; + +// MPI Routines exchange +void Mpi::ExchangeAll() { + IDEFIX_ERROR("Not Implemented"); +} + +/// +/// Initialise an instance of the MPI class. 
+/// @param grid: pointer to the grid object (needed to get the MPI neighbours) +/// @param inputMap: 1st indices of inputVc which are to be exchanged (i.e, the list of variables) +/// @param nghost: size of the ghost region in each direction +/// @param nint: size of the internal region in each direction +/// @param inputHaveVs: whether the instance should also treat face-centered variable +/// (optional, default false) +/// + +void Mpi::Init(Grid *grid, std::vector inputMap, + std::array nghost, std::array nint, + std::array lbound, + std::array rbound, + bool inputHaveVs) { + idfx::pushRegion("Mpi::Init"); + + // increase the number of instances + nInstances++; + thisInstance=nInstances; + + for(int dir=0; dir<3; dir++) { + std::array overWriteBXn = {true, true}; + if(lbound[dir] == BoundaryType::shearingbox) { + overWriteBXn[faceLeft] = false; + } + if(rbound[dir] == BoundaryType::shearingbox) { + overWriteBXn[faceRight] = false; + } + + exchanger[dir].Init(grid, dir, inputMap, + nghost, nint, + inputHaveVs, overWriteBXn); + } + + isInitialized = true; + idfx::popRegion(); +} + +// Destructor (clean up persistent communication channels) +Mpi::~Mpi() { + idfx::pushRegion("Mpi::~Mpi"); + if(isInitialized) { + if(thisInstance==1) { + int bytesSentOrReceived = 0; + double myTimer = 0; + for(int dir=0; dir<3; dir++) { + bytesSentOrReceived += exchanger[dir].bytesSentOrReceived; + myTimer += exchanger[dir].myTimer; + } + idfx::cout << "Mpi(" << thisInstance << "): measured throughput is " + << bytesSentOrReceived/myTimer/1024.0/1024.0 << " MB/s" << std::endl; + idfx::cout << "Mpi(" << thisInstance << "): message sizes were " << std::endl; + idfx::cout << " X1: " + << exchanger[IDIR].bufferSizeSend[0]*sizeof(real)/1024.0/1024.0 + << " MB" << std::endl; + idfx::cout << " X2: " + << exchanger[JDIR].bufferSizeSend[0]*sizeof(real)/1024.0/1024.0 + << " MB" << std::endl; + idfx::cout << " X3: " + << exchanger[KDIR].bufferSizeSend[0]*sizeof(real)/1024.0/1024.0 + << " MB" << 
std::endl; + } + isInitialized = false; + } + idfx::popRegion(); +} + +void Mpi::ExchangeX1(IdefixArray4D Vc, IdefixArray4D Vs) { + idfx::pushRegion("Mpi::ExchangeX1"); + + exchanger[IDIR].Exchange(Vc, Vs); + idfx::popRegion(); +} + +void Mpi::ExchangeX2(IdefixArray4D Vc, IdefixArray4D Vs) { + idfx::pushRegion("Mpi::ExchangeX2"); + exchanger[JDIR].Exchange(Vc, Vs); + idfx::popRegion(); +} + +void Mpi::ExchangeX3(IdefixArray4D Vc, IdefixArray4D Vs) { + idfx::pushRegion("Mpi::ExchangeX3"); + exchanger[KDIR].Exchange(Vc, Vs); + idfx::popRegion(); +} + + +void Mpi::CheckConfig() { + idfx::pushRegion("Mpi::CheckConfig"); + // compile time check + #ifdef KOKKOS_ENABLE_CUDA + #if defined(MPIX_CUDA_AWARE_SUPPORT) && !MPIX_CUDA_AWARE_SUPPORT + #error Your MPI library is not CUDA Aware (check Idefix requirements). + #endif + #endif /* MPIX_CUDA_AWARE_SUPPORT */ + + // Run-time check that we can do a reduce on device arrays + IdefixArray1D src("MPIChecksrc",1); + IdefixArray1D::HostMirror srcHost = Kokkos::create_mirror_view(src); + + if(idfx::prank == 0) { + srcHost(0) = 0; + Kokkos::deep_copy(src, srcHost); + } + + if(idfx::psize > 1) { + MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN); + + // Capture segfaults + struct sigaction newHandler; + struct sigaction oldHandler; + memset(&newHandler, 0, sizeof(newHandler)); + newHandler.sa_flags = SA_SIGINFO; + newHandler.sa_sigaction = Mpi::SigErrorHandler; + sigaction(SIGSEGV, &newHandler, &oldHandler); + try { + // We next circulate the info round-robin accross all the nodes to check that + // MPI can exchange buffers in idefix arrays + + MPI_Status status; + int ierrSend, ierrRecv; + if(idfx::prank == 0) { + ierrSend = MPI_Send(src.data(), 1, MPI_INT64_T, idfx::prank+1, 1, MPI_COMM_WORLD); + ierrRecv = MPI_Recv(src.data(), 1, MPI_INT64_T, idfx::psize-1, 1, MPI_COMM_WORLD, &status); + } else { + ierrRecv = MPI_Recv(src.data(), 1, MPI_INT64_T, idfx::prank-1, 1, MPI_COMM_WORLD, &status); + // Add our own rank to the 
data + Kokkos::deep_copy(srcHost, src); + srcHost(0) += idfx::prank; + Kokkos::deep_copy(src, srcHost); + ierrSend = MPI_Send(src.data(), 1, MPI_INT64_T, (idfx::prank+1)%idfx::psize, 1, + MPI_COMM_WORLD); + } + + if(ierrSend != 0) { + char MPImsg[MPI_MAX_ERROR_STRING]; + int MPImsgLen; + MPI_Error_string(ierrSend, MPImsg, &MPImsgLen); + throw std::runtime_error(std::string(MPImsg, MPImsgLen)); + } + if(ierrRecv != 0) { + char MPImsg[MPI_MAX_ERROR_STRING]; + int MPImsgLen; + MPI_Error_string(ierrSend, MPImsg, &MPImsgLen); + throw std::runtime_error(std::string(MPImsg, MPImsgLen)); + } + } catch(std::exception &e) { + std::stringstream errmsg; + errmsg << "Your MPI library is unable to perform Send/Recv on Idefix arrays."; + errmsg << std::endl; + #ifdef KOKKOS_ENABLE_CUDA + errmsg << "Check that your MPI library is CUDA aware." << std::endl; + #elif defined(KOKKOS_ENABLE_HIP) + errmsg << "Check that your MPI library is RocM aware." << std::endl; + #else + errmsg << "Check your MPI library configuration." << std::endl; + #endif + errmsg << "Error: " << e.what() << std::endl; + IDEFIX_ERROR(errmsg); + } + // Restore old handlers + sigaction(SIGSEGV, &oldHandler, NULL ); + MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL); + } + + // Check that we have the proper end result + Kokkos::deep_copy(srcHost, src); + int64_t size = static_cast(idfx::psize); + int64_t rank = static_cast(idfx::prank); + int64_t result = rank == 0 ? size*(size-1)/2 : rank*(rank+1)/2; + + if(srcHost(0) != result) { + idfx::cout << "got " << srcHost(0) << " expected " << result << std::endl; + std::stringstream errmsg; + errmsg << "Your MPI library managed to perform MPI exchanges on Idefix Arrays, but the result "; + errmsg << "is incorrect. " << std::endl; + errmsg << "Check your MPI library configuration." 
<< std::endl; + IDEFIX_ERROR(errmsg); + } + idfx::popRegion(); +} + +void Mpi::SigErrorHandler(int nSignum, siginfo_t* si, void* vcontext) { + std::stringstream errmsg; + errmsg << "A segmentation fault was triggered while attempting to test your MPI library."; + errmsg << std::endl; + errmsg << "Your MPI library is unable to perform reductions on Idefix arrays."; + errmsg << std::endl; + #ifdef KOKKOS_ENABLE_CUDA + errmsg << "Check that your MPI library is CUDA aware." << std::endl; + #elif defined(KOKKOS_ENABLE_HIP) + errmsg << "Check that your MPI library is RocM aware." << std::endl; + #else + errmsg << "Check your MPI library configuration." << std::endl; + #endif + IDEFIX_ERROR(errmsg); +} + +// This routine check that all of the processes are synced. +// Returns true if this is the case, false otherwise + +bool Mpi::CheckSync(real timeout) { + // If no parallelism, then we're in sync! + if(idfx::psize == 1) return(true); + + int send = idfx::prank; + int recv = 0; + MPI_Request request; + + MPI_Iallreduce(&send, &recv, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &request); + + double start = MPI_Wtime(); + int flag = 0; + MPI_Status status; + + while((MPI_Wtime()-start < timeout) && !flag) { + MPI_Test(&request, &flag, &status); + // sleep for 10 ms + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + if(!flag) { + // We did not managed to do an allreduce, so this is a failure. + return(false); + } + if(recv != idfx::psize*(idfx::psize-1)/2) { + IDEFIX_ERROR("wrong result for synchronisation"); + } + + return(true); +} diff --git a/src/mpi/mpi.hpp b/src/mpi/mpi.hpp new file mode 100644 index 000000000..b8f929326 --- /dev/null +++ b/src/mpi/mpi.hpp @@ -0,0 +1,74 @@ +// *********************************************************************************** +// Idefix MHD astrophysical code +// Copyright(C) Geoffroy R. J. 
Lesur +// and other code contributors +// Licensed under CeCILL 2.1 License, see COPYING for more information +// *********************************************************************************** + +#ifndef MPI_MPI_HPP_ +#define MPI_MPI_HPP_ + +#include + +#include +#include +#include +#include "idefix.hpp" +#include "grid.hpp" +#include "buffer.hpp" +#include "exchanger.hpp" + + +class DataBlock; + + +class Mpi { + public: + Mpi() = default; + // MPI Exchange functions + void ExchangeAll(); ///< Exchange boundary elements in all directions (todo) + void ExchangeX1(IdefixArray4D inputVc, + IdefixArray4D inputVs = IdefixArray4D()); + ///< Exchange boundary elements in the X1 direction + void ExchangeX2(IdefixArray4D inputVc, + IdefixArray4D inputVs = IdefixArray4D()); + ///< Exchange boundary elements in the X2 direction + void ExchangeX3(IdefixArray4D inputVc, + IdefixArray4D inputVs = IdefixArray4D()); + ///< Exchange boundary elements in the X3 direction + + // Init from datablock + void Init(Grid *grid, std::vector inputMap, + std::array nghost, std::array nint, + std::array lbound, + std::array rbound, + bool inputHaveVs = false ); + + // Check that MPI will work with the designated target (in particular GPU Direct) + static void CheckConfig(); + + // Check that MPI processes are synced + static bool CheckSync(real); + + + // Destructor + ~Mpi(); + + private: + enum {faceRight, faceLeft}; + // Because the MPI class initialise internal pointers, we do not allow copies of this class + // These lines should not be removed as they constitute a safeguard + Mpi(const Mpi&); + Mpi operator=(const Mpi&); + + static int nInstances; // total number of mpi instances in the code + int thisInstance; // unique number of the current instance + int nReferences; // # of references to this instance + bool isInitialized{false}; + + std::array exchanger; ///< exchangers in each direction + // Error handler used by CheckConfig + static void SigErrorHandler(int, siginfo_t* , void* 
); +}; + +#endif // MPI_MPI_HPP_ diff --git a/src/output/dump.cpp b/src/output/dump.cpp index 955e06e72..96a40b548 100644 --- a/src/output/dump.cpp +++ b/src/output/dump.cpp @@ -71,7 +71,7 @@ void Dump::CreateMPIDataType(GridBox gb, bool read) { int size[3]; int subsize[3]; - // the grid is required to now the current MPÏ domain decomposition + // the grid is required to know the current MPI domain decomposition Grid *grid = data->mygrid; // Dimensions for cell-centered fields diff --git a/src/output/dump.hpp b/src/output/dump.hpp index d823ddcaf..87cb7a3ba 100644 --- a/src/output/dump.hpp +++ b/src/output/dump.hpp @@ -88,13 +88,13 @@ class DumpField { h4Darray, var, Kokkos::ALL, Kokkos::ALL, Kokkos::ALL); return(arr3D); } else if(arrayType==Device3D) { - IdefixHostArray3D arr3D = Kokkos::create_mirror(d3Darray); + IdefixHostArray3D arr3D = Kokkos::create_mirror(Kokkos::HostSpace(), d3Darray); Kokkos::deep_copy(arr3D,d3Darray); return(arr3D); } else if(arrayType==Device4D) { IdefixArray3D arrDev3D = Kokkos::subview( d4Darray, var, Kokkos::ALL, Kokkos::ALL, Kokkos::ALL); - IdefixHostArray3D arr3D = Kokkos::create_mirror(arrDev3D); + IdefixHostArray3D arr3D = Kokkos::create_mirror(Kokkos::HostSpace(), arrDev3D); Kokkos::deep_copy(arr3D,arrDev3D); return(arr3D); } else { diff --git a/src/output/scalarField.hpp b/src/output/scalarField.hpp index 9af7b1986..6cbb9dd40 100644 --- a/src/output/scalarField.hpp +++ b/src/output/scalarField.hpp @@ -33,7 +33,7 @@ class ScalarField { h4Darray, var, Kokkos::ALL, Kokkos::ALL, Kokkos::ALL); return(arr3D); } else if(type==Device3D) { - IdefixHostArray3D arr3D = Kokkos::create_mirror(d3Darray); + IdefixHostArray3D arr3D = Kokkos::create_mirror(Kokkos::HostSpace(), d3Darray); Kokkos::deep_copy(arr3D,d3Darray); return(arr3D); } else if(type==Device4D) { diff --git a/src/output/xdmf.cpp b/src/output/xdmf.cpp index 23ddaa03a..4e7ff6843 100644 --- a/src/output/xdmf.cpp +++ b/src/output/xdmf.cpp @@ -286,18 +286,18 @@ int Xdmf::Write() { 
idfx::pushRegion("Xdmf::Write"); fs::path filename; fs::path filename_xmf; - hid_t err; + [[maybe_unused]] hid_t err; idfx::cout << "Xdmf: Write file n " << xdmfFileNumber << "..." << std::flush; timer.reset(); // Create a copy of the dataBlock on Host, and sync it. #if DIMENSIONS == 1 - int tot_dim = 1; + [[maybe_unused]] int tot_dim = 1; #elif DIMENSIONS == 2 int tot_dim = 2; #elif DIMENSIONS == 3 - int tot_dim = 3; + [[maybe_unused]] int tot_dim = 3; #endif std::stringstream ssfileName, ssfileNameXmf, ssxdmfFileNum; @@ -344,7 +344,8 @@ int Xdmf::Write() { #endif // Layout of the field data in memory - hsize_t field_data_size[3], field_data_start[3], field_data_subsize[3], stride[3]; + [[maybe_unused]] hsize_t field_data_size[3], field_data_start[3]; + [[maybe_unused]] hsize_t field_data_subsize[3], stride[3]; #ifdef WITH_MPI for(int dir = 0; dir < 3 ; dir++) { field_data_size[dir] = static_cast(this->mpi_data_size[dir]); @@ -454,11 +455,10 @@ void Xdmf::WriteHeader( hid_t tspace, tattr; hid_t unit_info, unit_attr; hid_t group; - hid_t file_access = 0; #ifdef WITH_MPI hid_t plist_id_mpiio = 0; /* for collective MPI I/O */ #endif - hid_t err; + [[maybe_unused]] hid_t err; hsize_t dimstr; @@ -814,7 +814,7 @@ void Xdmf::WriteScalar( dataset_name = var_name.c_str(); std::string dataset_label = dataset_name.c_str(); std::transform(dataset_label.begin(), dataset_label.end(), dataset_label.begin(), ::tolower); - hid_t err, dataset; + [[maybe_unused]] hid_t err, dataset; // We define the dataset that contain the fields. 
diff --git a/src/pydefix.cpp b/src/pydefix.cpp index f0d8bc784..1b8168088 100644 --- a/src/pydefix.cpp +++ b/src/pydefix.cpp @@ -68,7 +68,7 @@ py::array_t GatherIdefixArray(IdefixHostArray3D // np_int: size that should be copied into global // beg: offset in the incoming array where copy should begin // gbeg: offset in the global array where copy should be begin - std::array np_int,np_tot, beg, gbeg; + [[maybe_unused]] std::array np_int,np_tot, beg, gbeg; IdefixHostArray3D buf; if(rank==0) { @@ -127,7 +127,7 @@ py::array_t GatherIdefixArray(IdefixHostArray3D }// End loop on target rank for root process } else { // MPI prank >0 std::array np_int = dataHost.np_int; - std::array np_tot = dataHost.np_tot; + [[maybe_unused]] std::array np_tot = dataHost.np_tot; std::array gbeg = dataHost.gbeg; std::array beg = dataHost.beg; @@ -228,6 +228,11 @@ PYBIND11_EMBEDDED_MODULE(pydefix, m) { m.attr("BX1s") = BX1s; , m.attr("BX2s") = BX2s; , m.attr("BX3s") = BX3s; ) + #ifdef EVOLVE_VECTOR_POTENTIAL + m.attr("AX1e") = AX1e; + m.attr("AX2e") = AX2e; + m.attr("AX3e") = AX3e; + #endif #endif m.attr("IDIR") = IDIR; m.attr("JDIR") = JDIR; @@ -273,6 +278,9 @@ Pydefix::Pydefix(Input &input) { idfx::cout << "Pydefix: start Python interpreter." 
<< std::endl; py::initialize_interpreter(); + py::exec("import sys; print(f'Pydefix: Python Version: {sys.version}')"); + py::exec("print(f'Pydefix: Executable Path: {sys.executable}')"); + py::exec("print(f'Pydefix: Sys Path: {sys.path}')"); } this->scriptFilename = input.Get("Python","script",0); if(scriptFilename.substr(scriptFilename.length() - 3, 3).compare(".py")==0) { diff --git a/src/rkl/rkl.hpp b/src/rkl/rkl.hpp index c8dd8cb96..6f22de6f5 100644 --- a/src/rkl/rkl.hpp +++ b/src/rkl/rkl.hpp @@ -198,7 +198,8 @@ RKLegendre::RKLegendre(Input &input, Fluid* hydroin) { nvarRKL = varListHost.size(); #ifdef WITH_MPI - mpi.Init(data->mygrid, varListHost, data->nghost.data(), data->np_int.data(), haveVs); + mpi.Init(data->mygrid, varListHost, data->nghost, data->np_int, + data->lbound, data->rbound, haveVs); #endif diff --git a/src/setup.cpp b/src/setup.cpp index 8c64f4447..b82f118f8 100644 --- a/src/setup.cpp +++ b/src/setup.cpp @@ -12,7 +12,9 @@ // own implementation of the constructor, initflow and destructor __attribute__((weak)) Setup::Setup(Input &input, Grid &grid, DataBlock &data, Output &output) { - IDEFIX_WARNING("Caution, this is the default Setup constructor and it does nothing!"); + #ifndef WITH_PYTHON + IDEFIX_WARNING("Caution, this is the default Setup constructor and it does nothing!"); + #endif } __attribute__((weak)) void Setup::InitFlow(DataBlock &data) { diff --git a/src/utils/column.cpp b/src/utils/column.cpp index 34d55f56b..4657abc55 100644 --- a/src/utils/column.cpp +++ b/src/utils/column.cpp @@ -56,7 +56,7 @@ Column::Column(int dir, int sign, DataBlock *data) std::vector mapVars; mapVars.push_back(ntarget); - this->mpi.Init(data->mygrid, mapVars, data->nghost.data(), data->np_int.data()); + this->mpi.Init(data->mygrid, mapVars, data->nghost, data->np_int, data->lbound, data->rbound); this->nproc = data->mygrid->nproc; #endif idfx::popRegion(); diff --git a/src/utils/lookupTable.hpp b/src/utils/lookupTable.hpp index c54fdb602..72d13a838 100644 
--- a/src/utils/lookupTable.hpp +++ b/src/utils/lookupTable.hpp @@ -183,10 +183,10 @@ LookupTable::LookupTable(std::vector filenames, this->offsetDev = IdefixArray1D ("Table_offset", kDim); this->dataDev = IdefixArray1D ("Table_data", dataVector.size()); - this->xinHost = Kokkos::create_mirror_view(this->xinDev); - this->dimensionsHost = Kokkos::create_mirror_view(this->dimensionsDev); - this->offsetHost = Kokkos::create_mirror_view(this->offsetDev); - this->dataHost = Kokkos::create_mirror_view(this->dataDev); + this->xinHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->xinDev); + this->dimensionsHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->dimensionsDev); + this->offsetHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->offsetDev); + this->dataHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->dataDev); // Copy data in memory for(uint64_t i = 0 ; i < dataVector.size() ; i++) { @@ -346,10 +346,10 @@ LookupTable::LookupTable(std::string filename, char delimiter, bool errOOB this->offsetDev = IdefixArray1D ("Table_offset", kDim); this->dataDev = IdefixArray1D ("Table_data", size[0]*size[1]); - this->xinHost = Kokkos::create_mirror_view(this->xinDev); - this->dimensionsHost = Kokkos::create_mirror_view(this->dimensionsDev); - this->offsetHost = Kokkos::create_mirror_view(this->offsetDev); - this->dataHost = Kokkos::create_mirror_view(this->dataDev); + this->xinHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->xinDev); + this->dimensionsHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->dimensionsDev); + this->offsetHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->offsetDev); + this->dataHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->dataDev); // Fill the arrays with the std::vector content if(idfx::prank == 0) { @@ -458,10 +458,10 @@ LookupTable::LookupTable(Kokkos::View array, this->offsetDev = IdefixArray1D ("Table_offset", kDim); this->dataDev = IdefixArray1D 
("Table_data", sizeTotal); - this->xinHost = Kokkos::create_mirror_view(this->xinDev); - this->dimensionsHost = Kokkos::create_mirror_view(this->dimensionsDev); - this->offsetHost = Kokkos::create_mirror_view(this->offsetDev); - this->dataHost = Kokkos::create_mirror_view(this->dataDev); + this->xinHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->xinDev); + this->dimensionsHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->dimensionsDev); + this->offsetHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->offsetDev); + this->dataHost = Kokkos::create_mirror_view(Kokkos::HostSpace(), this->dataDev); // Copy data in memory for(uint64_t n = 0 ; n < sizeTotal ; n++) { diff --git a/test.py b/test.py new file mode 100755 index 000000000..6ded87fa0 --- /dev/null +++ b/test.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 + +import os +import sys +import pytest + +# set IDEFIX_DIR +source_dir = os.path.dirname(__file__) +os.environ["IDEFIX_DIR"] = source_dir + +# idefix test class +sys.path.append(source_dir) +from pytools.idfx_test_run import IdexPytestRunner + +# should be global so it remember state (last build) and avoids +# to build for each run if we just changed the ini file and run options. 
+gblIdefixPytestRunner = IdexPytestRunner(__file__) + +# define the pytest test +@pytest.mark.parametrize("config", gblIdefixPytestRunner.genTests()) +def test_idefix_build_run_check(config): + gblIdefixPytestRunner.run(config) + +# if called directly as a script +if __name__ == "__main__": + gblIdefixPytestRunner.main(all=True) diff --git a/test/Dust/DustEnergy/testme.json b/test/Dust/DustEnergy/testme.json new file mode 100644 index 000000000..9a78834b9 --- /dev/null +++ b/test/Dust/DustEnergy/testme.json @@ -0,0 +1,12 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-implicit.ini"], + "noplot": true, + "reconstruction": 2, + "tolerance": 0 + } + ] +} diff --git a/test/Dust/DustEnergy/testme.py b/test/Dust/DustEnergy/testme.py index 7187d299b..12d476b66 100755 --- a/test/Dust/DustEnergy/testme.py +++ b/test/Dust/DustEnergy/testme.py @@ -26,7 +26,7 @@ def testMe(test): test.nonRegressionTest(filename=name) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/Dust/DustyShock/testme.json b/test/Dust/DustyShock/testme.json new file mode 100644 index 000000000..d8ddad44f --- /dev/null +++ b/test/Dust/DustyShock/testme.json @@ -0,0 +1,11 @@ +{ + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-implicit.ini"], + "noplot": true, + "reconstruction": 2, + "tolerance": 1e-14 + } + ] +} diff --git a/test/Dust/DustyShock/testme.py b/test/Dust/DustyShock/testme.py index 2b03d1636..622f34772 100755 --- a/test/Dust/DustyShock/testme.py +++ b/test/Dust/DustyShock/testme.py @@ -26,7 +26,7 @@ def testMe(test): test.nonRegressionTest(filename=name,tolerance=1e-14) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/Dust/DustyWave/testme.json b/test/Dust/DustyWave/testme.json new file mode 100644 index 000000000..0050f364e --- /dev/null +++ b/test/Dust/DustyWave/testme.json @@ -0,0 +1,12 @@ +{ + 
"namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-implicit.ini"], + "noplot": true, + "reconstruction": 2, + "tolerance": 1e-14 + } + ] +} diff --git a/test/Dust/DustyWave/testme.py b/test/Dust/DustyWave/testme.py index 2b03d1636..622f34772 100755 --- a/test/Dust/DustyWave/testme.py +++ b/test/Dust/DustyWave/testme.py @@ -26,7 +26,7 @@ def testMe(test): test.nonRegressionTest(filename=name,tolerance=1e-14) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/HD/FargoPlanet/testme.json b/test/HD/FargoPlanet/testme.json new file mode 100644 index 000000000..e1d735828 --- /dev/null +++ b/test/HD/FargoPlanet/testme.json @@ -0,0 +1,25 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini", "idefix-rkl.ini"], + "noplot": true, + "reconstruction": 2, + "single": false, + "mpi": [false, true], + "dec": [2, 2], + "tolerance": 1e-13 + } + ], + "when": [ + { + "conditions": { + "ini": "idefix-rkl.ini" + }, + "apply": { + "tolerance": 1e-12 + } + } + ] +} diff --git a/test/HD/FargoPlanet/testme.py b/test/HD/FargoPlanet/testme.py index 5890d010f..b25a74bb0 100755 --- a/test/HD/FargoPlanet/testme.py +++ b/test/HD/FargoPlanet/testme.py @@ -27,7 +27,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp",tolerance=mytol) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2'] diff --git a/test/HD/MachReflection/testme.json b/test/HD/MachReflection/testme.json new file mode 100644 index 000000000..be2d8d950 --- /dev/null +++ b/test/HD/MachReflection/testme.json @@ -0,0 +1,15 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-hll.ini","idefix-hllc.ini","idefix-tvdlf.ini"], + "noplot": true, + "reconstruction": 2, + "single": false, + "mpi": [false, true], + "dec": [2, 2], + "tolerance": 0 + } + ] +} diff --git 
a/test/HD/MachReflection/testme.py b/test/HD/MachReflection/testme.py index ce87f3923..7e6121815 100755 --- a/test/HD/MachReflection/testme.py +++ b/test/HD/MachReflection/testme.py @@ -23,7 +23,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp") -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2'] diff --git a/test/HD/SedovBlastWave/idefix.ini b/test/HD/SedovBlastWave/idefix.ini index 51122521c..107b719cd 100644 --- a/test/HD/SedovBlastWave/idefix.ini +++ b/test/HD/SedovBlastWave/idefix.ini @@ -25,6 +25,5 @@ X3-beg periodic X3-end periodic [Output] -vtk 0.1 -xdmf 0.1 -dmp 0.1 +vtk 0.1 +dmp 0.1 diff --git a/test/HD/SedovBlastWave/testme.json b/test/HD/SedovBlastWave/testme.json new file mode 100644 index 000000000..11327598b --- /dev/null +++ b/test/HD/SedovBlastWave/testme.json @@ -0,0 +1,28 @@ +{ + "namings": "definitionFile,ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "definitionFile": "definitions.hpp", + "ini": ["idefix.ini"], + "vectPot": false, + "reconstruction": 2, + "single": false, + "mpi": true, + "dec": [2, 2, 2 ], + "standardTest": false, + "tolerance": 0 + },{ + "dumpname": "dump.0001.dmp", + "definitionFile": "definitions-spherical.hpp", + "ini": ["idefix-spherical.ini"], + "vectPot": false, + "reconstruction": 2, + "single": false, + "mpi": true, + "dec": [2, 2, 2 ], + "standardTest": false, + "tolerance": 0 + } + ] +} diff --git a/test/HD/SedovBlastWave/testme.py b/test/HD/SedovBlastWave/testme.py index 95fdd030a..c3ef7025f 100755 --- a/test/HD/SedovBlastWave/testme.py +++ b/test/HD/SedovBlastWave/testme.py @@ -12,7 +12,7 @@ name="dump.0001.dmp" -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2','2'] diff --git a/test/HD/ShearingBox/testme.json b/test/HD/ShearingBox/testme.json new file mode 100644 index 000000000..3e473d0ba --- /dev/null +++ b/test/HD/ShearingBox/testme.json @@ -0,0 +1,15 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": 
"dump.0001.dmp", + "ini": ["idefix.ini","idefix-fargo.ini"], + "noplot": true, + "reconstruction": 2, + "single": false, + "mpi": false, + "dec": ["2","1","2"], + "tolerance": 1e-15 + } + ] +} diff --git a/test/HD/ShearingBox/testme.py b/test/HD/ShearingBox/testme.py index 3739910a0..18c792656 100755 --- a/test/HD/ShearingBox/testme.py +++ b/test/HD/ShearingBox/testme.py @@ -23,7 +23,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp",tolerance=tolerance) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','1','2'] diff --git a/test/HD/ViscousDisk/testme.json b/test/HD/ViscousDisk/testme.json new file mode 100644 index 000000000..df8c19d86 --- /dev/null +++ b/test/HD/ViscousDisk/testme.json @@ -0,0 +1,15 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-rkl.ini"], + "noplot": true, + "reconstruction": 2, + "single": false, + "mpi": false, + "dec": ["2","1","2"], + "tolerance": 3e-15 + } + ] +} diff --git a/test/HD/ViscousDisk/testme.py b/test/HD/ViscousDisk/testme.py index 1e8ad742b..34628fd80 100755 --- a/test/HD/ViscousDisk/testme.py +++ b/test/HD/ViscousDisk/testme.py @@ -24,7 +24,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp",tolerance=mytol) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2'] diff --git a/test/HD/ViscousFlowPastCylinder/testme.json b/test/HD/ViscousFlowPastCylinder/testme.json new file mode 100644 index 000000000..7b3763436 --- /dev/null +++ b/test/HD/ViscousFlowPastCylinder/testme.json @@ -0,0 +1,23 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-rkl.ini"], + "noplot": true, + "reconstruction": 2, + "single": false, + "mpi": [false, true], + "dec": ["2","2"], + "tolerance": 3e-14 + } + ], + "when": { + "conditions": { + "ini": "idefix-rkl.ini" + }, + "apply": { + "tolerance": 1e-8 + } + } +} diff --git 
a/test/HD/ViscousFlowPastCylinder/testme.py b/test/HD/ViscousFlowPastCylinder/testme.py index c831e8093..832a80e56 100755 --- a/test/HD/ViscousFlowPastCylinder/testme.py +++ b/test/HD/ViscousFlowPastCylinder/testme.py @@ -27,7 +27,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp",tolerance=mytol) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2'] diff --git a/test/HD/sod-iso/testme.json b/test/HD/sod-iso/testme.json new file mode 100644 index 000000000..5016415d6 --- /dev/null +++ b/test/HD/sod-iso/testme.json @@ -0,0 +1,33 @@ +{ + "namings": "ini,single,reconstruction", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-hll.ini","idefix-hllc.ini","idefix-tvdlf.ini"], + "vectPot": false, + "noplot": true, + "reconstruction": [2, 3], + "single": [false], + "mpi": false, + "tolerance": 0 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix-rk3.ini","idefix-hllc-rk3.ini"], + "vectPot": false, + "noplot": true, + "reconstruction": [4], + "single": [false], + "mpi": false, + "tolerance": 0 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-hll.ini","idefix-hllc.ini","idefix-tvdlf.ini"], + "vectPot": false, + "noplot": true, + "reconstruction": [2], + "single": [true], + "mpi": false, + "tolerance": 0 + } + ] +} diff --git a/test/HD/sod-iso/testme.py b/test/HD/sod-iso/testme.py index e8f0f9ae2..109109baf 100755 --- a/test/HD/sod-iso/testme.py +++ b/test/HD/sod-iso/testme.py @@ -28,7 +28,7 @@ def testMe(test): test.nonRegressionTest(filename=name) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/HD/sod/testme.json b/test/HD/sod/testme.json new file mode 100644 index 000000000..9128c08d4 --- /dev/null +++ b/test/HD/sod/testme.json @@ -0,0 +1,33 @@ +{ + "namings": "ini,single,reconstruction", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": 
["idefix.ini","idefix-hll.ini","idefix-hllc.ini","idefix-tvdlf.ini"], + "noplot": true, + "vectPot": false, + "single": [false], + "reconstruction": [2,3], + "mpi": false, + "tolerance": 0 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix-rk3.ini","idefix-hllc-rk3.ini"], + "noplot": true, + "vectPot": false, + "single": [false], + "reconstruction": [4], + "mpi": false, + "tolerance": 0 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-hll.ini","idefix-hllc.ini","idefix-tvdlf.ini"], + "noplot": true, + "vectPot": false, + "reconstruction": [2], + "mpi": false, + "single": [true], + "tolerance": 0 + } + ] +} diff --git a/test/HD/sod/testme.py b/test/HD/sod/testme.py index e8f0f9ae2..109109baf 100755 --- a/test/HD/sod/testme.py +++ b/test/HD/sod/testme.py @@ -28,7 +28,7 @@ def testMe(test): test.nonRegressionTest(filename=name) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/HD/thermalDiffusion/testme.json b/test/HD/thermalDiffusion/testme.json new file mode 100644 index 000000000..2e89ad747 --- /dev/null +++ b/test/HD/thermalDiffusion/testme.json @@ -0,0 +1,15 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-rkl.ini"], + "noplot": true, + "reconstruction": 2, + "single": false, + "mpi": false, + "dec": ["2","1","2"], + "tolerance": 0 + } + ] +} diff --git a/test/HD/thermalDiffusion/testme.py b/test/HD/thermalDiffusion/testme.py index 3806293d0..1d1a181b0 100755 --- a/test/HD/thermalDiffusion/testme.py +++ b/test/HD/thermalDiffusion/testme.py @@ -23,7 +23,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp") -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/IO/dump/CMakeLists.txt b/test/IO/dump/CMakeLists.txt new file mode 100644 index 000000000..629aed2d1 --- /dev/null +++ b/test/IO/dump/CMakeLists.txt @@ -0,0 +1 @@ +enable_idefix_property(Idefix_MHD) diff --git 
a/test/IO/dump/definitions.hpp b/test/IO/dump/definitions.hpp new file mode 100644 index 000000000..a854ad9e3 --- /dev/null +++ b/test/IO/dump/definitions.hpp @@ -0,0 +1,5 @@ +#define COMPONENTS 3 +#define DIMENSIONS 3 +//#define DEBUG + +#define GEOMETRY CARTESIAN diff --git a/test/MHD/OrszagTang3D/idefix-checkrestart.ini b/test/IO/dump/idefix.ini similarity index 100% rename from test/MHD/OrszagTang3D/idefix-checkrestart.ini rename to test/IO/dump/idefix.ini diff --git a/test/IO/dump/setup.cpp b/test/IO/dump/setup.cpp new file mode 100644 index 000000000..1db56f5b0 --- /dev/null +++ b/test/IO/dump/setup.cpp @@ -0,0 +1,277 @@ +#include "idefix.hpp" +#include "setup.hpp" + +/*********************************************/ +/** +Customized random number generator +Allow one to have consistent random numbers +generators on different architectures. +**/ +/*********************************************/ + +Output* myOutput; +int outnum; +// Analysis function +// This analysis checks that the restart routines are performing as they should +void Analysis(DataBlock& data) { + + + idfx::cout << "Analysis: Checking restart routines" << std::endl; + + // Trigger dump creation + // data.SetBoundaries(); + myOutput->ForceWriteDump(data); + + // Mirror data on Host + DataBlockHost d(data); + + // Sync it + d.SyncFromDevice(); + + // Create local arrays to store the current physical state + IdefixHostArray4D myVc = IdefixHostArray4D("myVc", d.Vc.extent(0), data.np_tot[KDIR], data.np_tot[JDIR],data.np_tot[IDIR]); + IdefixHostArray4D myVs = IdefixHostArray4D("myVs", DIMENSIONS, data.np_tot[KDIR]+KOFFSET, data.np_tot[JDIR]+JOFFSET,data.np_tot[IDIR]+IOFFSET); + #ifdef EVOLVE_VECTOR_POTENTIAL + IdefixHostArray4D myVe = IdefixHostArray4D("myVe", AX3e+1, data.np_tot[KDIR]+KOFFSET, data.np_tot[JDIR]+JOFFSET,data.np_tot[IDIR]+IOFFSET); + #endif + // Transfer the datablock to myVc and myVs + for(int n = 0; n < d.Vc.extent(0) ; n++) { + for(int k = 0; k < d.np_tot[KDIR] ; k++) { + for(int j 
= 0; j < d.np_tot[JDIR] ; j++) { + for(int i = 0; i < d.np_tot[IDIR] ; i++) { + myVc(n,k,j,i) = d.Vc(n,k,j,i); + d.Vc(n,k,j,i) = 0.0; + + } + } + } + } + + for(int n = 0; n < DIMENSIONS ; n++) { + for(int k = 0; k < d.np_tot[KDIR] + KOFFSET; k++) { + for(int j = 0; j < d.np_tot[JDIR] + JOFFSET; j++) { + for(int i = 0; i < d.np_tot[IDIR] + IOFFSET; i++) { + myVs(n,k,j,i) = d.Vs(n,k,j,i); + d.Vs(n,k,j,i) = 0.0; + } + } + } + } + #ifdef EVOLVE_VECTOR_POTENTIAL + for(int n = 0; n < AX3e+1 ; n++) { + for(int k = 0; k < d.np_tot[KDIR] + KOFFSET; k++) { + for(int j = 0; j < d.np_tot[JDIR] + JOFFSET; j++) { + for(int i = 0; i < d.np_tot[IDIR] + IOFFSET; i++) { + myVe(n,k,j,i) = d.Ve(n,k,j,i); + d.Ve(n,k,j,i) = 0.0; + } + } + } + } + #endif + + // Push our datablockHost to erase everything + d.SyncToDevice(); + // From this point, the dataBlock is full of zeros + + // Load back the restart dump + myOutput->RestartFromDump(data, outnum); + data.SetBoundaries(); + #ifdef EVOLVE_VECTOR_POTENTIAL + data.hydro->emf->ComputeMagFieldFromA(data.hydro->Ve, data.hydro->Vs); + #endif + d.SyncFromDevice(); + + // increment outnum + outnum++; + int errornum; + + errornum = 0; + idfx::cout << "Analysis: checking consistency" << std::endl; + // Check that the save/load routines have left everything unchanged. 
+ for(int n = 0; n < d.Vc.extent(0) ; n++) { + for(int k = d.beg[KDIR]; k < d.end[KDIR] ; k++) { + for(int j = d.beg[JDIR]; j < d.end[JDIR] ; j++) { + for(int i = d.beg[IDIR]; i < d.end[IDIR] ; i++) { + if(myVc(n,k,j,i) != d.Vc(n,k,j,i)) { + errornum++; + idfx::cout << "-----------------------------------------" << std::endl + << " Error in Vc at (i,j,k,n) = ( " << i << ", " << j << ", " << k << ", " << n << ")" << std::endl + << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl + << "Original= " << myVc(n,k,j,i) << " New=" << d.Vc(n,k,j,i) << " diff=" << myVc(n,k,j,i)-d.Vc(n,k,j,i) << std::endl; + } + + } + } + } + } + + for(int k = d.beg[KDIR]; k < d.end[KDIR] ; k++) { + for(int j = d.beg[JDIR]; j < d.end[JDIR] ; j++) { + for(int i = d.beg[IDIR]; i < d.end[IDIR]+IOFFSET ; i++) { + if(myVs(BX1s,k,j,i) != d.Vs(BX1s,k,j,i)) { + errornum++; + idfx::cout << "-----------------------------------------" << std::endl + << " Error in Vs(BX1s) at (i,j,k) = ( " << i << ", " << j << ", " << k << ")" << std::endl + << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl + << "Original= " << myVs(BX1s,k,j,i) << " New=" << d.Vs(BX1s,k,j,i) << " diff=" << myVs(BX1s,k,j,i)-d.Vs(BX1s,k,j,i) << std::endl; + + } + + } + } + } + for(int k = d.beg[KDIR]; k < d.end[KDIR] ; k++) { + for(int j = d.beg[JDIR]; j < d.end[JDIR]+JOFFSET ; j++) { + for(int i = d.beg[IDIR]; i < d.end[IDIR] ; i++) { + if(myVs(BX2s,k,j,i) != d.Vs(BX2s,k,j,i)) { + errornum++; + idfx::cout << "-----------------------------------------" << std::endl + << " Error in Vs(BX2s) at (i,j,k) = ( " << i << ", " << j << ", " << k << ")" << std::endl + << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl; + } + + } + } + } + for(int k = d.beg[KDIR]; k < d.end[KDIR]+KOFFSET ; k++) { + for(int j = d.beg[JDIR]; j < d.end[JDIR] ; j++) { + 
for(int i = d.beg[IDIR]; i < d.end[IDIR] ; i++) { + if(myVs(BX3s,k,j,i) != d.Vs(BX3s,k,j,i)) { + errornum++; + idfx::cout << "-----------------------------------------" << std::endl + << " Error in Vs(BX3s) at (i,j,k) = ( " << i << ", " << j << ", " << k << ")" << std::endl + << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl + << "Original= " << myVs(BX3s,k,j,i) << " New=" << d.Vs(BX3s,k,j,i) << " diff=" << myVs(BX3s,k,j,i)-d.Vs(BX3s,k,j,i) << std::endl; + } + + } + } + } +#ifdef EVOLVE_VECTOR_POTENTIAL + for(int k = d.beg[KDIR]; k < d.end[KDIR]+KOFFSET ; k++) { + for(int j = d.beg[JDIR]; j < d.end[JDIR]+JOFFSET ; j++) { + for(int i = d.beg[IDIR]; i < d.end[IDIR] ; i++) { + if(myVe(AX1e,k,j,i) != d.Ve(AX1e,k,j,i)) { + errornum++; + idfx::cout << "-----------------------------------------" << std::endl + << " Error in Ve(AX1e) at (i,j,k) = ( " << i << ", " << j << ", " << k << ")" << std::endl + << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl; + + } + + } + } + } + for(int k = d.beg[KDIR]; k < d.end[KDIR]+KOFFSET ; k++) { + for(int j = d.beg[JDIR]; j < d.end[JDIR] ; j++) { + for(int i = d.beg[IDIR]; i < d.end[IDIR]+IOFFSET ; i++) { + if(myVe(AX2e,k,j,i) != d.Ve(AX2e,k,j,i)) { + errornum++; + idfx::cout << "-----------------------------------------" << std::endl + << " Error in Ve(AX2e) at (i,j,k) = ( " << i << ", " << j << ", " << k << ")" << std::endl + << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl; + } + + } + } + } + for(int k = d.beg[KDIR]; k < d.end[KDIR] ; k++) { + for(int j = d.beg[JDIR]; j < d.end[JDIR]+JOFFSET ; j++) { + for(int i = d.beg[IDIR]; i < d.end[IDIR]+IOFFSET ; i++) { + if(myVe(AX3e,k,j,i) != d.Ve(AX3e,k,j,i)) { + errornum++; + idfx::cout << "-----------------------------------------" << std::endl + << " Error in Ve(AX3e) at (i,j,k) = ( " << 
i << ", " << j << ", " << k << ")" << std::endl + << "             Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl + << "Original= " << myVe(AX3e,k,j,i) << " New=" << d.Ve(AX3e,k,j,i) << " diff=" << myVe(AX3e,k,j,i)-d.Ve(AX3e,k,j,i) << std::endl; + } + + } + } + } +#endif + + idfx::cout << "Analysis: consistency check done with " << errornum << " errors " << std::endl; + if(errornum>0) { + IDEFIX_ERROR("Restart from dump failed validation"); + } +} + +// Initialisation routine. Can be used to allocate +// Arrays or variables which are used later on +Setup::Setup(Input &input, Grid &grid, DataBlock &data, Output &output) { + if(input.CheckEntry("Output","analysis")>0) { + output.EnrollAnalysis(&Analysis); + myOutput = &output; + outnum=0; + } +} + +// This routine initialize the flow +// Note that data is on the device. +// One can therefore define locally +// a datahost and sync it, if needed +void Setup::InitFlow(DataBlock &data) { + // Create a host copy + DataBlockHost d(data); + real x,y,z; + IdefixHostArray4D Ve; + + #ifndef EVOLVE_VECTOR_POTENTIAL + Ve = IdefixHostArray4D("Potential vector",3, d.np_tot[KDIR]+1, d.np_tot[JDIR]+1, d.np_tot[IDIR]+1); + #else + Ve = d.Ve; + #endif + + bool haveTracer = data.hydro->haveTracer; + + real B0=1.0/sqrt(4.0*M_PI); + + for(int k = 0; k < d.np_tot[KDIR] ; k++) { + for(int j = 0; j < d.np_tot[JDIR] ; j++) { + for(int i = 0; i < d.np_tot[IDIR] ; i++) { + x=d.x[IDIR](i); + y=d.x[JDIR](j); + z=d.x[KDIR](k); + + d.Vc(RHO,k,j,i) = 25.0/(36.0*M_PI); + d.Vc(PRS,k,j,i) = 5.0/(12.0*M_PI); + d.Vc(VX1,k,j,i) = -sin(2.0*M_PI*y); + d.Vc(VX2,k,j,i) = sin(2.0*M_PI*x)+cos(2.0*M_PI*z); + d.Vc(VX3,k,j,i) = cos(2.0*M_PI*x); + + real xl=d.xl[IDIR](i); + real yl=d.xl[JDIR](j); + real zl=d.xl[KDIR](k); + Ve(IDIR,k,j,i) = B0/(2.0*M_PI)*(cos(2.0*M_PI*yl)); + Ve(JDIR,k,j,i) = B0/(2.0*M_PI)*sin(2.0*M_PI*xl); + Ve(KDIR,k,j,i) = B0/(2.0*M_PI)*( + cos(2.0*M_PI*yl) + cos(4.0*M_PI*xl)/2.0); + + 
if(haveTracer) { + d.Vc(TRG ,k,j,i) = x>0.5? 1.0:0.0; + d.Vc(TRG+1,k,j,i) = z>0.5? 1.0:0.0; + } + } + } + } + + #ifndef EVOLVE_VECTOR_POTENTIAL + d.MakeVsFromAmag(Ve); + #endif + // Send it all, if needed + d.SyncToDevice(); +} + +// Analyse data to produce an output +void MakeAnalysis(DataBlock & data) { + +} + + + +// Do a specifically designed user step in the middle of the integration +void ComputeUserStep(DataBlock &data, real t, real dt) { + +} diff --git a/test/IO/dump/testme.json b/test/IO/dump/testme.json new file mode 100644 index 000000000..cb69f946f --- /dev/null +++ b/test/IO/dump/testme.json @@ -0,0 +1,28 @@ +{ + "namings": "ini,single,vectPot,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "vectPot": [false, true], + "reconstruction": 2, + "single": [false], + "mpi": [false, true], + "dec": ["2","2","2"], + "standardTest": false, + "nonRegressionTest": false, + "tolerance": 1e-13 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "vectPot": [false], + "reconstruction": 2, + "mpi": [false, true], + "single": [true], + "dec": ["2","2","2"], + "standardTest": false, + "nonRegressionTest": false, + "tolerance": 1e-13 + } + ] +} diff --git a/test/IO/dump/testme.py b/test/IO/dump/testme.py new file mode 100755 index 000000000..25e84433b --- /dev/null +++ b/test/IO/dump/testme.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python3 +""" + +@author: glesur +""" +import os +import sys +sys.path.append(os.getenv("IDEFIX_DIR")) + +import pytools.idfx_test as tst + +# Whether we should reset our reference run (only do that on purpose!) 
+ +tolerance=1e-13 + +def testMe(test): + test.configure() + test.compile() + + # Check restarts + test.run("idefix.ini") + + +test=tst.idfxTest(__file__) + +# if no decomposition is specified, use that one +if not test.dec: + test.dec=["2","2","2"] + +if not test.all: + testMe(test) +else: + test.vectPot=False + test.reconstruction=2 + test.mpi=False + testMe(test) + # test in MPI mode + test.mpi=True + testMe(test) + + + # test with vector potential + test.mpi=False + test.vectPot=True + test.reconstruction=2 + testMe(test) + + test.mpi=True + testMe(test) + + # test with other precision + test.single=True + test.vectPot=False + test.reconstruction=2 + test.mpi=False + testMe(test) + # test in MPI mode + test.mpi=True + testMe(test) diff --git a/test/IO/pydefix/README.md b/test/IO/pydefix/README.md index 820a0958e..8dd3f5b4f 100644 --- a/test/IO/pydefix/README.md +++ b/test/IO/pydefix/README.md @@ -42,3 +42,10 @@ export pybind11_DIR=env/lib/python3.10/site-packages/pybind11 ``` you can then run cmake which should be able to find pybind11, and compile the code. + +If, while running the code using a python interpreter in an environment, you run into errors stating that some module +installed in your environment is not present, this is because pybind11 does not always capture your venv. You need to set the following environment variable (replace XX by your python version) + +```bash +export PYTHONPATH=$VIRTUAL_ENV/lib/python3.XX/site-packages:$PYTHONPATH +``` diff --git a/test/IO/pydefix/idefix.ini b/test/IO/pydefix/idefix.ini index 57fdcfc4a..3ee42ae3e 100644 --- a/test/IO/pydefix/idefix.ini +++ b/test/IO/pydefix/idefix.ini @@ -26,5 +26,6 @@ X3-beg outflow X3-end outflow [Output] -log 10 +log 100 python 0.02 +dmp 0.5 diff --git a/test/IO/pydefix/python_requirements.txt b/test/IO/pydefix/python_requirements.txt index 6afd3019b..9489d0889 100644 --- a/test/IO/pydefix/python_requirements.txt +++ b/test/IO/pydefix/python_requirements.txt @@ -1,6 +1,7 @@ # note: version requirements are indicative and tests # should be able to run with # older versions of our dependencies, though it is not guaranteed. +# minimally require last versions available for Python 2.7 numpy>=1.16.6 matplotlib>=2.2.5 -pybind11>=2.12.0 +pybind11>=2.10.0 diff --git a/test/IO/pydefix/testme.json b/test/IO/pydefix/testme.json new file mode 100644 index 000000000..78760f724 --- /dev/null +++ b/test/IO/pydefix/testme.json @@ -0,0 +1,17 @@ +{ + "namings": "ini,single,vectPot,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "vectPot": false, + "reconstruction": 2, + "single": false, + "mpi": [false, true], + "dec": ["2","2"], + "standardTest": false, + "tolerance": 1e-12 + } + ] +} diff --git a/test/IO/pydefix/testme.py b/test/IO/pydefix/testme.py new file mode 100755 index 000000000..2940e56dd --- /dev/null +++ b/test/IO/pydefix/testme.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +""" + +@author: glesur +""" +import os +import sys +sys.path.append(os.getenv("IDEFIX_DIR")) + +import pytools.idfx_test as tst +tolerance=1e-12 +def testMe(test): + test.configure() + test.compile() + + test.run(inputFile="idefix.ini") + if test.init and not test.mpi: + test.makeReference(filename="dump.0001.dmp") + + 
test.nonRegressionTest(filename="dump.0001.dmp",tolerance=tolerance) + + +test=tst.idfxTest(__file__) +if not test.dec: + test.dec=['2','2'] + +if not test.all: + if(test.check): + test.checkOnly(filename="dump.0001.dmp",tolerance=tolerance) + else: + testMe(test) +else: + test.noplot = True + test.vectPot = False + test.single=False + test.reconstruction=2 + test.mpi=False + testMe(test) + test.mpi=True + testMe(test) diff --git a/test/HD/SedovBlastWave/CMakeLists.txt b/test/IO/xdmf/CMakeLists.txt similarity index 50% rename from test/HD/SedovBlastWave/CMakeLists.txt rename to test/IO/xdmf/CMakeLists.txt index c4ae088b8..8ce9e0f4d 100644 --- a/test/HD/SedovBlastWave/CMakeLists.txt +++ b/test/IO/xdmf/CMakeLists.txt @@ -1 +1,2 @@ +enable_idefix_property(Idefix_MHD) enable_idefix_property(Idefix_HDF5) diff --git a/test/IO/xdmf/definitions.hpp b/test/IO/xdmf/definitions.hpp new file mode 100644 index 000000000..a854ad9e3 --- /dev/null +++ b/test/IO/xdmf/definitions.hpp @@ -0,0 +1,5 @@ +#define COMPONENTS 3 +#define DIMENSIONS 3 +//#define DEBUG + +#define GEOMETRY CARTESIAN diff --git a/test/IO/xdmf/idefix.ini b/test/IO/xdmf/idefix.ini new file mode 100644 index 000000000..250a24054 --- /dev/null +++ b/test/IO/xdmf/idefix.ini @@ -0,0 +1,26 @@ +[Grid] +X1-grid 1 0.0 32 u 1.0 +X2-grid 1 0.0 64 u 1.0 +X3-grid 1 0.0 32 u 1.0 + +[TimeIntegrator] +CFL 0.9 +tstop 0.2 +first_dt 1.e-4 +nstages 2 + +[Hydro] +solver hlld + +[Boundary] +X1-beg periodic +X1-end periodic +X2-beg periodic +X2-end periodic +X3-beg periodic +X3-end periodic + +[Output] +xdmf 0.2 +dmp 0.2 +log 10 diff --git a/test/IO/xdmf/setup.cpp b/test/IO/xdmf/setup.cpp new file mode 100644 index 000000000..8cc40857f --- /dev/null +++ b/test/IO/xdmf/setup.cpp @@ -0,0 +1,75 @@ +#include "idefix.hpp" +#include "setup.hpp" + +// Initialisation routine. 
Can be used to allocate +// Arrays or variables which are used later on +Setup::Setup(Input &input, Grid &grid, DataBlock &data, Output &output) { +} + +// This routine initialize the flow +// Note that data is on the device. +// One can therefore define locally +// a datahost and sync it, if needed +void Setup::InitFlow(DataBlock &data) { + // Create a host copy + DataBlockHost d(data); + real x,y,z; + IdefixHostArray4D Ve; + + #ifndef EVOLVE_VECTOR_POTENTIAL + Ve = IdefixHostArray4D("Potential vector",3, d.np_tot[KDIR]+1, d.np_tot[JDIR]+1, d.np_tot[IDIR]+1); + #else + Ve = d.Ve; + #endif + + bool haveTracer = data.hydro->haveTracer; + + real B0=1.0/sqrt(4.0*M_PI); + + for(int k = 0; k < d.np_tot[KDIR] ; k++) { + for(int j = 0; j < d.np_tot[JDIR] ; j++) { + for(int i = 0; i < d.np_tot[IDIR] ; i++) { + x=d.x[IDIR](i); + y=d.x[JDIR](j); + z=d.x[KDIR](k); + + d.Vc(RHO,k,j,i) = 25.0/(36.0*M_PI); + d.Vc(PRS,k,j,i) = 5.0/(12.0*M_PI); + d.Vc(VX1,k,j,i) = -sin(2.0*M_PI*y); + d.Vc(VX2,k,j,i) = sin(2.0*M_PI*x)+cos(2.0*M_PI*z); + d.Vc(VX3,k,j,i) = cos(2.0*M_PI*x); + + real xl=d.xl[IDIR](i); + real yl=d.xl[JDIR](j); + real zl=d.xl[KDIR](k); + Ve(IDIR,k,j,i) = B0/(2.0*M_PI)*(cos(2.0*M_PI*yl)); + Ve(JDIR,k,j,i) = B0/(2.0*M_PI)*sin(2.0*M_PI*xl); + Ve(KDIR,k,j,i) = B0/(2.0*M_PI)*( + cos(2.0*M_PI*yl) + cos(4.0*M_PI*xl)/2.0); + + if(haveTracer) { + d.Vc(TRG ,k,j,i) = x>0.5? 1.0:0.0; + d.Vc(TRG+1,k,j,i) = z>0.5? 
1.0:0.0; + } + } + } + } + + #ifndef EVOLVE_VECTOR_POTENTIAL + d.MakeVsFromAmag(Ve); + #endif + // Send it all, if needed + d.SyncToDevice(); +} + +// Analyse data to produce an output +void MakeAnalysis(DataBlock & data) { + +} + + + +// Do a specifically designed user step in the middle of the integration +void ComputeUserStep(DataBlock &data, real t, real dt) { + +} diff --git a/test/IO/xdmf/testme.json b/test/IO/xdmf/testme.json new file mode 100644 index 000000000..8198bb7bb --- /dev/null +++ b/test/IO/xdmf/testme.json @@ -0,0 +1,17 @@ +{ + "namings": "ini,single,vectPot,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "vectPot": false, + "reconstruction": 2, + "single": false, + "mpi": false, + "dec": ["2","2","2"], + "standardTest": false, + "nonRegressionTest": false, + "check_file_produced": [ "data.0001.flt.xmf", "data.0001.flt.h5" ] + } + ] +} diff --git a/test/IO/xdmf/testme.py b/test/IO/xdmf/testme.py new file mode 100755 index 000000000..6273bbe0e --- /dev/null +++ b/test/IO/xdmf/testme.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 + +""" + +@author: glesur +""" +import os +import sys +sys.path.append(os.getenv("IDEFIX_DIR")) + +import pytools.idfx_test as tst + +name="dump.0001.dmp" + +test=tst.idfxTest(__file__) + +def check_xdmf_exists(): + # verify that the expected XDMF sidecar and data file exist. + xdmf_file = "data.0001.flt.xmf" + h5_file = "data.0001.flt.h5" + + + if not os.path.exists(xdmf_file): + print("Missing expected XDMF output file: {}".format(xdmf_file)) + sys.exit(1) + if not os.path.exists(h5_file): + print("Missing expected XDMF data file: {}".format(h5_file)) + sys.exit(1) + +if not test.dec: + test.dec=['2','2','2'] + +if test.check: + check_xdmf_exists() + +else: + test.vectPot=False + test.single=False + test.reconstruction=2 + test.mpi=False + # Only check that the test runs. 
+ test.configure(definitionFile="definitions.hpp") + test.compile() + test.run(inputFile="idefix.ini") + check_xdmf_exists() diff --git a/test/MHD/AmbipolarCshock/testme.json b/test/MHD/AmbipolarCshock/testme.json new file mode 100644 index 000000000..2eb2ac51f --- /dev/null +++ b/test/MHD/AmbipolarCshock/testme.json @@ -0,0 +1,14 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-rkl.ini"], + "noplot": true, + "single": false, + "reconstruction": 2, + "mpi": false, + "tolerance": 0 + } + ] +} diff --git a/test/MHD/AmbipolarCshock/testme.py b/test/MHD/AmbipolarCshock/testme.py index 675958a7b..0e2f0815e 100755 --- a/test/MHD/AmbipolarCshock/testme.py +++ b/test/MHD/AmbipolarCshock/testme.py @@ -23,7 +23,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp",tolerance=tolerance) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/MHD/AmbipolarCshock3D/idefix-rkl.ini b/test/MHD/AmbipolarCshock3D/idefix-rkl.ini index 055429557..621552b13 100644 --- a/test/MHD/AmbipolarCshock3D/idefix-rkl.ini +++ b/test/MHD/AmbipolarCshock3D/idefix-rkl.ini @@ -22,8 +22,8 @@ X1-beg userdef X1-end userdef X2-beg periodic X2-end periodic -X3-beg periodic -X3-end periodic +X3-beg userdef +X3-end userdef [Output] vtk 100.0 diff --git a/test/MHD/AmbipolarCshock3D/idefix.ini b/test/MHD/AmbipolarCshock3D/idefix.ini index 984e985ef..78c7ddd23 100644 --- a/test/MHD/AmbipolarCshock3D/idefix.ini +++ b/test/MHD/AmbipolarCshock3D/idefix.ini @@ -19,8 +19,8 @@ X1-beg userdef X1-end userdef X2-beg periodic X2-end periodic -X3-beg periodic -X3-end periodic +X3-beg userdef +X3-end userdef [Output] vtk 100.0 diff --git a/test/MHD/AmbipolarCshock3D/setup.cpp b/test/MHD/AmbipolarCshock3D/setup.cpp index 5f8ec9313..abf8a0f24 100644 --- a/test/MHD/AmbipolarCshock3D/setup.cpp +++ b/test/MHD/AmbipolarCshock3D/setup.cpp @@ -79,7 +79,48 @@ void UserdefBoundary(Hydro *hydro, int dir, BoundarySide 
side, real t) { }); } - + if(dir ==KDIR) { + auto Vc = hydro->Vc; + auto Vs = hydro->Vs; + int jbeg = data->beg[JDIR]; + int jend = data->end[JDIR]; + int kbeg = data->beg[KDIR]; + int kend = data->end[KDIR]; + hydro->boundary->BoundaryForAll("UserdefBoundary", dir, side, + KOKKOS_LAMBDA (int n, int k, int j, int i) { + int kref=k; + + if(side == left) { + kref = kbeg; + } else { + kref = kend -1; + } + Vc(n,k,j,i) = Vc(n,kref,j,i); + }); + + if(dir == KDIR) { + hydro->boundary->BoundaryForX1s("UserdefBoundaryX1s", dir, side, + KOKKOS_LAMBDA (int k, int j, int i) { + int kref=k; + if(side == left) { + kref = kbeg; + } else { + kref = kend -1; + } + Vs(BX1s,k,j,i) = Vs(BX1s,kref,j,i); + }); + hydro->boundary->BoundaryForX2s("UserdefBoundaryX2s", dir, side, + KOKKOS_LAMBDA (int k, int j, int i) { + int kref=k; + if(side == left) { + kref = kbeg; + } else { + kref = kend -1; + } + Vs(BX2s,k,j,i) = Vs(BX2s,kref,j,i); + }); + } + } } @@ -136,8 +177,8 @@ void Setup::InitFlow(DataBlock &data) { real z = d.x[KDIR](k); real y = d.x[JDIR](j); d.Ve(AX1e,k,j,i) = B0*sin(theta)*z; - d.Ve(AX2e,k,j,i) = ZERO_F; - d.Ve(AX3e,k,j,i) = B0*cos(theta)*y; + d.Ve(AX2e,k,j,i) = -B0*cos(theta)*z; + d.Ve(AX3e,k,j,i) = 0; #else IDEFIX_ERROR("Vector potential only valid in 3 dimensions for this setup"); #endif diff --git a/test/MHD/AmbipolarCshock3D/testme.json b/test/MHD/AmbipolarCshock3D/testme.json new file mode 100644 index 000000000..fdcb11a76 --- /dev/null +++ b/test/MHD/AmbipolarCshock3D/testme.json @@ -0,0 +1,35 @@ +{ + "namings": "ini,mpi,vectPot", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-rkl.ini"], + "noplot": true, + "single": false, + "reconstruction": 2, + "mpi": [false], + "vectPot": [false, true], + "dec": ["2","1","1"], + "tolerance": 3e-14 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-rkl.ini"], + "noplot": true, + "single": false, + "reconstruction": 2, + "mpi": [true], + "vectPot": [false], + "dec": 
["2","1","1"], + "tolerance": 3e-14 + } + ], + "when": { + "conditions": { + "ini": "idefix-rkl.ini", + "mpi": true + }, + "apply": { + "tolerance": 2e-10 + } + } +} diff --git a/test/MHD/AmbipolarCshock3D/testme.py b/test/MHD/AmbipolarCshock3D/testme.py index f9fae6765..94a8b0f4c 100755 --- a/test/MHD/AmbipolarCshock3D/testme.py +++ b/test/MHD/AmbipolarCshock3D/testme.py @@ -9,7 +9,7 @@ sys.path.append(os.getenv("IDEFIX_DIR")) import pytools.idfx_test as tst -tolerance=2e-14 +tolerance=3e-14 def testMe(test): test.configure() test.compile() @@ -27,7 +27,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp",tolerance=mytol) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','1','1'] diff --git a/test/MHD/AxisFluxTube/testme.json b/test/MHD/AxisFluxTube/testme.json new file mode 100644 index 000000000..781d4f7b3 --- /dev/null +++ b/test/MHD/AxisFluxTube/testme.json @@ -0,0 +1,16 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-coarsening.ini"], + "noplot": true, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "vectPot": false, + "standardTest": false, + "tolerance": 1e-14 + } + ] +} diff --git a/test/MHD/AxisFluxTube/testme.py b/test/MHD/AxisFluxTube/testme.py index 44a53dc28..5abb46568 100755 --- a/test/MHD/AxisFluxTube/testme.py +++ b/test/MHD/AxisFluxTube/testme.py @@ -27,7 +27,7 @@ def testMe(test): test.nonRegressionTest(filename=name,tolerance=tolerance) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/MHD/Coarsening/testme.json b/test/MHD/Coarsening/testme.json new file mode 100644 index 000000000..5b38ffebb --- /dev/null +++ b/test/MHD/Coarsening/testme.json @@ -0,0 +1,12 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-rkl.ini","idefix-x2.ini","idefix-x3.ini"], + "noplot": true, + "mpi": [false,true], + "tolerance": 
0 + } + ] +} diff --git a/test/MHD/Coarsening/testme.py b/test/MHD/Coarsening/testme.py index 52a083bd2..50d13c926 100755 --- a/test/MHD/Coarsening/testme.py +++ b/test/MHD/Coarsening/testme.py @@ -26,7 +26,7 @@ def testMe(test): test.nonRegressionTest(filename=name) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/MHD/FargoMHDSpherical/testme.json b/test/MHD/FargoMHDSpherical/testme.json new file mode 100644 index 000000000..72d93f493 --- /dev/null +++ b/test/MHD/FargoMHDSpherical/testme.json @@ -0,0 +1,17 @@ +{ + "namings": "ini,mpi,vectPot", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "single": false, + "reconstruction": 2, + "vectPot": [false, true], + "mpi": [false, true], + "dec": ["2","2","2"], + "standardTest": false, + "tolerance": 1e-14 + } + ] +} diff --git a/test/MHD/FargoMHDSpherical/testme.py b/test/MHD/FargoMHDSpherical/testme.py index 7ee3100b2..1a3b28adb 100755 --- a/test/MHD/FargoMHDSpherical/testme.py +++ b/test/MHD/FargoMHDSpherical/testme.py @@ -27,7 +27,7 @@ def testMe(test): test.nonRegressionTest(filename=name,tolerance=tolerance) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2','2'] diff --git a/test/MHD/HallWhistler/testme.json b/test/MHD/HallWhistler/testme.json new file mode 100644 index 000000000..cc71c4bf7 --- /dev/null +++ b/test/MHD/HallWhistler/testme.json @@ -0,0 +1,15 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": false, + "tolerance": 1e-15 + } + ] +} diff --git a/test/MHD/HallWhistler/testme.py b/test/MHD/HallWhistler/testme.py index f8d633840..f8ba0a258 100755 --- a/test/MHD/HallWhistler/testme.py +++ b/test/MHD/HallWhistler/testme.py @@ -26,7 +26,7 @@ def testMe(test): test.nonRegressionTest(filename=name,tolerance=tolerance) -test=tst.idfxTest() 
+test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/MHD/LinearWaveTest/idefix-entropy.ini b/test/MHD/LinearWaveTest/idefix-entropy.ini index 5a54d274b..805819477 100644 --- a/test/MHD/LinearWaveTest/idefix-entropy.ini +++ b/test/MHD/LinearWaveTest/idefix-entropy.ini @@ -29,4 +29,4 @@ epsilon 1.0e-6 [Output] dmp 1.0 vtk 1.0 -log 1 +log 10 diff --git a/test/MHD/LinearWaveTest/testme.json b/test/MHD/LinearWaveTest/testme.json new file mode 100644 index 000000000..a3f997e34 --- /dev/null +++ b/test/MHD/LinearWaveTest/testme.json @@ -0,0 +1,24 @@ +{ + "namings": "ini,reconstruction,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix-fast.ini","idefix-slow.ini","idefix-alfven.ini","idefix-entropy.ini"], + "noplot": true, + "single": false, + "reconstruction": [2], + "mpi": [false,true], + "dec": ["2","2","2"], + "tolerance": 2e-13 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix-fast.ini","idefix-slow.ini","idefix-alfven.ini","idefix-entropy.ini"], + "noplot": true, + "single": false, + "reconstruction": [3, 4], + "mpi": [false], + "dec": ["2","2","2"], + "tolerance": 2e-13 + } + ] +} diff --git a/test/MHD/LinearWaveTest/testme.py b/test/MHD/LinearWaveTest/testme.py index 8c9282862..eeaaab0fe 100755 --- a/test/MHD/LinearWaveTest/testme.py +++ b/test/MHD/LinearWaveTest/testme.py @@ -9,7 +9,7 @@ sys.path.append(os.getenv("IDEFIX_DIR")) import pytools.idfx_test as tst -tolerance=1e-14 +tolerance=2e-13 def testMe(test): test.configure() test.compile() @@ -24,7 +24,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp",tolerance=mytol) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2','2'] diff --git a/test/MHD/MTI/testme.json b/test/MHD/MTI/testme.json new file mode 100644 index 000000000..3bd669cca --- /dev/null +++ b/test/MHD/MTI/testme.json @@ -0,0 +1,15 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": 
["idefix.ini","idefix-rkl.ini","idefix-sl.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": false, + "tolerance": 0 + } + ] +} diff --git a/test/MHD/MTI/testme.py b/test/MHD/MTI/testme.py index 7975c8075..0e2c9e317 100755 --- a/test/MHD/MTI/testme.py +++ b/test/MHD/MTI/testme.py @@ -27,7 +27,7 @@ def testMe(test): test.nonRegressionTest(filename=name) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/MHD/OrszagTang/testme.json b/test/MHD/OrszagTang/testme.json new file mode 100644 index 000000000..3128b3239 --- /dev/null +++ b/test/MHD/OrszagTang/testme.json @@ -0,0 +1,74 @@ +{ + "namings": "ini,reconstruction,mpi,single,vectPot", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": [ + "idefix.ini", + "idefix-hll.ini", + "idefix-hlld-arithmetic.ini", + "idefix-hlld-hll.ini", + "idefix-hlld-hlld.ini", + "idefix-hlld-uct0.ini", + "idefix-hlld.ini", + "idefix-tvdlf.ini" + ], + "noplot": true, + "vectPot": [false], + "single": false, + "reconstruction": [2,3,4], + "mpi": [false, true], + "dec": ["2","2"], + "tolerance": 1e-12, + "standardTest": false + },{ + "dumpname": "dump.0001.dmp", + "ini": [ + "idefix.ini", + "idefix-hll.ini", + "idefix-hlld-arithmetic.ini", + "idefix-hlld-hll.ini", + "idefix-hlld-hlld.ini", + "idefix-hlld-uct0.ini", + "idefix-hlld.ini", + "idefix-tvdlf.ini" + ], + "noplot": true, + "vectPot": [false], + "single": true, + "reconstruction": [2], + "mpi": [false], + "dec": ["2","2"], + "tolerance": 1e-12, + "standardTest": false + },{ + "dumpname": "dump.0001.dmp", + "ini": [ + "idefix.ini", + "idefix-hll.ini", + "idefix-hlld-arithmetic.ini", + "idefix-hlld-hll.ini", + "idefix-hlld-hlld.ini", + "idefix-hlld-uct0.ini", + "idefix-hlld.ini", + "idefix-tvdlf.ini" + ], + "noplot": true, + "vectPot": [true], + "single": false, + "reconstruction": [2], + "mpi": [false], + "dec": ["2","2"], + "tolerance": 1e-12, + "standardTest": false + } + ], + 
"when": { + "conditions": { + "single": true + }, + "apply" : { + "tolerance": 1e-5 + } + } +} diff --git a/test/MHD/OrszagTang/testme.py b/test/MHD/OrszagTang/testme.py index 761505caa..41f33aba6 100755 --- a/test/MHD/OrszagTang/testme.py +++ b/test/MHD/OrszagTang/testme.py @@ -33,7 +33,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp",tolerance=mytol) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2'] diff --git a/test/MHD/OrszagTang3D/setup.cpp b/test/MHD/OrszagTang3D/setup.cpp index 8e7205c42..a6927a132 100644 --- a/test/MHD/OrszagTang3D/setup.cpp +++ b/test/MHD/OrszagTang3D/setup.cpp @@ -12,198 +12,10 @@ generators on different architectures. Output* myOutput; int outnum; // Analysis function -// This analysis checks that the restart routines are performing as they should -void Analysis(DataBlock& data) { - - - idfx::cout << "Analysis: Checking restart routines" << std::endl; - - // Trigger dump creation - myOutput->ForceWriteDump(data); - - // Mirror data on Host - DataBlockHost d(data); - - // Sync it - d.SyncFromDevice(); - - // Create local arrays to store the current physical state - IdefixHostArray4D myVc = IdefixHostArray4D("myVc", d.Vc.extent(0), data.np_tot[KDIR], data.np_tot[JDIR],data.np_tot[IDIR]); - IdefixHostArray4D myVs = IdefixHostArray4D("myVs", DIMENSIONS, data.np_tot[KDIR]+KOFFSET, data.np_tot[JDIR]+JOFFSET,data.np_tot[IDIR]+IOFFSET); - #ifdef EVOLVE_VECTOR_POTENTIAL - IdefixHostArray4D myVe = IdefixHostArray4D("myVe", AX3e+1, data.np_tot[KDIR]+KOFFSET, data.np_tot[JDIR]+JOFFSET,data.np_tot[IDIR]+IOFFSET); - #endif - // Transfer the datablock to myVc and myVs - for(int n = 0; n < d.Vc.extent(0) ; n++) { - for(int k = 0; k < d.np_tot[KDIR] ; k++) { - for(int j = 0; j < d.np_tot[JDIR] ; j++) { - for(int i = 0; i < d.np_tot[IDIR] ; i++) { - myVc(n,k,j,i) = d.Vc(n,k,j,i); - d.Vc(n,k,j,i) = 0.0; - - } - } - } - } - - for(int n = 0; n < DIMENSIONS ; n++) { - for(int k = 0; k < d.np_tot[KDIR] 
+ KOFFSET; k++) { - for(int j = 0; j < d.np_tot[JDIR] + JOFFSET; j++) { - for(int i = 0; i < d.np_tot[IDIR] + IOFFSET; i++) { - myVs(n,k,j,i) = d.Vs(n,k,j,i); - d.Vs(n,k,j,i) = 0.0; - } - } - } - } - #ifdef EVOLVE_VECTOR_POTENTIAL - for(int n = 0; n < AX3e+1 ; n++) { - for(int k = 0; k < d.np_tot[KDIR] + KOFFSET; k++) { - for(int j = 0; j < d.np_tot[JDIR] + JOFFSET; j++) { - for(int i = 0; i < d.np_tot[IDIR] + IOFFSET; i++) { - myVe(n,k,j,i) = d.Ve(n,k,j,i); - d.Ve(n,k,j,i) = 0.0; - } - } - } - } - #endif - - // Push our datablockHost to erase everything - d.SyncToDevice(); - // From this point, the dataBlock is full of zeros - - // Load back the restart dump - myOutput->RestartFromDump(data, outnum); - data.SetBoundaries(); - #ifdef EVOLVE_VECTOR_POTENTIAL - data.hydro->emf->ComputeMagFieldFromA(data.hydro->Ve, data.hydro->Vs); - #endif - d.SyncFromDevice(); - - // increment outnum - outnum++; - int errornum; - - errornum = 0; - idfx::cout << "Analysis: checking consistency" << std::endl; - // Check that the save/load routines have left everything unchanged. 
- for(int n = 0; n < d.Vc.extent(0) ; n++) { - for(int k = d.beg[KDIR]; k < d.end[KDIR] ; k++) { - for(int j = d.beg[JDIR]; j < d.end[JDIR] ; j++) { - for(int i = d.beg[IDIR]; i < d.end[IDIR] ; i++) { - if(myVc(n,k,j,i) != d.Vc(n,k,j,i)) { - errornum++; - idfx::cout << "-----------------------------------------" << std::endl - << " Error in Vc at (i,j,k,n) = ( " << i << ", " << j << ", " << k << ", " << n << ")" << std::endl - << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl; - } - - } - } - } - } - - for(int k = d.beg[KDIR]; k < d.end[KDIR] ; k++) { - for(int j = d.beg[JDIR]; j < d.end[JDIR] ; j++) { - for(int i = d.beg[IDIR]; i < d.end[IDIR]+IOFFSET ; i++) { - if(myVs(BX1s,k,j,i) != d.Vs(BX1s,k,j,i)) { - errornum++; - idfx::cout << "-----------------------------------------" << std::endl - << " Error in Vs(BX1s) at (i,j,k) = ( " << i << ", " << j << ", " << k << ")" << std::endl - << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl - << "Original= " << myVs(BX1s,k,j,i) << " New=" << d.Vs(BX1s,k,j,i) << " diff=" << myVs(BX1s,k,j,i)-d.Vs(BX1s,k,j,i) << std::endl; - - } - - } - } - } - for(int k = d.beg[KDIR]; k < d.end[KDIR] ; k++) { - for(int j = d.beg[JDIR]; j < d.end[JDIR]+JOFFSET ; j++) { - for(int i = d.beg[IDIR]; i < d.end[IDIR] ; i++) { - if(myVs(BX2s,k,j,i) != d.Vs(BX2s,k,j,i)) { - errornum++; - idfx::cout << "-----------------------------------------" << std::endl - << " Error in Vs(BX2s) at (i,j,k) = ( " << i << ", " << j << ", " << k << ")" << std::endl - << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl; - } - - } - } - } - for(int k = d.beg[KDIR]; k < d.end[KDIR]+KOFFSET ; k++) { - for(int j = d.beg[JDIR]; j < d.end[JDIR] ; j++) { - for(int i = d.beg[IDIR]; i < d.end[IDIR] ; i++) { - if(myVs(BX3s,k,j,i) != d.Vs(BX3s,k,j,i)) { - errornum++; - idfx::cout 
<< "-----------------------------------------" << std::endl - << " Error in Vs(BX3s) at (i,j,k) = ( " << i << ", " << j << ", " << k << ")" << std::endl - << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl - << "Original= " << myVs(BX3s,k,j,i) << " New=" << d.Vs(BX3s,k,j,i) << " diff=" << myVs(BX3s,k,j,i)-d.Vs(BX3s,k,j,i) << std::endl; - } - - } - } - } -#ifdef EVOLVE_VECTOR_POTENTIAL - for(int k = d.beg[KDIR]; k < d.end[KDIR]+KOFFSET ; k++) { - for(int j = d.beg[JDIR]; j < d.end[JDIR]+JOFFSET ; j++) { - for(int i = d.beg[IDIR]; i < d.end[IDIR] ; i++) { - if(myVe(AX1e,k,j,i) != d.Ve(AX1e,k,j,i)) { - errornum++; - idfx::cout << "-----------------------------------------" << std::endl - << " Error in Ve(AX1e) at (i,j,k) = ( " << i << ", " << j << ", " << k << ")" << std::endl - << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl; - - } - - } - } - } - for(int k = d.beg[KDIR]; k < d.end[KDIR]+KOFFSET ; k++) { - for(int j = d.beg[JDIR]; j < d.end[JDIR] ; j++) { - for(int i = d.beg[IDIR]; i < d.end[IDIR]+IOFFSET ; i++) { - if(myVe(AX2e,k,j,i) != d.Ve(AX2e,k,j,i)) { - errornum++; - idfx::cout << "-----------------------------------------" << std::endl - << " Error in Ve(AX2e) at (i,j,k) = ( " << i << ", " << j << ", " << k << ")" << std::endl - << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl; - } - - } - } - } - for(int k = d.beg[KDIR]; k < d.end[KDIR] ; k++) { - for(int j = d.beg[JDIR]; j < d.end[JDIR]+JOFFSET ; j++) { - for(int i = d.beg[IDIR]; i < d.end[IDIR]+IOFFSET ; i++) { - if(myVe(AX3e,k,j,i) != d.Ve(AX3e,k,j,i)) { - errornum++; - idfx::cout << "-----------------------------------------" << std::endl - << " Error in Ve(AX3e) at (i,j,k) = ( " << i << ", " << j << ", " << k << ")" << std::endl - << " Coordinates (x1,x2,x3) = ( " << d.x[IDIR](i) << ", " << 
d.x[JDIR](j) << ", " << d.x[KDIR](k) << ")" << std::endl - << "Original= " << myVs(BX3s,k,j,i) << " New=" << d.Vs(BX3s,k,j,i) << " diff=" << myVs(BX3s,k,j,i)-d.Vs(BX3s,k,j,i) << std::endl; - } - - } - } - } -#endif - - idfx::cout << "Analysis: consistency check done with " << errornum << " errors " << std::endl; - if(errornum>0) { - IDEFIX_ERROR("Restart from dump failed validation"); - } -} // Initialisation routine. Can be used to allocate // Arrays or variables which are used later on Setup::Setup(Input &input, Grid &grid, DataBlock &data, Output &output) { - if(input.CheckEntry("Output","analysis")>0) { - output.EnrollAnalysis(&Analysis); - myOutput = &output; - outnum=0; - } } // This routine initialize the flow diff --git a/test/MHD/OrszagTang3D/testme.json b/test/MHD/OrszagTang3D/testme.json new file mode 100644 index 000000000..a37b54164 --- /dev/null +++ b/test/MHD/OrszagTang3D/testme.json @@ -0,0 +1,34 @@ +{ + "namings": "ini,single,vectPot,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "vectPot": [false, true], + "reconstruction": 2, + "single": [false], + "mpi": [false, true], + "dec": ["2","2","2"], + "standardTest": false, + "tolerance": 1e-13 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "vectPot": [false], + "reconstruction": 2, + "mpi": [false, true], + "single": [true], + "dec": ["2","2","2"], + "standardTest": false, + "tolerance": 1e-13 + } + ], + "when": { + "conditions": { + "single": true + }, + "apply" : { + "tolerance": 1e-6 + } + } +} diff --git a/test/MHD/OrszagTang3D/testme.py b/test/MHD/OrszagTang3D/testme.py index 600a5ef5f..7a66150c5 100755 --- a/test/MHD/OrszagTang3D/testme.py +++ b/test/MHD/OrszagTang3D/testme.py @@ -27,14 +27,8 @@ def testMe(test): test.makeReference(filename="dump.0001.dmp") test.nonRegressionTest(filename="dump.0001.dmp",tolerance=tol) - # Check restarts - test.run("idefix-checkrestart.ini") - #force override the inputfile since the result should be identical 
- test.inifile="idefix.ini" - test.nonRegressionTest(filename="dump.0002.dmp",tolerance=tol) - -test=tst.idfxTest() +test=tst.idfxTest(__file__) # if no decomposition is specified, use that one if not test.dec: diff --git a/test/MHD/ResistiveAlfvenWave/testme.json b/test/MHD/ResistiveAlfvenWave/testme.json new file mode 100644 index 000000000..56ae122ec --- /dev/null +++ b/test/MHD/ResistiveAlfvenWave/testme.json @@ -0,0 +1,22 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-rkl.ini"], + "noplot": true, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "tolerance": 1e-14 + } + ], + "when": { + "conditions": { + "ini": "idefix-rkl.ini" + }, + "apply" : { + "tolerance": 1e-10 + } + } +} diff --git a/test/MHD/ResistiveAlfvenWave/testme.py b/test/MHD/ResistiveAlfvenWave/testme.py index f9b8ae56f..ebc29ab57 100755 --- a/test/MHD/ResistiveAlfvenWave/testme.py +++ b/test/MHD/ResistiveAlfvenWave/testme.py @@ -26,7 +26,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp",tolerance=mytol) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/MHD/ShearingBox/testme.json b/test/MHD/ShearingBox/testme.json new file mode 100644 index 000000000..71ae0ab0d --- /dev/null +++ b/test/MHD/ShearingBox/testme.json @@ -0,0 +1,15 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-fargo.ini"], + "noplot": true, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "dec": ["2","1","2"], + "tolerance": 1e-14 + } + ] +} diff --git a/test/MHD/ShearingBox/testme.py b/test/MHD/ShearingBox/testme.py index c89d24b52..ccee1a69c 100755 --- a/test/MHD/ShearingBox/testme.py +++ b/test/MHD/ShearingBox/testme.py @@ -26,7 +26,7 @@ def testMe(test): test.nonRegressionTest(filename="dump.0001.dmp",tolerance=mytol) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: 
test.dec=['2','1','2'] @@ -41,3 +41,6 @@ def testMe(test): test.reconstruction=2 test.mpi=False testMe(test) + + test.mpi=True + testMe(test) diff --git a/test/MHD/clessTDiffusion/testme.json b/test/MHD/clessTDiffusion/testme.json new file mode 100644 index 000000000..d615696f5 --- /dev/null +++ b/test/MHD/clessTDiffusion/testme.json @@ -0,0 +1,15 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "single": false, + "reconstruction": 2, + "mpi": false, + "vectPot": false, + "tolerance": 2e-15 + } + ] +} diff --git a/test/MHD/clessTDiffusion/testme.py b/test/MHD/clessTDiffusion/testme.py index 6ed5b1648..41193e141 100755 --- a/test/MHD/clessTDiffusion/testme.py +++ b/test/MHD/clessTDiffusion/testme.py @@ -26,7 +26,7 @@ def testMe(test): test.nonRegressionTest(filename=name, tolerance=2e-15) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/MHD/sod-iso/testme.json b/test/MHD/sod-iso/testme.json new file mode 100644 index 000000000..9f5534fbc --- /dev/null +++ b/test/MHD/sod-iso/testme.json @@ -0,0 +1,43 @@ +{ + "namings": "ini,reconstruction,single", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-hll.ini","idefix-hlld.ini","idefix-tvdlf.ini"], + "vectPot": false, + "single": false, + "reconstruction": [2,3], + "mpi": false, + "standardTest": false, + "tolerance": 0 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix-rk3.ini","idefix-hlld-rk3.ini"], + "vectPot": false, + "single": false, + "reconstruction": [4], + "mpi": false, + "standardTest": false, + "tolerance": 0 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-hll.ini","idefix-hlld.ini","idefix-tvdlf.ini"], + "vectPot": false, + "reconstruction": [2], + "mpi": false, + "single": true, + "standardTest": false, + "tolerance": 0 + } + ], + "when": [ + { + "conditions": { + "ini": "idefix-rkl.ini" + }, + "apply": { + "tolerance": 1e-10 + } 
+ } + ] +} diff --git a/test/MHD/sod-iso/testme.py b/test/MHD/sod-iso/testme.py index 798aace57..c85cc4ce7 100755 --- a/test/MHD/sod-iso/testme.py +++ b/test/MHD/sod-iso/testme.py @@ -27,7 +27,7 @@ def testMe(test): test.nonRegressionTest(filename=name) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/MHD/sod/testme.json b/test/MHD/sod/testme.json new file mode 100644 index 000000000..1501e3049 --- /dev/null +++ b/test/MHD/sod/testme.json @@ -0,0 +1,33 @@ +{ + "namings": "ini,reconstruction,single", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-hll.ini","idefix-hlld.ini","idefix-tvdlf.ini"], + "vectPot": false, + "single": false, + "reconstruction": [2,3], + "mpi": false, + "standardTest": false, + "tolerance": 0 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix-rk3.ini","idefix-hlld-rk3.ini"], + "vectPot": false, + "single": false, + "reconstruction": 4, + "mpi": false, + "standardTest": false, + "tolerance": 0 + },{ + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-hll.ini","idefix-hlld.ini","idefix-tvdlf.ini"], + "vectPot": false, + "reconstruction": 2, + "mpi": false, + "single": true, + "standardTest": false, + "tolerance": 0 + } + ] +} diff --git a/test/MHD/sod/testme.py b/test/MHD/sod/testme.py index 798aace57..c85cc4ce7 100755 --- a/test/MHD/sod/testme.py +++ b/test/MHD/sod/testme.py @@ -27,7 +27,7 @@ def testMe(test): test.nonRegressionTest(filename=name) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/MHD/sphBragTDiffusion/testme.json b/test/MHD/sphBragTDiffusion/testme.json new file mode 100644 index 000000000..c4cb9a334 --- /dev/null +++ b/test/MHD/sphBragTDiffusion/testme.json @@ -0,0 +1,15 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": false, + 
"tolerance": 2e-15 + } + ] +} diff --git a/test/MHD/sphBragTDiffusion/testme.py b/test/MHD/sphBragTDiffusion/testme.py index 6ed5b1648..41193e141 100755 --- a/test/MHD/sphBragTDiffusion/testme.py +++ b/test/MHD/sphBragTDiffusion/testme.py @@ -26,7 +26,7 @@ def testMe(test): test.nonRegressionTest(filename=name, tolerance=2e-15) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/MHD/sphBragViscosity/testme.json b/test/MHD/sphBragViscosity/testme.json new file mode 100644 index 000000000..900a34a77 --- /dev/null +++ b/test/MHD/sphBragViscosity/testme.json @@ -0,0 +1,16 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": false, + "standardTest": false, + "tolerance": 1e-15 + } + ] +} diff --git a/test/MHD/sphBragViscosity/testme.py b/test/MHD/sphBragViscosity/testme.py index 254417f44..b6c3467ea 100755 --- a/test/MHD/sphBragViscosity/testme.py +++ b/test/MHD/sphBragViscosity/testme.py @@ -29,7 +29,7 @@ def testMe(test): test.nonRegressionTest(filename=name, tolerance=1e-15) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/Planet/Planet3Body/testme.json b/test/Planet/Planet3Body/testme.json new file mode 100644 index 000000000..2b2fad202 --- /dev/null +++ b/test/Planet/Planet3Body/testme.json @@ -0,0 +1,15 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "tolerance": 2e-11 + } + ] +} diff --git a/test/Planet/Planet3Body/testme.py b/test/Planet/Planet3Body/testme.py index c11d08963..6edac53ee 100755 --- a/test/Planet/Planet3Body/testme.py +++ b/test/Planet/Planet3Body/testme.py @@ -28,7 +28,7 @@ def testMe(test): test.nonRegressionTest(filename=name,tolerance=tolerance) 
-test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/Planet/PlanetMigration2D/testme.json b/test/Planet/PlanetMigration2D/testme.json new file mode 100644 index 000000000..6e1e08537 --- /dev/null +++ b/test/Planet/PlanetMigration2D/testme.json @@ -0,0 +1,16 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "dec": ["2","2"], + "tolerance": 1e-13 + } + ] +} diff --git a/test/Planet/PlanetMigration2D/testme.py b/test/Planet/PlanetMigration2D/testme.py index 0921d4e62..6c435cbd3 100755 --- a/test/Planet/PlanetMigration2D/testme.py +++ b/test/Planet/PlanetMigration2D/testme.py @@ -28,7 +28,7 @@ def testMe(test): test.nonRegressionTest(filename=name,tolerance=tolerance) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2'] diff --git a/test/Planet/PlanetPlanetRK42D/testme.json b/test/Planet/PlanetPlanetRK42D/testme.json new file mode 100644 index 000000000..0181b072f --- /dev/null +++ b/test/Planet/PlanetPlanetRK42D/testme.json @@ -0,0 +1,25 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0002.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "dec": ["2","2"], + "tolerance": 1e-13, + "multirun": [ + { + "nonRegressionTest": true, + "standardTest": true + },{ + "restart": true, + "restart_no_overwrite": ["dump.0001.dmp", "data.0005.vtk"] + } + ] + } + ] +} diff --git a/test/Planet/PlanetPlanetRK42D/testme.py b/test/Planet/PlanetPlanetRK42D/testme.py index 26cb8ee21..9521aabac 100755 --- a/test/Planet/PlanetPlanetRK42D/testme.py +++ b/test/Planet/PlanetPlanetRK42D/testme.py @@ -38,7 +38,7 @@ def testMe(test): assert dump_mtime == os.path.getmtime("dump.0001.dmp"), "Dump was overwritten on restart" assert vtk_mtime == 
os.path.getmtime("data.0005.vtk"), "VTK file was overwritten on restart" -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2'] diff --git a/test/Planet/PlanetSpiral2D/testme.json b/test/Planet/PlanetSpiral2D/testme.json new file mode 100644 index 000000000..6e1e08537 --- /dev/null +++ b/test/Planet/PlanetSpiral2D/testme.json @@ -0,0 +1,16 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "dec": ["2","2"], + "tolerance": 1e-13 + } + ] +} diff --git a/test/Planet/PlanetSpiral2D/testme.py b/test/Planet/PlanetSpiral2D/testme.py index 0921d4e62..6c435cbd3 100755 --- a/test/Planet/PlanetSpiral2D/testme.py +++ b/test/Planet/PlanetSpiral2D/testme.py @@ -28,7 +28,7 @@ def testMe(test): test.nonRegressionTest(filename=name,tolerance=tolerance) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2'] diff --git a/test/Planet/PlanetTorque3D/testme.json b/test/Planet/PlanetTorque3D/testme.json new file mode 100644 index 000000000..085fb6c0f --- /dev/null +++ b/test/Planet/PlanetTorque3D/testme.json @@ -0,0 +1,16 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "dec": ["2","2","2"], + "tolerance": 1e-13 + } + ] +} diff --git a/test/Planet/PlanetTorque3D/testme.py b/test/Planet/PlanetTorque3D/testme.py index b4f04a507..4393636ea 100755 --- a/test/Planet/PlanetTorque3D/testme.py +++ b/test/Planet/PlanetTorque3D/testme.py @@ -28,7 +28,7 @@ def testMe(test): test.nonRegressionTest(filename=name,tolerance=tolerance) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2','2'] diff --git a/test/Planet/PlanetsIsActiveRK52D/testme.json 
b/test/Planet/PlanetsIsActiveRK52D/testme.json new file mode 100644 index 000000000..30848aecd --- /dev/null +++ b/test/Planet/PlanetsIsActiveRK52D/testme.json @@ -0,0 +1,17 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix-rk4.ini", "idefix-rk5.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "dec": ["2","2"], + "nonRegressionTest": false, + "tolerance": 1e-13 + } + ] +} diff --git a/test/Planet/PlanetsIsActiveRK52D/testme.py b/test/Planet/PlanetsIsActiveRK52D/testme.py index ce0d405c0..aca39ae53 100755 --- a/test/Planet/PlanetsIsActiveRK52D/testme.py +++ b/test/Planet/PlanetsIsActiveRK52D/testme.py @@ -26,7 +26,7 @@ def testMe(test): # test.nonRegressionTest(filename=name,tolerance=tolerance) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2'] diff --git a/test/SelfGravity/DustyCollapse/testme.json b/test/SelfGravity/DustyCollapse/testme.json new file mode 100644 index 000000000..8afbcdb6d --- /dev/null +++ b/test/SelfGravity/DustyCollapse/testme.json @@ -0,0 +1,16 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "nonRegressionTest": false, + "tolerance": 1e-13 + } + ] +} diff --git a/test/SelfGravity/DustyCollapse/testme.py b/test/SelfGravity/DustyCollapse/testme.py index 689038265..2df3893ab 100755 --- a/test/SelfGravity/DustyCollapse/testme.py +++ b/test/SelfGravity/DustyCollapse/testme.py @@ -25,7 +25,7 @@ def testMe(test): test.standardTest() -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/SelfGravity/JeansInstability/testme.json b/test/SelfGravity/JeansInstability/testme.json new file mode 100644 index 000000000..bf4819389 --- /dev/null +++ b/test/SelfGravity/JeansInstability/testme.json @@ -0,0 +1,17 
@@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-cg.ini"], + "noplot": true, + "vectPot": false, + "single": false, + "reconstruction": 2, + "mpi": [false, true], + "dec": ["2"], + "nonRegressionTest": false, + "tolerance": 1e-13 + } + ] +} diff --git a/test/SelfGravity/JeansInstability/testme.py b/test/SelfGravity/JeansInstability/testme.py index ff2f39a83..833feb262 100755 --- a/test/SelfGravity/JeansInstability/testme.py +++ b/test/SelfGravity/JeansInstability/testme.py @@ -26,7 +26,7 @@ def testMe(test): test.standardTest() -test=tst.idfxTest() +test=tst.idfxTest(__file__) # if no decomposition is specified, use that one if not test.dec: diff --git a/test/SelfGravity/RandomSphere/testme.json b/test/SelfGravity/RandomSphere/testme.json new file mode 100644 index 000000000..d5cda3135 --- /dev/null +++ b/test/SelfGravity/RandomSphere/testme.json @@ -0,0 +1,14 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-cg.ini","idefix-minres.ini"], + "noplot": true, + "mpi": [false, true], + "dec": ["2","2","1"], + "nonRegressionTest": false, + "tolerance": 0 + } + ] +} diff --git a/test/SelfGravity/RandomSphere/testme.py b/test/SelfGravity/RandomSphere/testme.py index 38fc30d92..65123498b 100755 --- a/test/SelfGravity/RandomSphere/testme.py +++ b/test/SelfGravity/RandomSphere/testme.py @@ -26,7 +26,7 @@ def testMe(test): #test.nonRegressionTest(filename=name) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.dec: test.dec=['2','2','1'] diff --git a/test/SelfGravity/RandomSphereCartesian/testme.json b/test/SelfGravity/RandomSphereCartesian/testme.json new file mode 100644 index 000000000..85f0f3889 --- /dev/null +++ b/test/SelfGravity/RandomSphereCartesian/testme.json @@ -0,0 +1,12 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini","idefix-cg.ini","idefix-minres.ini","idefix-jacobi.ini"], + 
"noplot": true, + "nonRegressionTest": false, + "tolerance": 0 + } + ] +} diff --git a/test/SelfGravity/RandomSphereCartesian/testme.py b/test/SelfGravity/RandomSphereCartesian/testme.py index 64b4a9ba5..598aa3f21 100755 --- a/test/SelfGravity/RandomSphereCartesian/testme.py +++ b/test/SelfGravity/RandomSphereCartesian/testme.py @@ -26,7 +26,7 @@ def testMe(test): #test.nonRegressionTest(filename=name) -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: testMe(test) else: diff --git a/test/SelfGravity/UniformCollapse/testme.json b/test/SelfGravity/UniformCollapse/testme.json new file mode 100644 index 000000000..3de00c4df --- /dev/null +++ b/test/SelfGravity/UniformCollapse/testme.json @@ -0,0 +1,16 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "noplot": true, + "single": false, + "reconstruction": 2, + "mpi": false, + "standardTest": false, + "tolerance": 0, + "nonRegressionTest": false + } + ] +} diff --git a/test/SelfGravity/UniformCollapse/testme.py b/test/SelfGravity/UniformCollapse/testme.py index 21debc0e0..69918896b 100755 --- a/test/SelfGravity/UniformCollapse/testme.py +++ b/test/SelfGravity/UniformCollapse/testme.py @@ -25,7 +25,7 @@ def testMe(test): test.standardTest() -test=tst.idfxTest() +test=tst.idfxTest(__file__) if not test.all: if(test.check): diff --git a/test/python_requirements.txt b/test/python_requirements.txt index 77ce6e9c2..9cf2383a2 100644 --- a/test/python_requirements.txt +++ b/test/python_requirements.txt @@ -8,3 +8,7 @@ scipy>=1.2.3 # note that no version of inifix supports Python older than 3.6 inifix>=0.11.2 + +# To run the test suite, we can use pytest, mostly any version +# 6.0 is the one available on system available on debian-11 +pytest >= 6.0 diff --git a/test/utils/columnDensity/testme.json b/test/utils/columnDensity/testme.json new file mode 100644 index 000000000..718413c4d --- /dev/null +++ b/test/utils/columnDensity/testme.json @@ -0,0 +1,13 @@ +{ + 
"namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "mpi": [false,true], + "nonRegressionTest": false, + "standardTest": false, + "tolerance": 0 + } + ] +} diff --git a/test/utils/columnDensity/testme.py b/test/utils/columnDensity/testme.py index 6f017d7d8..cd94eb4ef 100755 --- a/test/utils/columnDensity/testme.py +++ b/test/utils/columnDensity/testme.py @@ -9,7 +9,7 @@ sys.path.append(os.getenv("IDEFIX_DIR")) import pytools.idfx_test as tst -test=tst.idfxTest() +test=tst.idfxTest(__file__) test.configure() test.compile() diff --git a/test/utils/dumpImage/testme.json b/test/utils/dumpImage/testme.json new file mode 100644 index 000000000..718413c4d --- /dev/null +++ b/test/utils/dumpImage/testme.json @@ -0,0 +1,13 @@ +{ + "namings": "ini,mpi", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini": ["idefix.ini"], + "mpi": [false,true], + "nonRegressionTest": false, + "standardTest": false, + "tolerance": 0 + } + ] +} diff --git a/test/utils/dumpImage/testme.py b/test/utils/dumpImage/testme.py index 6f017d7d8..cd94eb4ef 100755 --- a/test/utils/dumpImage/testme.py +++ b/test/utils/dumpImage/testme.py @@ -9,7 +9,7 @@ sys.path.append(os.getenv("IDEFIX_DIR")) import pytools.idfx_test as tst -test=tst.idfxTest() +test=tst.idfxTest(__file__) test.configure() test.compile() diff --git a/test/utils/lookupTable/testme.json b/test/utils/lookupTable/testme.json new file mode 100644 index 000000000..8ea6f6961 --- /dev/null +++ b/test/utils/lookupTable/testme.json @@ -0,0 +1,12 @@ +{ + "namings": "ini", + "variants": [ + { + "dumpname": "dump.0001.dmp", + "ini":["idefix.ini"], + "nonRegressionTest": false, + "standardTest": false, + "tolerance": 0 + } + ] +} diff --git a/test/utils/lookupTable/testme.py b/test/utils/lookupTable/testme.py index d40884652..53f6607f2 100755 --- a/test/utils/lookupTable/testme.py +++ b/test/utils/lookupTable/testme.py @@ -30,7 +30,7 @@ def MakeNumpyFile(): -test=tst.idfxTest() 
+test=tst.idfxTest(__file__) MakeNumpyFile() test.configure()