diff --git a/Applications/Utils/TestReorderMesh.smk b/Applications/Utils/TestReorderMesh.smk
index 932825d00679f579518eda3ce7a0bab0063ac04d..e3031b702fc3593c61cf1fe3d69f626030c6c3d6 100644
--- a/Applications/Utils/TestReorderMesh.smk
+++ b/Applications/Utils/TestReorderMesh.smk
@@ -77,7 +77,7 @@ rule execute_ogs:
         """
         (
         cd cube_1x1x1_{wildcards.type}_{wildcards.lx}x{wildcards.ly}x{wildcards.lz}_{wildcards.number_of_partitions}
-        mpirun -np {wildcards.number_of_partitions} ogs {prj_base_name}.prj -o results/
+        mpirun --bind-to none -np {wildcards.number_of_partitions} ogs {prj_base_name}.prj -o results/
         )
         """
 
diff --git a/Tests/CMakeLists.txt b/Tests/CMakeLists.txt
index bb27b9e14a494b55fa7043b836c88a9125ad0e9f..9cb270c635c6394cba92ee1a02d74e718e5e9ba7 100644
--- a/Tests/CMakeLists.txt
+++ b/Tests/CMakeLists.txt
@@ -177,7 +177,7 @@ add_custom_target(tests-cleanup ${CMAKE_COMMAND} -E remove -f testrunner.xml)
 
 if(OGS_USE_PETSC)
     if("${HOSTNAME}" MATCHES "frontend.*")
-        set(MPIRUN_ARGS --mca btl_openib_allow_ib 1)
+        set(MPIRUN_ARGS --mca btl_openib_allow_ib 1 --bind-to none)
     endif()
     set(TEST_FILTER_MPI --gtest_filter=-MPITest*)
     add_custom_target(tests
diff --git a/Tests/Data/Notebooks/SimplePETSc.ipynb b/Tests/Data/Notebooks/SimplePETSc.ipynb
index ec40a12f87fc724c000377b8612c8b1f7b66e61d..762e5f8468df860f2b41adce18438a27d7fba620 100644
--- a/Tests/Data/Notebooks/SimplePETSc.ipynb
+++ b/Tests/Data/Notebooks/SimplePETSc.ipynb
@@ -42,8 +42,8 @@
     "if not out_dir.exists():\n",
     "    out_dir.mkdir(parents=True)\n",
     "\n",
-    "print(f\"mpirun -np 2 ogs {prj_file} > out.txt\")\n",
-    "! mpirun -np 2 ogs {prj_file} > out.txt\n",
+    "print(f\"mpirun --bind-to none -np 2 ogs {prj_file} > out.txt\")\n",
+    "! mpirun --bind-to none -np 2 ogs {prj_file} > out.txt\n",
     "\n",
     "from datetime import datetime\n",
     "\n",
diff --git a/Tests/Data/Parabolic/ComponentTransport/ReactiveTransport/DecayChain/DecayChain.ipynb b/Tests/Data/Parabolic/ComponentTransport/ReactiveTransport/DecayChain/DecayChain.ipynb
index 2f6b2306b5787af2fec32fec44e002b4688590b5..1d1484c314e197986b2b2d9ebb38a9048ffa49fd 100644
--- a/Tests/Data/Parabolic/ComponentTransport/ReactiveTransport/DecayChain/DecayChain.ipynb
+++ b/Tests/Data/Parabolic/ComponentTransport/ReactiveTransport/DecayChain/DecayChain.ipynb
@@ -1354,9 +1354,9 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "mpirun -np 4 ogs ./GlobalImplicitApproach/MPI/4Processors/1d_decay_chain_GIA.prj -o _out > out.txt\n",
+      "mpirun --bind-to none -np 4 ogs ./GlobalImplicitApproach/MPI/4Processors/1d_decay_chain_GIA.prj -o _out > out.txt\n",
       "Execution time for the parallelized GIA model with 4 processors is 91.26 s\n",
-      "mpirun -np 8 ogs ./GlobalImplicitApproach/MPI/8Processors/1d_decay_chain_GIA.prj -o _out > out.txt\n",
+      "mpirun --bind-to none -np 8 ogs ./GlobalImplicitApproach/MPI/8Processors/1d_decay_chain_GIA.prj -o _out > out.txt\n",
       "--------------------------------------------------------------------------\n",
       "There are not enough slots available in the system to satisfy the 8\n",
       "slots that were requested by the application:\n",
@@ -1404,9 +1404,9 @@
     "prj_file_GIA_4 = f\"./GlobalImplicitApproach/MPI/4Processors/{prj_name}_GIA.prj\"\n",
     "prj_file_GIA_8 = f\"./GlobalImplicitApproach/MPI/8Processors/{prj_name}_GIA.prj\"\n",
     "\n",
-    "print(f\"mpirun -np 4 ogs {prj_file_GIA_4} -o {out_dir} > out.txt\")\n",
+    "print(f\"mpirun --bind-to none -np 4 ogs {prj_file_GIA_4} -o {out_dir} > out.txt\")\n",
     "start_time = time.time()\n",
-    "! mpirun -np 4 ogs {prj_file_GIA_4} -o {out_dir} > {out_dir}/out.txt\n",
+    "! mpirun --bind-to none -np 4 ogs {prj_file_GIA_4} -o {out_dir} > {out_dir}/out.txt\n",
     "end_time = time.time()\n",
     "runtime_GIA_4 = round(end_time - start_time, 2)\n",
     "print(\n",
@@ -1415,9 +1415,9 @@
     "    \"s\",\n",
     ")\n",
     "\n",
-    "print(f\"mpirun -np 8 ogs {prj_file_GIA_8} -o {out_dir} > out.txt\")\n",
+    "print(f\"mpirun --bind-to none -np 8 ogs {prj_file_GIA_8} -o {out_dir} > out.txt\")\n",
     "start_time = time.time()\n",
-    "! mpirun -np 8 ogs {prj_file_GIA_8} -o {out_dir} > {out_dir}/out.txt\n",
+    "! mpirun --bind-to none -np 8 ogs {prj_file_GIA_8} -o {out_dir} > {out_dir}/out.txt\n",
     "end_time = time.time()\n",
     "runtime_GIA_8 = round(end_time - start_time, 2)\n",
     "print(\n",
diff --git a/Tests/Data/PhaseField/PForthotropy_jupyter_notebook/sen_shear.ipynb b/Tests/Data/PhaseField/PForthotropy_jupyter_notebook/sen_shear.ipynb
index 06271057ae121df064f7e09ac070ae4b6b87dabc..e6cec7ccb9aa893b7853d39798e01353af65ad94 100644
--- a/Tests/Data/PhaseField/PForthotropy_jupyter_notebook/sen_shear.ipynb
+++ b/Tests/Data/PhaseField/PForthotropy_jupyter_notebook/sen_shear.ipynb
@@ -207,7 +207,7 @@
     "    t0 = time.time()\n",
     "    if MPI:\n",
     "        print(f\" > OGS started execution with MPI - {ncores} cores...\")\n",
-    "        ! mpirun -np {ncores} ogs {out_dir}/{prj_name} -o {output_dir} >> {logfile}\n",
+    "        ! mpirun --bind-to none -np {ncores} ogs {out_dir}/{prj_name} -o {output_dir} >> {logfile}\n",
     "    else:\n",
     "        print(\" > OGS started execution - \")\n",
     "        ! ogs {out_dir}/{prj_name} -o {output_dir} >> {logfile}\n",
diff --git a/Tests/Data/PhaseField/tpb_jupyter_notebook/TPB.ipynb b/Tests/Data/PhaseField/tpb_jupyter_notebook/TPB.ipynb
index 6c28d09353c472f91b5afea40b8d47448f504ca4..977a1a5475778d04eb273e5f80266480049a0911 100644
--- a/Tests/Data/PhaseField/tpb_jupyter_notebook/TPB.ipynb
+++ b/Tests/Data/PhaseField/tpb_jupyter_notebook/TPB.ipynb
@@ -203,7 +203,7 @@
     "    t0 = time.time()\n",
     "    if MPI:\n",
     "        print(f\" > OGS started execution with MPI - {ncores} cores...\")\n",
-    "        ! mpirun -np {ncores} ogs {out_dir}/{prj_name} -o {output_dir} >> {logfile}\n",
+    "        ! mpirun --bind-to none -np {ncores} ogs {out_dir}/{prj_name} -o {output_dir} >> {logfile}\n",
     "    else:\n",
     "        print(\" > OGS started execution ...\")\n",
     "        ! ogs {out_dir}/{prj_name} -o {output_dir} >> {logfile}\n",
diff --git a/Tests/Data/Utils/partmesh/partmesh_roundtrip.md b/Tests/Data/Utils/partmesh/partmesh_roundtrip.md
index ebf618bcdb49cfe54c1a05189557d813ecd5922f..9126fbc646c6011158bd683004499e153db23beb 100644
--- a/Tests/Data/Utils/partmesh/partmesh_roundtrip.md
+++ b/Tests/Data/Utils/partmesh/partmesh_roundtrip.md
@@ -28,7 +28,7 @@ num_partitions = 4
 ```
 
 ```python
-! cd {out_dir} && mpirun -np {num_partitions} binaryToPVTU -i {input_mesh_basename} -o {input_mesh_basename}
+! cd {out_dir} && mpirun --bind-to none -np {num_partitions} binaryToPVTU -i {input_mesh_basename} -o {input_mesh_basename}
 ```
 
 Please note that `binaryToPVTU` has to be run with MPI and is therefore available on OGS PETSc configurations only.
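
All touched call sites follow the same invocation pattern. A minimal sketch, assuming an Open MPI `mpirun` and an already partitioned OGS project file (`project.prj` and the `results/` output directory below are placeholders, not files from this change):

```bash
# Launch OGS on 4 MPI ranks with Open MPI's process binding disabled.
# With binding left at its default, concurrently running jobs on the same
# node can be pinned to the same cores, oversubscribing them and skewing
# measured runtimes; --bind-to none lets the OS schedule the ranks freely.
mpirun --bind-to none -np 4 ogs project.prj -o results/
```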