diff --git a/BaseLib/MPI.h b/BaseLib/MPI.h
index 9318cf354e265888c556bcd921668a1256de02d3..1d58b374b144624ebff56f94e24c679b9ff45b82 100644
--- a/BaseLib/MPI.h
+++ b/BaseLib/MPI.h
@@ -18,19 +18,6 @@
 namespace BaseLib::MPI
 {
 
-static inline int reduceMin(int const val)
-{
-#ifdef USE_PETSC
-    // Reduce operations for interprocess communications while using Petsc
-    int result;
-    MPI_Allreduce(&val, &result, 1, MPI_INTEGER, MPI_MIN, MPI_COMM_WORLD);
-    return result;
-#else
-    // Reduce operations for interprocess communications without using Petsc
-    return val;
-#endif
-}
-
 #ifdef USE_PETSC
 struct Mpi
 {
@@ -108,4 +95,16 @@ static T allreduce(T const& value, MPI_Op const& mpi_op, Mpi const& mpi)
     return result;
 }
 #endif
+
+/// The reduction is implemented transparently for builds with and without MPI.
+/// In the latter case the input value is returned unchanged.
+static inline int reduceMin(int const val)
+{
+#ifdef USE_PETSC
+    return allreduce(val, MPI_MIN, Mpi{MPI_COMM_WORLD});
+#else
+    return val;
+#endif
+}
+
 }  // namespace BaseLib::MPI
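For reference, a minimal usage sketch of the new wrappers (the helper names countGlobalNodes and globalMinStatus are hypothetical; the BaseLib::MPI API is the one introduced in the hunk above):

#include <cstddef>

#include "BaseLib/MPI.h"

// Hypothetical helper: sum a per-rank node count over all ranks.
std::size_t countGlobalNodes(std::size_t const local_node_count)
{
#ifdef USE_PETSC
    // A default-constructed Mpi wraps the world communicator, as in the call
    // sites changed below.
    return BaseLib::MPI::allreduce(local_node_count, MPI_SUM,
                                   BaseLib::MPI::Mpi{});
#else
    return local_node_count;
#endif
}

// Hypothetical helper: reduceMin() needs no guard; without MPI it returns its
// argument unchanged.
int globalMinStatus(int const local_status)
{
    return BaseLib::MPI::reduceMin(local_status);
}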
diff --git a/MeshGeoToolsLib/BoundaryElementsAtPoint.cpp b/MeshGeoToolsLib/BoundaryElementsAtPoint.cpp
index 3979edd184fc7dacac1dd48856b2557acf4e8202..56e83a22c75d642cdf451eeaf673168bddec0e93 100644
--- a/MeshGeoToolsLib/BoundaryElementsAtPoint.cpp
+++ b/MeshGeoToolsLib/BoundaryElementsAtPoint.cpp
@@ -9,10 +9,7 @@
 
 #include "BoundaryElementsAtPoint.h"
 
-#ifdef USE_PETSC
-#include <mpi.h>
-#endif
-
+#include "BaseLib/MPI.h"
 #include "GeoLib/Point.h"
 #include "MathLib/Point3d.h"
 #include "MeshGeoToolsLib/MeshNodeSearcher.h"
@@ -32,10 +29,8 @@ BoundaryElementsAtPoint::BoundaryElementsAtPoint(
 
 #ifdef USE_PETSC
     std::size_t const number_of_found_nodes_at_rank = node_ids.size();
-    std::size_t number_of_total_found_nodes = 0;
-
-    MPI_Allreduce(&number_of_found_nodes_at_rank, &number_of_total_found_nodes,
-                  1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
+    std::size_t const number_of_total_found_nodes = BaseLib::MPI::allreduce(
+        number_of_found_nodes_at_rank, MPI_SUM, BaseLib::MPI::Mpi{});
 
     if (number_of_total_found_nodes == 0)
     {
diff --git a/MeshLib/IO/MPI_IO/NodePartitionedMeshReader.cpp b/MeshLib/IO/MPI_IO/NodePartitionedMeshReader.cpp
index f0bb10ddf41640b60bfb4b96e8c6c24765bd5723..faf10327ae865bc7c2bf1b180816d1ddc5b28df0 100644
--- a/MeshLib/IO/MPI_IO/NodePartitionedMeshReader.cpp
+++ b/MeshLib/IO/MPI_IO/NodePartitionedMeshReader.cpp
@@ -265,10 +265,8 @@ void NodePartitionedMeshReader::readProperties(
     is.seekg(offset);
     std::optional<MeshLib::IO::PropertyVectorPartitionMetaData> pvpmd(
         MeshLib::IO::readPropertyVectorPartitionMetaData(is));
-    bool pvpmd_read_ok = static_cast<bool>(pvpmd);
-    bool all_pvpmd_read_ok;
-    MPI_Allreduce(&pvpmd_read_ok, &all_pvpmd_read_ok, 1, MPI_C_BOOL, MPI_LOR,
-                  mpi_.communicator);
+    bool const all_pvpmd_read_ok =
+        BaseLib::MPI::allreduce(static_cast<bool>(pvpmd), MPI_LOR, mpi_);
     if (!all_pvpmd_read_ok)
     {
         OGS_FATAL(
diff --git a/MeshLib/Utils/transformMeshToNodePartitionedMesh.cpp b/MeshLib/Utils/transformMeshToNodePartitionedMesh.cpp
index 3e7aa4fb87e569983291d3a66f39bfe20fa3ce05..9eeee236a6b85a69b8723a4f632cbee712eec33a 100644
--- a/MeshLib/Utils/transformMeshToNodePartitionedMesh.cpp
+++ b/MeshLib/Utils/transformMeshToNodePartitionedMesh.cpp
@@ -184,9 +184,8 @@ std::vector<std::size_t> computeGhostBaseNodeGlobalNodeIDsOfSubDomainPartition(
     // other ranks and at the same time receive from all other ranks
 // first send the sizes to all other ranks to be able to allocate buffers
     auto const size = subdomain_node_id_to_bulk_node_id.size();
-    std::size_t global_number_of_subdomain_node_id_to_bulk_node_id = 0;
-    MPI_Allreduce(&size, &global_number_of_subdomain_node_id_to_bulk_node_id, 1,
-                  MPI_UNSIGNED_LONG, MPI_SUM, mpi.communicator);
+    std::size_t const global_number_of_subdomain_node_id_to_bulk_node_id =
+        BaseLib::MPI::allreduce(size, MPI_SUM, mpi);
 
     DBUG("[{}] global_number_of_subdomain_node_id_to_bulk_node_id: '{}' ",
          subdomain_mesh->getName(),
@@ -343,13 +342,8 @@ std::vector<std::size_t> computeGlobalNodeIDsOfSubDomainPartition(
 unsigned long getNumberOfGlobalNodes(Mesh const* subdomain_mesh)
 {
     // sum all nodes over all partitions in number_of_global_nodes
-    unsigned long number_of_local_nodes = subdomain_mesh->getNodes().size();
-    unsigned long number_of_global_nodes = 0;
-
-    MPI_Comm mpi_comm = MPI_COMM_WORLD;
-
-    MPI_Allreduce(&number_of_local_nodes, &number_of_global_nodes, 1,
-                  MPI_UNSIGNED_LONG, MPI_SUM, mpi_comm);
+    unsigned long const number_of_global_nodes = BaseLib::MPI::allreduce(
+        subdomain_mesh->getNodes().size(), MPI_SUM, BaseLib::MPI::Mpi{});
     DBUG("[{}] number_of_global_nodes: {}'", subdomain_mesh->getName(),
          number_of_global_nodes);
     return number_of_global_nodes;
diff --git a/NumLib/DOF/DOFTableUtil.cpp b/NumLib/DOF/DOFTableUtil.cpp
index 93266fc65862929b316109aa30466fea0b2c0d24..4241919a6ecc5a3281bd9283ddb353f271096bdf 100644
--- a/NumLib/DOF/DOFTableUtil.cpp
+++ b/NumLib/DOF/DOFTableUtil.cpp
@@ -14,6 +14,7 @@
 #include <cassert>
 #include <functional>
 
+#include "BaseLib/MPI.h"
 #include "MathLib/LinAlg/Eigen/EigenMapTools.h"
 namespace NumLib
 {
@@ -44,15 +45,12 @@ double norm(GlobalVector const& x, unsigned const global_component,
 double norm1(GlobalVector const& x, unsigned const global_component,
              LocalToGlobalIndexMap const& dof_table, MeshLib::Mesh const& mesh)
 {
-    double res =
+    double const res =
         norm(x, global_component, dof_table, mesh,
              [](double res, double value) { return res + std::abs(value); });
 
 #ifdef USE_PETSC
-    double global_result = 0.0;
-    MPI_Allreduce(&res, &global_result, 1, MPI_DOUBLE, MPI_SUM,
-                  PETSC_COMM_WORLD);
-    res = global_result;
+    return BaseLib::MPI::allreduce(res, MPI_SUM, BaseLib::MPI::Mpi{});
 #endif
     return res;
 }
@@ -60,15 +58,13 @@ double norm1(GlobalVector const& x, unsigned const global_component,
 double norm2(GlobalVector const& x, unsigned const global_component,
              LocalToGlobalIndexMap const& dof_table, MeshLib::Mesh const& mesh)
 {
-    double res =
+    double const res =
         norm(x, global_component, dof_table, mesh,
              [](double res, double value) { return res + value * value; });
 
 #ifdef USE_PETSC
-    double global_result = 0.0;
-    MPI_Allreduce(&res, &global_result, 1, MPI_DOUBLE, MPI_SUM,
-                  PETSC_COMM_WORLD);
-    res = global_result;
+    return std::sqrt(
+        BaseLib::MPI::allreduce(res, MPI_SUM, BaseLib::MPI::Mpi{}));
 #endif
     return std::sqrt(res);
 }
@@ -77,15 +73,12 @@ double normInfinity(GlobalVector const& x, unsigned const global_component,
                     LocalToGlobalIndexMap const& dof_table,
                     MeshLib::Mesh const& mesh)
 {
-    double res = norm(x, global_component, dof_table, mesh,
-                      [](double res, double value)
-                      { return std::max(res, std::abs(value)); });
+    double const res =
+        norm(x, global_component, dof_table, mesh, [](double res, double value)
+             { return std::max(res, std::abs(value)); });
 
 #ifdef USE_PETSC
-    double global_result = 0.0;
-    MPI_Allreduce(&res, &global_result, 1, MPI_DOUBLE, MPI_MAX,
-                  PETSC_COMM_WORLD);
-    res = global_result;
+    return BaseLib::MPI::allreduce(res, MPI_MAX, BaseLib::MPI::Mpi{});
 #endif
     return res;
 }
diff --git a/ProcessLib/PhaseField/PhaseFieldProcess.cpp b/ProcessLib/PhaseField/PhaseFieldProcess.cpp
index 3020a93570299bd68a0b1945c35cebdef71f1edd..8c150546174e887cfc6594fdf869492efd8271fd 100644
--- a/ProcessLib/PhaseField/PhaseFieldProcess.cpp
+++ b/ProcessLib/PhaseField/PhaseFieldProcess.cpp
@@ -12,6 +12,7 @@
 
 #include <cassert>
 
+#include "BaseLib/MPI.h"
 #include "MeshLib/Utils/getOrCreateMeshProperty.h"
 #include "NumLib/DOF/ComputeSparsityPattern.h"
 #include "PhaseFieldFEM.h"
@@ -307,15 +308,13 @@ void PhaseFieldProcess<DisplacementDim>::postTimestepConcreteProcess(
             _process_data.pressure_work);
 
 #ifdef USE_PETSC
-        double const elastic_energy = _process_data.elastic_energy;
-        MPI_Allreduce(&elastic_energy, &_process_data.elastic_energy, 1,
-                      MPI_DOUBLE, MPI_SUM, PETSC_COMM_WORLD);
-        double const surface_energy = _process_data.surface_energy;
-        MPI_Allreduce(&surface_energy, &_process_data.surface_energy, 1,
-                      MPI_DOUBLE, MPI_SUM, PETSC_COMM_WORLD);
-        double const pressure_work = _process_data.pressure_work;
-        MPI_Allreduce(&pressure_work, &_process_data.pressure_work, 1,
-                      MPI_DOUBLE, MPI_SUM, PETSC_COMM_WORLD);
+        BaseLib::MPI::Mpi mpi{};
+        _process_data.elastic_energy =
+            BaseLib::MPI::allreduce(_process_data.elastic_energy, MPI_SUM, mpi);
+        _process_data.surface_energy =
+            BaseLib::MPI::allreduce(_process_data.surface_energy, MPI_SUM, mpi);
+        _process_data.pressure_work =
+            BaseLib::MPI::allreduce(_process_data.pressure_work, MPI_SUM, mpi);
 #endif
 
         INFO(
@@ -361,9 +360,8 @@ void PhaseFieldProcess<DisplacementDim>::postNonLinearSolverConcreteProcess(
         getActiveElementIDs(), dof_tables, x, t, _process_data.crack_volume);
 
 #ifdef USE_PETSC
-    double const crack_volume = _process_data.crack_volume;
-    MPI_Allreduce(&crack_volume, &_process_data.crack_volume, 1, MPI_DOUBLE,
-                  MPI_SUM, PETSC_COMM_WORLD);
+    _process_data.crack_volume = BaseLib::MPI::allreduce(
+        _process_data.crack_volume, MPI_SUM, BaseLib::MPI::Mpi{});
 #endif
 
     INFO("Integral of crack: {:g}", _process_data.crack_volume);
diff --git a/Tests/MathLib/TestGlobalMatrixInterface.cpp b/Tests/MathLib/TestGlobalMatrixInterface.cpp
index 075084ac1efe9a95b80e1f929968d379e0d9f43c..1e2337c1b1a8ad530ce6959984c6d113aec23b42 100644
--- a/Tests/MathLib/TestGlobalMatrixInterface.cpp
+++ b/Tests/MathLib/TestGlobalMatrixInterface.cpp
@@ -17,6 +17,7 @@
 
 #include <Eigen/Core>
 
+#include "BaseLib/MPI.h"
 #include "MathLib/LinAlg/LinAlg.h"
 
 #if defined(USE_PETSC)
@@ -58,24 +59,17 @@ void checkGlobalMatrixInterface(T_MATRIX& m)
 template <class T_MATRIX, class T_VECTOR>
 void checkGlobalMatrixInterfaceMPI(T_MATRIX& m, T_VECTOR& v)
 {
-    int msize;
-    MPI_Comm_size(PETSC_COMM_WORLD, &msize);
-    int mrank;
-    MPI_Comm_rank(PETSC_COMM_WORLD, &mrank);
+    BaseLib::MPI::Mpi mpi{PETSC_COMM_WORLD};
 
-    ASSERT_EQ(3u, msize);
+    ASSERT_EQ(3u, mpi.size);
     ASSERT_EQ(m.getRangeEnd() - m.getRangeBegin(), m.getNumberOfLocalRows());
 
-    int gathered_rows;
-    int local_rows = m.getNumberOfLocalRows();
-    MPI_Allreduce(&local_rows, &gathered_rows, 1, MPI_INT, MPI_SUM,
-                  PETSC_COMM_WORLD);
+    int const gathered_rows =
+        BaseLib::MPI::allreduce(m.getNumberOfLocalRows(), MPI_SUM, mpi);
     ASSERT_EQ(m.getNumberOfRows(), gathered_rows);
 
-    int gathered_cols;
-    int local_cols = m.getNumberOfLocalColumns();
-    MPI_Allreduce(&local_cols, &gathered_cols, 1, MPI_INT, MPI_SUM,
-                  PETSC_COMM_WORLD);
+    int const gathered_cols =
+        BaseLib::MPI::allreduce(m.getNumberOfLocalColumns(), MPI_SUM, mpi);
     ASSERT_EQ(m.getNumberOfColumns(), gathered_cols);
 
     // Add entries
@@ -87,8 +81,8 @@ void checkGlobalMatrixInterfaceMPI(T_MATRIX& m, T_VECTOR& v)
 
     std::vector<GlobalIndexType> row_pos(2);
     std::vector<GlobalIndexType> col_pos(2);
-    row_pos[0] = 2 * mrank;
-    row_pos[1] = 2 * mrank + 1;
+    row_pos[0] = 2 * mpi.rank;
+    row_pos[1] = 2 * mpi.rank + 1;
     col_pos[0] = row_pos[0];
     col_pos[1] = row_pos[1];
 
@@ -112,10 +106,10 @@ void checkGlobalMatrixInterfaceMPI(T_MATRIX& m, T_VECTOR& v)
     ASSERT_EQ(sqrt(3 * (3 * 3 + 7 * 7)), norm2(y));
 
     // set a value
-    m_c.set(2 * mrank, 2 * mrank, 5.0);
+    m_c.set(2 * mpi.rank, 2 * mpi.rank, 5.0);
     MathLib::finalizeMatrixAssembly(m_c);
     // add a value
-    m_c.add(2 * mrank + 1, 2 * mrank + 1, 5.0);
+    m_c.add(2 * mpi.rank + 1, 2 * mpi.rank + 1, 5.0);
     MathLib::finalizeMatrixAssembly(m_c);
 
     matMult(m_c, v, y);
@@ -127,21 +121,16 @@ void checkGlobalMatrixInterfaceMPI(T_MATRIX& m, T_VECTOR& v)
 template <class T_MATRIX, class T_VECTOR>
 void checkGlobalRectangularMatrixInterfaceMPI(T_MATRIX& m, T_VECTOR& v)
 {
-    int mrank;
-    MPI_Comm_rank(PETSC_COMM_WORLD, &mrank);
+    BaseLib::MPI::Mpi mpi{PETSC_COMM_WORLD};
 
     ASSERT_EQ(m.getRangeEnd() - m.getRangeBegin(), m.getNumberOfLocalRows());
 
-    int gathered_rows;
-    int local_rows = m.getNumberOfLocalRows();
-    MPI_Allreduce(&local_rows, &gathered_rows, 1, MPI_INT, MPI_SUM,
-                  PETSC_COMM_WORLD);
+    int const gathered_rows =
+        BaseLib::MPI::allreduce(m.getNumberOfLocalRows(), MPI_SUM, mpi);
     ASSERT_EQ(m.getNumberOfRows(), gathered_rows);
 
-    int gathered_cols;
-    int local_cols = m.getNumberOfLocalColumns();
-    MPI_Allreduce(&local_cols, &gathered_cols, 1, MPI_INT, MPI_SUM,
-                  PETSC_COMM_WORLD);
+    int const gathered_cols =
+        BaseLib::MPI::allreduce(m.getNumberOfLocalColumns(), MPI_SUM, mpi);
     ASSERT_EQ(m.getNumberOfColumns(), gathered_cols);
 
     // Add entries
@@ -155,11 +144,11 @@ void checkGlobalRectangularMatrixInterfaceMPI(T_MATRIX& m, T_VECTOR& v)
 
     std::vector<GlobalIndexType> row_pos(2);
     std::vector<GlobalIndexType> col_pos(3);
-    row_pos[0] = 2 * mrank;
-    row_pos[1] = 2 * mrank + 1;
-    col_pos[0] = 3 * mrank;
-    col_pos[1] = 3 * mrank + 1;
-    col_pos[2] = 3 * mrank + 2;
+    row_pos[0] = 2 * mpi.rank;
+    row_pos[1] = 2 * mpi.rank + 1;
+    col_pos[0] = 3 * mpi.rank;
+    col_pos[1] = 3 * mpi.rank + 1;
+    col_pos[2] = 3 * mpi.rank + 2;
 
     m.add(row_pos, col_pos, loc_m);