diff --git a/MathLib/LinAlg/PETSc/PETScVector.cpp b/MathLib/LinAlg/PETSc/PETScVector.cpp
index bf12c6d3878ce03005e4b6b2abb060c76a1be842..018d5c3ea21af822385dd6a885dae4033830c871 100644
--- a/MathLib/LinAlg/PETSc/PETScVector.cpp
+++ b/MathLib/LinAlg/PETSc/PETScVector.cpp
@@ -24,6 +24,7 @@
 #include <cassert>
 
 #include "BaseLib/Error.h"
+#include "BaseLib/MPI.h"
 
 namespace MathLib
 {
@@ -116,21 +117,16 @@ void PETScVector::finalizeAssembly()
 void PETScVector::gatherLocalVectors(PetscScalar local_array[],
                                      PetscScalar global_array[]) const
 {
-    // Collect vectors from processors.
-    int size_rank;
-    MPI_Comm_size(PETSC_COMM_WORLD, &size_rank);
+    BaseLib::MPI::Mpi mpi{PETSC_COMM_WORLD};
 
     // number of elements to be sent for each rank
-    std::vector<PetscInt> i_cnt(size_rank);
-
-    MPI_Allgather(&size_loc_, 1, MPI_INT, &i_cnt[0], 1, MPI_INT,
-                  PETSC_COMM_WORLD);
+    std::vector<PetscInt> const i_cnt = BaseLib::MPI::allgather(size_loc_, mpi);
 
     // collect local array
     PetscInt offset = 0;
     // offset in the receive vector of the data from each rank
-    std::vector<PetscInt> i_disp(size_rank);
-    for (PetscInt i = 0; i < size_rank; i++)
+    std::vector<PetscInt> i_disp(mpi.size);
+    for (PetscInt i = 0; i < mpi.size; i++)
     {
         i_disp[i] = offset;
         offset += i_cnt[i];
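
Note on the new dependency: every hunk in this change leans on the new
BaseLib/MPI.h header. Inferred from the call sites alone, it provides an Mpi
struct that caches communicator, rank, and size, plus an allgather() that
contributes one value per rank and returns the gathered values as a vector of
length mpi.size, ordered by rank. The sketch below restates that contract for
orientation only; the default communicator, the mpiType() mapping helper, and
all other details are assumptions for illustration, not the actual OGS header
(PetscInt is likewise assumed to map to a plain int here).

#include <mpi.h>

#include <type_traits>
#include <vector>

namespace BaseLib::MPI
{
struct Mpi
{
    // Default-constructs on MPI_COMM_WORLD (assumed), which matches the
    // call sites below that simply write BaseLib::MPI::Mpi mpi;
    explicit Mpi(MPI_Comm const comm = MPI_COMM_WORLD) : communicator(comm)
    {
        MPI_Comm_size(communicator, &size);
        MPI_Comm_rank(communicator, &rank);
    }

    MPI_Comm communicator;
    int size = 0;
    int rank = 0;
};

// Hypothetical helper mapping a C++ type to its MPI datatype; only the two
// types occurring in this diff are covered.
template <typename T>
MPI_Datatype mpiType()
{
    if constexpr (std::is_same_v<T, int>)
    {
        return MPI_INT;
    }
    else if constexpr (std::is_same_v<T, unsigned long>)
    {
        return MPI_UNSIGNED_LONG;
    }
    else
    {
        static_assert(sizeof(T) == 0, "unsupported type");
    }
}

// Gathers one value from every rank; the result has mpi.size entries,
// ordered by rank.
template <typename T>
std::vector<T> allgather(T const value, Mpi const& mpi)
{
    std::vector<T> result(mpi.size);
    MPI_Allgather(&value, 1, mpiType<T>(), result.data(), 1, mpiType<T>(),
                  mpi.communicator);
    return result;
}
}  // namespace BaseLib::MPI
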
diff --git a/MeshLib/IO/MPI_IO/NodePartitionedMeshReader.cpp b/MeshLib/IO/MPI_IO/NodePartitionedMeshReader.cpp
index 7564c1773ddc206fcc7885884926c059288ba684..97bf7bd8b3fe889ea4e32351e43f6fcab7d7207d 100644
--- a/MeshLib/IO/MPI_IO/NodePartitionedMeshReader.cpp
+++ b/MeshLib/IO/MPI_IO/NodePartitionedMeshReader.cpp
@@ -22,6 +22,7 @@
 
 #include "BaseLib/FileTools.h"
 #include "BaseLib/Logging.h"
+#include "BaseLib/MPI.h"
 #include "BaseLib/RunTime.h"
 #include "MeshLib/Elements/Elements.h"
 #include "MeshLib/MeshEnums.h"
@@ -404,15 +405,8 @@ MeshLib::NodePartitionedMesh* NodePartitionedMeshReader::newMesh(
     std::vector<MeshLib::Element*> const& mesh_elems,
     MeshLib::Properties const& properties) const
 {
-    std::vector<std::size_t> gathered_n_regular_base_nodes(mpi_.size);
-
-    MPI_Allgather(&_mesh_info.number_of_regular_base_nodes,
-                  1,
-                  MPI_UNSIGNED_LONG,
-                  gathered_n_regular_base_nodes.data(),
-                  1,
-                  MPI_UNSIGNED_LONG,
-                  mpi_.communicator);
+    std::vector<std::size_t> const gathered_n_regular_base_nodes =
+        BaseLib::MPI::allgather(_mesh_info.number_of_regular_base_nodes, mpi_);
 
     std::vector<std::size_t> n_regular_base_nodes_at_rank;
     n_regular_base_nodes_at_rank.push_back(0);
@@ -420,18 +414,11 @@ MeshLib::NodePartitionedMesh* NodePartitionedMeshReader::newMesh(
                      end(gathered_n_regular_base_nodes),
                      back_inserter(n_regular_base_nodes_at_rank));
 
-    std::vector<std::size_t> gathered_n_regular_high_order_nodes(
-        mpi_.size);
     std::size_t const n_regular_high_order_nodes =
         _mesh_info.number_of_regular_nodes -
         _mesh_info.number_of_regular_base_nodes;
-    MPI_Allgather(&n_regular_high_order_nodes,
-                  1,
-                  MPI_UNSIGNED_LONG,
-                  gathered_n_regular_high_order_nodes.data(),
-                  1,
-                  MPI_UNSIGNED_LONG,
-                  mpi_.communicator);
+    std::vector<std::size_t> const gathered_n_regular_high_order_nodes =
+        BaseLib::MPI::allgather(n_regular_high_order_nodes, mpi_);
 
     std::vector<std::size_t> n_regular_high_order_nodes_at_rank;
     n_regular_high_order_nodes_at_rank.push_back(0);
diff --git a/MeshLib/IO/XDMF/mpi/partition.cpp b/MeshLib/IO/XDMF/mpi/partition.cpp
index c1855dc2ac0c6a06f9d109e8f3fd327de1fac163..f6e313338497a667d6bbbdb22dc1e2f27ecfd94b 100644
--- a/MeshLib/IO/XDMF/mpi/partition.cpp
+++ b/MeshLib/IO/XDMF/mpi/partition.cpp
@@ -18,6 +18,7 @@
 #include <numeric>
 
 #include "BaseLib/Logging.h"
+#include "BaseLib/MPI.h"
 #include "MeshLib/IO/XDMF/fileIO.h"
 #include "getCommunicator.h"
 
@@ -33,22 +34,10 @@ bool isFileManager()
 PartitionInfo getPartitionInfo(std::size_t const size,
                                unsigned int const n_files)
 {
-    MPI_Comm const mpi_comm = getCommunicator(n_files).mpi_communicator;
-    int mpi_size;
-    int mpi_rank;
-    MPI_Comm_size(mpi_comm, &mpi_size);
-    MPI_Comm_rank(mpi_comm, &mpi_rank);
-
-    std::vector<std::size_t> partition_sizes;
-    partition_sizes.resize(mpi_size);
+    BaseLib::MPI::Mpi const mpi{getCommunicator(n_files).mpi_communicator};
 
-    MPI_Allgather(&size,
-                  1,
-                  MPI_UNSIGNED_LONG,
-                  partition_sizes.data(),
-                  1,
-                  MPI_UNSIGNED_LONG,
-                  mpi_comm);
+    std::vector<std::size_t> const partition_sizes =
+        BaseLib::MPI::allgather(size, mpi);
 
     // the first partition's offset is zero, offsets for subsequent
     // partitions are the accumulated sum of all preceding sizes (excluding
@@ -63,7 +52,7 @@ PartitionInfo getPartitionInfo(std::size_t const size,
         *max_element(partition_sizes.begin(), partition_sizes.end());
 
     // local_offset, local_length, longest_local_length, global_length
-    return {partition_offsets[mpi_rank], size, longest_partition,
+    return {partition_offsets[mpi.rank], size, longest_partition,
             partition_offsets.back()};
 }
 }  // namespace MeshLib::IO
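
The gather-then-offset idiom in this hunk recurs throughout the change (see
the mesh reader above and the subdomain-partition code below): allgather one
count per rank, seed an offsets vector with 0, and let std::partial_sum append
the running totals. A freestanding restatement of that pattern, with a
hypothetical helper name:

#include <cstddef>
#include <iterator>
#include <numeric>
#include <vector>

// Turns per-rank counts into per-rank starting offsets: offsets[r] is the
// exclusive prefix sum of all counts before rank r, and offsets.back() is
// the global total length.
std::vector<std::size_t> offsetsFromCounts(
    std::vector<std::size_t> const& counts)
{
    std::vector<std::size_t> offsets;
    offsets.reserve(counts.size() + 1);
    offsets.push_back(0);  // rank 0 starts at the beginning
    std::partial_sum(counts.begin(), counts.end(),
                     std::back_inserter(offsets));
    return offsets;
}

For example, counts {3, 5, 2} gathered from ranks 0..2 yield offsets
{0, 3, 8, 10}: rank 1 writes starting at offset 3, and the global length
is 10.
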
diff --git a/MeshLib/Utils/transformMeshToNodePartitionedMesh.cpp b/MeshLib/Utils/transformMeshToNodePartitionedMesh.cpp
index 0c627e33cb4a112ebe458477e4e7ff6a37a00e45..aab43d6d2095ef66d9a311a0e7b9a461587ddfcb 100644
--- a/MeshLib/Utils/transformMeshToNodePartitionedMesh.cpp
+++ b/MeshLib/Utils/transformMeshToNodePartitionedMesh.cpp
@@ -22,6 +22,7 @@
 #include <vector>
 
 #include "BaseLib/Logging.h"
+#include "BaseLib/MPI.h"
 #include "MeshLib/Elements/Element.h"
 #include "MeshLib/Mesh.h"
 #include "MeshLib/Node.h"
@@ -42,15 +43,6 @@ bool isRegularNode(
 
 namespace MeshLib
 {
-std::pair<int, int> getMPIRankAndSize(MPI_Comm const& mpi_comm)
-{
-    int mpi_comm_size;
-    MPI_Comm_size(mpi_comm, &mpi_comm_size);
-    int mpi_comm_rank;
-    MPI_Comm_rank(mpi_comm, &mpi_comm_rank);
-    return {mpi_comm_rank, mpi_comm_size};
-}
-
 std::pair<std::vector<Node*>, std::vector<Element*>> copyNodesAndElements(
     std::vector<Element*> const& input_elements)
 {
@@ -136,15 +128,12 @@ computeRegularBaseNodeGlobalNodeIDsOfSubDomainPartition(
     std::size_t const number_of_regular_nodes =
         computeNumberOfRegularNodes(bulk_mesh, subdomain_mesh);
 
-    // in the following information exchange with other ranks is required
-    MPI_Comm mpi_comm = MPI_COMM_WORLD;
-    auto const [mpi_comm_rank, mpi_comm_size] = getMPIRankAndSize(mpi_comm);
+    BaseLib::MPI::Mpi mpi;
 
     // send own number of regular nodes to all others
-    std::vector<std::size_t> gathered_number_of_regular_nodes(mpi_comm_size);
-    MPI_Allgather(&number_of_regular_nodes, 1, MPI_UNSIGNED_LONG,
-                  gathered_number_of_regular_nodes.data(), 1, MPI_UNSIGNED_LONG,
-                  mpi_comm);
+    std::vector<std::size_t> const gathered_number_of_regular_nodes =
+        BaseLib::MPI::allgather(number_of_regular_nodes, mpi);
+
     // compute the 'offset' in the global_node_ids
     std::vector<std::size_t> numbers_of_regular_nodes_at_rank;
     numbers_of_regular_nodes_at_rank.push_back(0);
@@ -155,8 +144,7 @@ computeRegularBaseNodeGlobalNodeIDsOfSubDomainPartition(
     // add the offset to the partition-owned subdomain
     std::vector<std::size_t> subdomain_global_node_ids;
     subdomain_global_node_ids.reserve(subdomain_nodes.size());
-    auto const partition_offset =
-        numbers_of_regular_nodes_at_rank[mpi_comm_rank];
+    auto const partition_offset = numbers_of_regular_nodes_at_rank[mpi.rank];
     DBUG("[{}] partition offset: {}", subdomain_mesh->getName(),
          partition_offset);
     // set the global id for the regular base nodes
@@ -175,9 +163,7 @@ std::vector<std::size_t> computeGhostBaseNodeGlobalNodeIDsOfSubDomainPartition(
     Mesh const* subdomain_mesh,
     std::vector<std::size_t> const& global_regular_base_node_ids)
 {
-    // in the following information exchange with other ranks is required
-    MPI_Comm const mpi_comm = MPI_COMM_WORLD;
-    auto const [mpi_comm_rank, mpi_comm_size] = getMPIRankAndSize(mpi_comm);
+    BaseLib::MPI::Mpi mpi;
 
     // the count of regular nodes is the offset into the mapping
     auto const local_bulk_node_ids_for_subdomain =
@@ -203,15 +189,15 @@ std::vector<std::size_t> computeGhostBaseNodeGlobalNodeIDsOfSubDomainPartition(
     auto const size = subdomain_node_id_to_bulk_node_id.size();
     std::size_t global_number_of_subdomain_node_id_to_bulk_node_id = 0;
     MPI_Allreduce(&size, &global_number_of_subdomain_node_id_to_bulk_node_id, 1,
-                  MPI_UNSIGNED_LONG, MPI_SUM, mpi_comm);
+                  MPI_UNSIGNED_LONG, MPI_SUM, mpi.communicator);
 
     DBUG("[{}] global_number_of_subdomain_node_id_to_bulk_node_id: '{}' ",
          subdomain_mesh->getName(),
          global_number_of_subdomain_node_id_to_bulk_node_id);
 
-    std::vector<int> numbers_of_ids_at_ranks(mpi_comm_size);
-    MPI_Allgather(&size, 1, MPI_INT, numbers_of_ids_at_ranks.data(), 1, MPI_INT,
-                  mpi_comm);
+    std::vector<int> const numbers_of_ids_at_ranks =
+        BaseLib::MPI::allgather(static_cast<int>(size), mpi);
+
     std::vector<int> offsets;
     offsets.push_back(0);
     std::partial_sum(begin(numbers_of_ids_at_ranks),
@@ -225,7 +211,7 @@ std::vector<std::size_t> computeGhostBaseNodeGlobalNodeIDsOfSubDomainPartition(
                    numbers_of_ids_at_ranks.data(),           /* recvcounts */
                    offsets.data(),                           /* displs */
                    MPI_UNSIGNED_LONG,                        /* recvtype */
-                   mpi_comm);
+                   mpi.communicator);
 
     // construct a map for fast search of local bulk node ids
     std::map<std::size_t, std::size_t> global_to_local_bulk_node_ids;
@@ -248,9 +234,9 @@ std::vector<std::size_t> computeGhostBaseNodeGlobalNodeIDsOfSubDomainPartition(
         global_number_of_subdomain_node_id_to_bulk_node_id,
         std::numeric_limits<std::size_t>::max());
     // search in all ranks within the bulk ids for the corresponding id
-    for (int rank = 0; rank < mpi_comm_size; ++rank)
+    for (int rank = 0; rank < mpi.size; ++rank)
     {
-        if (rank == mpi_comm_rank)
+        if (rank == mpi.rank)
         {
             continue;
         }
@@ -285,13 +271,13 @@ std::vector<std::size_t> computeGhostBaseNodeGlobalNodeIDsOfSubDomainPartition(
         global_number_of_subdomain_node_id_to_bulk_node_id, /* sendcount */
         MPI_UNSIGNED_LONG,                                  /* sendtype */
         MPI_MAX,                                            /* operation */
-        mpi_comm);
+        mpi.communicator);
 
     std::vector<std::size_t> global_ids_for_subdomain_ghost_nodes(
         computed_global_ids_for_subdomain_ghost_nodes.begin() +
-            offsets[mpi_comm_rank],
+            offsets[mpi.rank],
         computed_global_ids_for_subdomain_ghost_nodes.begin() +
-            offsets[mpi_comm_rank + 1]);
+            offsets[mpi.rank + 1]);
     return global_ids_for_subdomain_ghost_nodes;
 }
 
@@ -301,14 +287,10 @@ std::vector<std::size_t> computeNumberOfRegularBaseNodesAtRank(
     auto const number_of_regular_base_nodes =
         subdomain_mesh->computeNumberOfBaseNodes();
 
-    MPI_Comm const mpi_comm = MPI_COMM_WORLD;
-    auto const [mpi_comm_rank, mpi_comm_size] = getMPIRankAndSize(mpi_comm);
+    BaseLib::MPI::Mpi mpi;
 
-    std::vector<std::size_t> gathered_number_of_regular_base_nodes(
-        mpi_comm_size);
-    MPI_Allgather(&number_of_regular_base_nodes, 1, MPI_UNSIGNED_LONG,
-                  gathered_number_of_regular_base_nodes.data(), 1,
-                  MPI_UNSIGNED_LONG, mpi_comm);
+    std::vector<std::size_t> const gathered_number_of_regular_base_nodes =
+        BaseLib::MPI::allgather(number_of_regular_base_nodes, mpi);
 
     std::vector<std::size_t> numbers_of_regular_base_nodes_at_rank;
     numbers_of_regular_base_nodes_at_rank.push_back(0);
@@ -323,20 +305,15 @@ std::vector<std::size_t> computeNumberOfRegularBaseNodesAtRank(
 std::vector<std::size_t> computeNumberOfRegularHigherOrderNodesAtRank(
     Mesh const* subdomain_mesh)
 {
-    // in the following information exchange with other ranks is required
-    MPI_Comm const mpi_comm = MPI_COMM_WORLD;
-    auto [mpi_comm_rank, mpi_comm_size] = getMPIRankAndSize(mpi_comm);
+    BaseLib::MPI::Mpi mpi;
 
     auto const number_of_regular_base_nodes =
         subdomain_mesh->computeNumberOfBaseNodes();
 
-    std::vector<std::size_t> gathered_number_of_regular_higher_order_nodes(
-        mpi_comm_size);
     auto const number_of_regular_higher_order_nodes =
         subdomain_mesh->getNumberOfNodes() - number_of_regular_base_nodes;
-    MPI_Allgather(&number_of_regular_higher_order_nodes, 1, MPI_UNSIGNED_LONG,
-                  gathered_number_of_regular_higher_order_nodes.data(), 1,
-                  MPI_UNSIGNED_LONG, mpi_comm);
+    std::vector<std::size_t> const gathered_number_of_regular_higher_order_nodes =
+        BaseLib::MPI::allgather(number_of_regular_higher_order_nodes, mpi);
 
     std::vector<std::size_t> numbers_of_regular_higher_order_nodes_at_rank;
     numbers_of_regular_higher_order_nodes_at_rank.push_back(0);
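
The MPI_Allreduce and MPI_Allgatherv calls in
transformMeshToNodePartitionedMesh.cpp stay raw in this change; only the
size/rank queries and the scalar allgathers moved into the wrapper. For
comparison, an allgatherv() in the same style could derive the recvcounts and
displs arrays internally. This builds on the interface sketched after the
first file above and is purely an assumption, not an existing BaseLib
function:

#include <mpi.h>

#include <iterator>
#include <numeric>
#include <vector>

namespace BaseLib::MPI
{
// Hypothetical companion to allgather(): gathers a variable-length vector
// from every rank into one rank-ordered vector, deriving the recvcounts and
// displs arrays that the raw MPI_Allgatherv call site above assembles by
// hand.
template <typename T>
std::vector<T> allgatherv(std::vector<T> const& local, Mpi const& mpi)
{
    // one count per rank, then an exclusive prefix sum -> displacements
    std::vector<int> const counts =
        allgather(static_cast<int>(local.size()), mpi);
    std::vector<int> displs;
    displs.reserve(counts.size() + 1);
    displs.push_back(0);
    std::partial_sum(counts.begin(), counts.end(),
                     std::back_inserter(displs));

    std::vector<T> global(displs.back());  // displs.back() == global length
    MPI_Allgatherv(local.data(), static_cast<int>(local.size()), mpiType<T>(),
                   global.data(), counts.data(), displs.data(), mpiType<T>(),
                   mpi.communicator);
    return global;
}
}  // namespace BaseLib::MPI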