diff --git a/docs/conf.py b/docs/conf.py
index ea2a5ae80a3f7f12db64046c55d6acd2628b56d6..434978c46294ccefa1224fe550d4703423bbda41 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -36,6 +36,7 @@ extensions = [
     "sphinx_gallery.gen_gallery",
 ]
 
+
 templates_path = ["_templates"]
 exclude_patterns = [
     "_build",
diff --git a/docs/examples/howto_logparser/README.rst b/docs/examples/howto_logparser/README.rst
index 7e11020beac367a3b15f163de5d268670d0d7008..4af7e42631630b845e35da44a39f2774dfb720ae 100644
--- a/docs/examples/howto_logparser/README.rst
+++ b/docs/examples/howto_logparser/README.rst
@@ -1,6 +1,6 @@
-How to use the OGS log parser
+How to use logparser
 =============================
 
 .. sectionauthor:: Tobias Meisel (Helmholtz Centre for Environmental Research GmbH - UFZ)
 
-The following jupyter notebooks provide some examples of how to use the log parser.
+The following Jupyter notebooks provide some examples of how to use the logparser.
diff --git a/docs/examples/howto_logparser/plot_logparser_advanced.py b/docs/examples/howto_logparser/plot_logparser_advanced.py
index cac0fa4616a88925ff9964540af2aba0ea5c00da..dc7fc3cb47ca09555c250f154d56cbd2be07ca05 100644
--- a/docs/examples/howto_logparser/plot_logparser_advanced.py
+++ b/docs/examples/howto_logparser/plot_logparser_advanced.py
@@ -1,83 +1,122 @@
 """
-Log parser - Advanced topics
+Advanced topics
 ==================================
 
-This example shows how to analyse the  OGS log output to get information
-about performance of different parts of ogs.
-It uses a log file generated by ogs with project file from the following benchmark:
+We cover:
 
+1. Logs from parallel computation (OGS with MPI runs)
+
+2. Performance tuning
+
+3. Custom analyses
+
+Although these topics are presented together, they do not depend on each other and can be used separately.
 
 """
 
 # %%
+from pathlib import Path
+
 import pandas as pd
 
 from ogstools.logparser import (
-    analysis_convergence_coupling_iteration,
-    analysis_convergence_newton_iteration,
     analysis_time_step,
     fill_ogs_context,
     parse_file,
-    time_step_vs_iterations,
 )
 from ogstools.logparser.examples import (
     const_viscosity_thermal_convection_log,
+    parallel_log,
 )
 
 # %%
-log = const_viscosity_thermal_convection_log
+# 1. Logs from parallel computations (with MPI)
+# =================================================================
+# The log file investigated in this example is the result of an ``mpirun`` (``-np 3``) run of https://gitlab.opengeosys.org/ogs/ogs/-/blob/master/Tests/Data/EllipticPETSc/cube_1e3_XDMF_np3.prj
+
+
+log = parallel_log
 records = parse_file(log)
 df_records = pd.DataFrame(records)
-df_log = fill_ogs_context(df_records)
-df_ts_it = time_step_vs_iterations(df_log)
-df_ts_it  # noqa: B018
+df_parallel = fill_ogs_context(df_records)
+print(df_parallel.columns)
+df_parallel  # noqa: B018
 
+df_ts = analysis_time_step(df_parallel)
+# For each mpi_process and each time_step we get the measurements (e.g. output_time)
+df_ts  # noqa: B018
 # %%
-df_ts_it.plot(grid=True)
-
+# 1.1. Aggregate measurements over all MPI processes
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# If you are not interested in the performance of individual MPI processes, pandas gives you everything you need to aggregate the data further. However, for performance measurements it is recommended to always consider the slowest MPI process, since the synchronization barriers in the evaluation scheme of OGS make it determine the overall runtime.
+# The resulting DataFrame then has the same structure as a DataFrame obtained from a serial OGS log.
+df_ts.groupby("time_step").max()
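+
+# %%
+# For instance, plain pandas aggregation shows how strongly the measurements
+# differ between the MPI processes (a minimal sketch using only pandas):
+df_ts.groupby("time_step")[["output_time", "assembly_time"]].agg(
+    ["min", "max", "mean"]
+)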
 
 # %%
-# Performance of in separate parts by time step
-df_ts = analysis_time_step(df_log)
-df_ts = df_ts.loc[
-    0
-]  # log of serial so we can remove MPI_process (index=0) from result (all are 0)
-df_ts  # noqa: B018
+df_ts[["output_time", "assembly_time"]].boxplot()
+
 # %%
-# Data manipulation with pandas
-df_ts[
-    ["output_time", "assembly_time", "dirichlet_time", "linear_solver_time"]
-].plot(logy=True, grid=True)
+# 2. Performance tuning
+# =================================================================
+#
+# You can either (2.1) reduce the set of regular expressions to what your final analysis actually needs and/or
+# (2.2) save and load the pandas.DataFrame with the records.
+#
+# 2.1. Reduce the set of regular expressions
+# ----------------------------------------------------------------
+# The logparser tries to match each line of the log file against regular expressions. By default, it iterates over all entries specified in :py:mod:`ogstools.logparser.ogs_regexes`.
+# You can reduce this set to the entries you are actually interested in.
+# For this example we are only interested in the number of iterations per time step, as shown in the sketch below.
+# Since the parsing process is expensive, it can also pay off to store the parsed records in a file (see 2.2).
+
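+# A minimal sketch of a reduced pattern set: we only assume that
+# ``ogs_regexes()`` returns (pattern, record type) pairs, as consumed by
+# :py:func:`ogstools.logparser.parse_file`; the keyword filter below is
+# purely illustrative.
+from ogstools.logparser.ogs_regexes import ogs_regexes
+
+
+def reduced_regexes():
+    # Keep only the patterns whose regex text mentions iterations or time steps.
+    keywords = ("[Ii]teration", "[Tt]ime step")
+    return [
+        (pattern, record_type)
+        for pattern, record_type in ogs_regexes()
+        if any(keyword in pattern for keyword in keywords)
+    ]
+
+
+records_reduced = parse_file(log, ogs_res=reduced_regexes)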
 
 # %%
+# 2.2. Save and load records
+# ----------------------------------------------------------------
+# We recommend saving the records with any of the methods described in https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html.
 
-analysis_convergence_newton_iteration(df_log)
+# df_records.to_hdf("records.h5", key="records")
+# pd.read_hdf("records.h5", key="records")
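+# A CSV round trip is a simple alternative (note that CSV does not preserve
+# pandas dtypes, so e.g. nullable integer columns come back as plain floats):
+# df_records.to_csv("records.csv", index=False)
+# pd.read_csv("records.csv")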
 
 
 # %%
-# Staggered
-# Tests/Data/Parabolic/HT/StaggeredCoupling/HeatTransportInStationaryFlow/HeatTransportInStationaryFlow.prj#
-records = parse_file(
-    "/home/meisel/gitlabrepos/ogstools/staggered_heat_transport_in_stationary_flow.log"
-)
+# 3. Custom analyses
+# =====================
+# 3.1. Introduction to the functions of the logparser
+# ----------------------------------------------------------------
+# The function :py:func:`ogstools.logparser.parse_file` iterates over all lines in the log file. For each line that matches one of the given regular expressions, it creates a new entry in a list (here named records).
+#
+log = const_viscosity_thermal_convection_log
+
+# Let us print the content of the log file in this example first.
+with Path(log).open() as log_file:
+    print(log_file.read())
+
+# ToDo link to documentation
+records = parse_file(log)
+# The list of records can directly be transformed into a pandas.DataFrame for further inspection. It is the raw representation of a filtered OGS log in pandas DataFrame format.
 df_records = pd.DataFrame(records)
-df_log = fill_ogs_context(df_records)
-analysis_convergence_coupling_iteration(df_log)
+# The logparser is able to find the following entries:
+print(df_records.columns)
+# For each entry, :py:mod:`ogstools.logparser.ogs_regexes` has added the type (corresponding to the OGS log level) and the value found to the resulting DataFrame.
+df_records  # noqa: B018
 
 
 # %%
-# Parallel
-df_ts.loc[0]
-# %%
-# Advanced
+
+# For each piece of information (e.g. a time measurement or a numerical metric) we need to know the time_step, iteration_number, process and component it belongs to.
+# ToDo link to documentation, add this information to the table
+df_log = fill_ogs_context(df_records)
+df_log  # noqa: B018
 
 # %%
-## Custom
-df_records  # noqa: B018
-# %%
-x = df_records.pivot_table(["step_size", "iteration_number"], ["time_step"])
-x.plot(subplots=True, sharex=True, grid=True)
+# 3.2. Custom analyses - example
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# We create a pivot table that shows, for each time step, the step size and the number of iterations.
+
 
-# Computing logs takes to much time
-# custom regexes, log level
-# force_parallel
+df_custom = df_records.pivot_table(
+    ["step_size", "iteration_number"], ["time_step"], aggfunc="max"
+)
+df_custom  # noqa: B018
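+
+# %%
+# As with any DataFrame, the pivot table can be plotted directly with pandas:
+df_custom.plot(subplots=True, sharex=True, grid=True)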
diff --git a/docs/examples/howto_logparser/plot_logparser_analyses.py b/docs/examples/howto_logparser/plot_logparser_analyses.py
index 36e806b45442bbb961913fec67b9cd9d1fa8511a..2a330091f5580dcc6abbc73d0c7b464d2c7c074f 100644
--- a/docs/examples/howto_logparser/plot_logparser_analyses.py
+++ b/docs/examples/howto_logparser/plot_logparser_analyses.py
@@ -1,8 +1,8 @@
 """
-Log parser - Predefined Analyses
+Predefined Analyses
 =======================================
 
-Here we shows the different predefined analysis available in the log parser.
+Here we show the different predefined analyses available in the logparser.
 We use the project file from the following benchmark:
 `ogs: Constant viscosity (Hydro-Thermal)
 <https://www.opengeosys.org/docs/benchmarks/hydro-thermal/constant-viscosity/>` with
@@ -35,36 +35,47 @@ df_records = pd.DataFrame(records)
 df_log = fill_ogs_context(df_records)
 
 # %%
-# Every time step of the simulation and how many iterations have been needed.
+# Iterations per time step
+# -------------------------
+# :py:func:`ogstools.logparser.time_step_vs_iterations`
 df_ts_it = time_step_vs_iterations(df_log)
 df_ts_it  # noqa: B018
 
 
 # %%
 # Performance of separate parts by time step
+# ----------------------------------------------------------------
+# :py:func:`ogstools.logparser.analysis_time_step`
 df_ts = analysis_time_step(df_log)
 df_ts = df_ts.loc[0]
 # The log is from a serial run, so we can reduce the result to MPI process 0 (all values are 0) - see the advanced example
 df_ts  # noqa: B018
 # %%
 # Performance of separate parts by time step - plot
-df_ts[
-    ["output_time", "assembly_time", "dirichlet_time", "linear_solver_time"]
-].plot(logy=True, grid=True)
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+df_ts[["assembly_time", "dirichlet_time", "linear_solver_time"]].plot(
+    logy=True, grid=True
+)
 
 # %%
 # Analysis of convergence criteria - Newton iterations
+# ----------------------------------------------------------------
+# :py:func:`ogstools.logparser.analysis_convergence_newton_iteration`
 analysis_convergence_newton_iteration(df_log)
 
 
 # %%
 # Staggered
-# Tests/Data/Parabolic/HT/StaggeredCoupling/HeatTransportInStationaryFlow/HeatTransportInStationaryFlow.prj#
+# ----------------------------------------------------------------
+# :py:func:`ogstools.logparser.analysis_convergence_coupling_iteration`
+# We use the logs generated when running
+# https://gitlab.opengeosys.org/ogs/ogs/-/blob/master/Tests/Data/Parabolic/HT/HeatTransportInStationaryFlow/HeatTransportInStationaryFlow.prj
 #
 log = staggered_log
 records = parse_file(log)
 df_records = pd.DataFrame(records)
 df_log = fill_ogs_context(df_records)
 
-# Only for staggered coupled processes
+# Only applicable to staggered coupled processes!
 analysis_convergence_coupling_iteration(df_log)
diff --git a/docs/examples/howto_logparser/plot_logparser_intro.py b/docs/examples/howto_logparser/plot_logparser_intro.py
index bce9a3f13331c8e2ccffcedd6f37668368840e6d..f88448c3c0972e966fede29a56e96e0534e71326 100644
--- a/docs/examples/howto_logparser/plot_logparser_intro.py
+++ b/docs/examples/howto_logparser/plot_logparser_intro.py
@@ -1,12 +1,11 @@
 """
-Log parser - Introduction
+Introduction
 ================================
 
 This basic example shows how to analyse the OGS log output to get information
 about the performance of different parts of OGS.
 It uses the project file from the following benchmark:
-`ogs: Constant viscosity (Hydro-Thermal)
-<https://www.opengeosys.org/docs/benchmarks/hydro-thermal/constant-viscosity/>` with
+Constant viscosity (Hydro-Thermal), https://www.opengeosys.org/docs/benchmarks/hydro-thermal/constant-viscosity, with
 `<t_end> 1e8 </t_end>`
 """
 
@@ -23,20 +22,38 @@ from ogstools.logparser.examples import (
 )
 
 # %%
+# The log file
+# -------------
+# ``log`` is the location (``str`` or ``Path``) of the OGS log file.
+# Make sure the log file does not contain ANSI escape codes (e.g. color codes, see https://en.wikipedia.org/wiki/ANSI_escape_code).
+# If it does, you can remove them with: ``sed 's/\x1b\[[0-9;]*m//g' ogs_color.log > ogs.log``
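+# A Python alternative for stripping the ANSI escape codes (a sketch; the file
+# names are placeholders):
+#
+# import re
+# from pathlib import Path
+# raw = Path("ogs_color.log").read_text()
+# Path("ogs.log").write_text(re.sub(r"\x1b\[[0-9;]*m", "", raw))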
 log = const_viscosity_thermal_convection_log
-# Purpose of records and fill_ogs_context is explained in advanced section
+
+# %%
+# Parsing steps
+# ----------------------------
+# The functions :py:func:`ogstools.logparser.parse_file` and :py:func:`ogstools.logparser.fill_ogs_context` are explained in :ref:`sphx_glr_auto_examples_howto_logparser_plot_logparser_advanced.py`.
+# All predefined analyses need the result of fill_ogs_context.
 records = parse_file(log)
 df_records = pd.DataFrame(records)
 df_log = fill_ogs_context(df_records)
-# This is one of many predined analyses. All possibilities are show here:
+
+# %%
+# Use predefined analyses
+# ----------------------------------------------------------------
+# :py:func:`ogstools.logparser.time_step_vs_iterations` is one of many predefined analyses. All possibilities are shown here:
+# :ref:`sphx_glr_auto_examples_howto_logparser_plot_logparser_analyses.py`.
+#
+# Here we are interested in every time step of the simulation and how many iterations were needed.
+# The analysis only works on logs with log level ``info`` or ``debug`` (i.e. OGS run with ``ogs -l info`` or ``ogs -l debug``), see
-# `OpenGeoSys Docs: Log and Debug Output <https://www.opengeosys.org/docs/devguide/advanced/log-and-debug-output/>`
+# https://www.opengeosys.org/docs/devguide/advanced/log-and-debug-output.
 
 df_ts_it = time_step_vs_iterations(df_log)
 # The result is a pandas.DataFrame. You may manipulate the dataframe to your needs with pandas functionality.
 df_ts_it  # noqa: B018
 
 # %%
-# Or directly use pandas functionality to plot.
+# Pandas to plot
+# -------------------
+# You can directly use the pandas plot function (https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html) on the resulting DataFrame.
 df_ts_it.plot(grid=True)
diff --git a/ogstools/logparser/common_ogs_analyses.py b/ogstools/logparser/common_ogs_analyses.py
index 01b04c9287652f075cff59cbec6106c0753ff7c0..e5a08463ff2f58daac7c0bfa89dab8ad658e6446 100644
--- a/ogstools/logparser/common_ogs_analyses.py
+++ b/ogstools/logparser/common_ogs_analyses.py
@@ -195,11 +195,34 @@ def analysis_simulation_termination(df: pd.DataFrame):
     return pd.DataFrame()
 
 
-def fill_ogs_context(df: pd.DataFrame):
-    # Some columns that contain actual integer values are converted to float
-    # See https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
-    # ToDo list of columns with integer values are known from regular expression
+def fill_ogs_context(df_raw_log: pd.DataFrame):
+    """
+    Fill missing values in OpenGeoSys (OGS) log DataFrame by context.
+
+    This function fills missing values in an OpenGeoSys (OGS) log DataFrame by context. Some logs do not contain information about time_step and iteration. The information must be collected by context, by surrounding log lines from the same MPI process. Logs are grouped by MPI process to get only surrounding log lines from the same MPI process. It is assumed that all following lines belong to the same time step until the next collected value of the time step. Some columns that contain actual integer values are converted to float.
+
+    Parameters:
+    - df (pd.DataFrame): DataFrame containing the raw OGS log data. Usually, the result of pd.DataFrame(parse_file(file))
+
+    Returns:
+    - pd.DataFrame: DataFrame with missing values filled by context.
+
+    References:
+    - Pandas documentation : https://pandas.pydata.org/pandas-docs/stable/user_guide/
 
+    Todo:
+    - List of columns with integer values are known from regular expression.
+
+    Notes:
+    - Some logs do not contain information about time_step and iteration. The information must be collected by context (by surrounding log lines from same mpi_process)
+      Logs are grouped by mpi_process to get only surrounding log lines from same mpi_process
+      There are log lines that give the current time step (when time step starts).
+      It can be assumed that in all following lines belong to this time steps, until next collected value of time step
+      Some columns that contain actual integer values are converted to float
+      See https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
+      ToDo list of columns with integer values are known from regular expression
+
+    """
     int_columns = [
         "line",
         "mpi_process",
@@ -210,10 +233,10 @@ def fill_ogs_context(df: pd.DataFrame):
         "component",
         "process",
     ]
-    for column in df.columns:
+    for column in df_raw_log.columns:
         if column in int_columns:
             try:
-                df[column] = df[column].astype("Int64")
+                df_raw_log[column] = df_raw_log[column].astype("Int64")
             except ValueError:
                 print(
                     f"Could not convert column '{column}' to integer due to value error"
@@ -223,36 +246,29 @@ def fill_ogs_context(df: pd.DataFrame):
                     f"Could not convert column '{column}' to integer due to type error"
                 )
 
-    # Some logs do not contain information about time_step and iteration
-    # The information must be collected by context (by surrounding log lines from same mpi_process)
-    # Logs are grouped by mpi_process to get only surrounding log lines from same mpi_process
-
-    # There are log lines that give the current time step (when time step starts).
-    # It can be assumed that in all following lines belong to this time steps, until next collected value of time step
-    df["time_step"] = (
-        df.groupby("mpi_process")[["time_step"]]
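+    # Forward fill the time step within each MPI process; remaining NaNs
+    # (lines before the first time step) are set to 0.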
+    df_raw_log["time_step"] = (
+        df_raw_log.groupby("mpi_process")[["time_step"]]
         .fillna(method="ffill")
         .fillna(value=0)
     )
 
     # Back fill, because iteration number can be found in logs at the END of the iteration
-    df["iteration_number"] = df.groupby("mpi_process")[
+    df_raw_log["iteration_number"] = df_raw_log.groupby("mpi_process")[
         ["iteration_number"]
     ].fillna(method="bfill")
 
-    # ToDo Comment
-    if "component" in df:
-        df["component"] = df.groupby("mpi_process")[["component"]].fillna(
-            value=-1
-        )
+    if "component" in df_raw_log:
+        df_raw_log["component"] = df_raw_log.groupby("mpi_process")[
+            ["component"]
+        ].fillna(value=-1)
     # Forward fill because process will be printed in the beginning - applied to all subsequent
-    if "process" in df:
-        df["process"] = df.groupby("mpi_process")[["process"]].fillna(
-            method="bfill"
-        )
+    if "process" in df_raw_log:
+        df_raw_log["process"] = df_raw_log.groupby("mpi_process")[
+            ["process"]
+        ].fillna(method="bfill")
     # Attention - coupling iteration applies to successor line and to all other predecessors - it needs further processing for specific analysis
-    if "coupling_iteration_process" in df:
-        df["coupling_iteration_process"] = df.groupby("mpi_process")[
-            ["coupling_iteration_process"]
-        ].fillna(method="ffill", limit=1)
-    return df
+    if "coupling_iteration_process" in df_raw_log:
+        df_raw_log["coupling_iteration_process"] = df_raw_log.groupby(
+            "mpi_process"
+        )[["coupling_iteration_process"]].fillna(method="ffill", limit=1)
+    return df_raw_log
diff --git a/ogstools/logparser/examples/LiquidFlow.log b/ogstools/logparser/examples/LiquidFlow.log
deleted file mode 100644
index 03d37bfc64192888fe965f171236fc35aa9c706b..0000000000000000000000000000000000000000
--- a/ogstools/logparser/examples/LiquidFlow.log
+++ /dev/null
@@ -1,59 +0,0 @@
-info: This is OpenGeoSys-6 version 6.5.0-286-ge7ef7067.
-info: OGS started on 2024-03-19 14:15:14+0100.
-info: Reading project file /home/meisel/o/s/Tests/Data/Parabolic/LiquidFlow/SimpleSynthetics/PrimaryVariableConstraintDirichletBC/cuboid_1x1x1_hex_1000_Dirichlet_Dirichlet_1.prj.
-info: readRasters ...
-info: readRasters done
-info: ConstantParameter: p0
-info: ConstantParameter: part_of_left_boundary_Dirichlet
-info: ConstantParameter: part_of_right_boundary_Dirichlet
-info: ConstantParameter: constant_porosity_parameter
-info: ConstantParameter: kappa1
-info: ConstantParameter: p_spatial
-info: ConstantParameter: zero
-info: ConstantParameter: p_Dirichlet_top
-info: No source terms for process variable 'pressure' found.
-info: LiquidFlow process is set to be linear.
-info: Initialize processes.
-info: [time] Output of timestep 0 took 0.00556133 s.
-info: OpenGeoSys is now initialized.
-info: OGS started on 2024-03-19 14:15:14+0100.
-info: Solve processes.
-info: === Time stepping at step #1 and time 43200 with step size 43200
-info: [time] Assembly took 0.0763653 s.
-info: [time] Applying Dirichlet BCs took 0.00057093 s.
-info: ------------------------------------------------------------------
-info: *** Eigen solver compute()
-info: -> compute with Eigen iterative linear solver CG (precon DIAGONAL)
-info: ------------------------------------------------------------------
-info: *** Eigen solver solve()
-info: -> solve with Eigen iterative linear solver CG (precon DIAGONAL)
-info: 	 iteration: 47/10000
-info: 	 residual: 5.835249e-21
-
-info: [time] Linear solver took 0.0120602 s.
-info: [time] Iteration #1 took 0.0891769 s.
-info: [time] Solving process #0 took 0.0892044 s in time step #1
-info: [time] Time step #1 took 0.0896693 s.
-info: [time] Output of timestep 1 took 0.00817603 s.
-info: === Time stepping at step #2 and time 86400 with step size 43200
-info: [time] Assembly took 0.0627239 s.
-info: [time] Applying Dirichlet BCs took 0.000460932 s.
-info: ------------------------------------------------------------------
-info: *** Eigen solver compute()
-info: -> compute with Eigen iterative linear solver CG (precon DIAGONAL)
-info: ------------------------------------------------------------------
-info: *** Eigen solver solve()
-info: -> solve with Eigen iterative linear solver CG (precon DIAGONAL)
-info: 	 iteration: 55/10000
-info: 	 residual: 7.003839e-21
-
-info: [time] Linear solver took 0.00256477 s.
-info: [time] Iteration #1 took 0.065935 s.
-info: [time] Solving process #0 took 0.0659825 s in time step #2
-info: [time] Time step #2 took 0.0663716 s.
-info: [time] Output of timestep 2 took 0.0160351 s.
-info: The whole computation of the time stepping took 2 steps, in which
-	 the accepted steps are 2, and the rejected steps are 0.
-
-info: [time] Execution took 0.18038 s.
-info: OGS terminated on 2024-03-19 14:15:14+0100.
diff --git a/ogstools/logparser/examples/__init__.py b/ogstools/logparser/examples/__init__.py
index c930c81f8776a3ea88d767221b3248f3f38654ea..25482751860d7aa32e7b1acdc5834241c92a5e4c 100644
--- a/ogstools/logparser/examples/__init__.py
+++ b/ogstools/logparser/examples/__init__.py
@@ -1,8 +1,8 @@
 from importlib import resources
 
 _prefix = resources.files(__name__)
-liquid_flow_log = _prefix / "ogs.log"
 const_viscosity_thermal_convection_log = (
     _prefix / "ConstViscosityThermalConvection.log"
 )
-staggered_log = "staggered_heat_transport_in_stationary_flow.log"
+staggered_log = _prefix / "staggered_heat_transport_in_stationary_flow.log"
+parallel_log = _prefix / "steady_state_diffusion_parallel.log"
diff --git a/ogstools/logparser/examples/steady_state_diffusion_parallel.log b/ogstools/logparser/examples/steady_state_diffusion_parallel.log
new file mode 100644
index 0000000000000000000000000000000000000000..ce1904b0d97ca59ae2a9042ccd46550e82e66ba9
--- /dev/null
+++ b/ogstools/logparser/examples/steady_state_diffusion_parallel.log
@@ -0,0 +1,114 @@
+info: This is OpenGeoSys-6 version 6.5.0-167-gd7633c76.dirty.
+info: OGS started on 2024-03-21 13:17:33+0100.
+info: This is OpenGeoSys-6 version 6.5.0-167-gd7633c76.dirty.
+info: OGS started on 2024-03-21 13:17:33+0100.
+info: This is OpenGeoSys-6 version 6.5.0-167-gd7633c76.dirty.
+info: OGS started on 2024-03-21 13:17:33+0100.
+[2] info: Reading project file /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3_XDMF_np3.prj.
+[0] info: Reading project file /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3_XDMF_np3.prj.
+[1] info: Reading project file /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3_XDMF_np3.prj.
+[0] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/steady_state_diffusion.include into project file.
+[1] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/steady_state_diffusion.include into project file.
+[2] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/steady_state_diffusion.include into project file.
+[0] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3.include into project file.
+[2] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3.include into project file.
+[1] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3.include into project file.
+[0] info: Reading corresponding part of mesh data from binary file cube_1x1x1_hex_1e3 ...
+[2] info: Reading corresponding part of mesh data from binary file cube_1x1x1_hex_1e3 ...
+[1] info: Reading corresponding part of mesh data from binary file cube_1x1x1_hex_1e3 ...
+[0] warning: Could not open file 'cube_1x1x1_hex_1e3_partitioned_integration_point_properties_cfg3.bin'.
+	You can ignore this warning if the mesh does not contain integration_point-wise property data.
+[2] warning: Could not open file 'cube_1x1x1_hex_1e3_partitioned_integration_point_properties_cfg3.bin'.
+	You can ignore this warning if the mesh does not contain integration_point-wise property data.
+[1] warning: Could not open file 'cube_1x1x1_hex_1e3_partitioned_integration_point_properties_cfg3.bin'.
+	You can ignore this warning if the mesh does not contain integration_point-wise property data.
+[1] info: [time] Reading the mesh took 0.007251 s.
+[2] info: [time] Reading the mesh took 0.007835 s.
+[0] info: [time] Reading the mesh took 0.008025 s.
+[2] info: readRasters ...
+[2] info: readRasters done
+[2] info: ConstantParameter: p0
+[2] info: ConstantParameter: p_Dirichlet_left
+[2] info: ConstantParameter: p_Dirichlet_right
+[1] info: readRasters ...
+[1] info: readRasters done
+[1] info: ConstantParameter: p0
+[2] info: No source terms for process variable 'pressure' found.
+[1] info: ConstantParameter: p_Dirichlet_left
+[1] info: ConstantParameter: p_Dirichlet_right
+[0] info: readRasters ...
+[0] info: readRasters done
+[1] info: No source terms for process variable 'pressure' found.
+[0] info: ConstantParameter: p0
+[0] info: ConstantParameter: p_Dirichlet_left
+[0] info: ConstantParameter: p_Dirichlet_right
+[0] info: No source terms for process variable 'pressure' found.
+[1] info: Initialize processes.
+[2] info: Initialize processes.
+[0] info: Initialize processes.
+[0] info: HDF5: Using a single chunk for dataset geometry .
+[2] info: HDF5: Using a single chunk for dataset geometry .
+[1] info: HDF5: Using a single chunk for dataset geometry .
+[2] info: HDF5: Using a single chunk for dataset geometry .
+[2] info: HDF5: Using a single chunk for dataset topology .
+[1] info: HDF5: Using a single chunk for dataset geometry .
+[1] info: HDF5: Using a single chunk for dataset topology .
+[0] info: HDF5: Using a single chunk for dataset geometry .
+[0] info: HDF5: Using a single chunk for dataset topology .
+[0] info: HDF5: Using a single chunk for dataset pressure .
+[1] info: HDF5: Using a single chunk for dataset pressure .
+[2] info: HDF5: Using a single chunk for dataset pressure .
+[0] info: HDF5: Using a single chunk for dataset v .
+[2] info: HDF5: Using a single chunk for dataset v .
+[1] info: HDF5: Using a single chunk for dataset v .
+[1] info: [time] Output of timestep 0 took 0.00447257 s.
+[2] info: [time] Output of timestep 0 took 0.00447257 s.
+[2] info: Solve processes.
+[0] info: [time] Output of timestep 0 took 0.00449248 s.
+[0] info: Solve processes.
+[1] info: Solve processes.
+[1] info: === Time stepping at step #1 and time 0.1 with step size 0.1
+[2] info: === Time stepping at step #1 and time 0.1 with step size 0.1
+[0] info: === Time stepping at step #1 and time 0.1 with step size 0.1
+[1] info: [time] Assembly took 0.0058952 s.
+[0] info: [time] Assembly took 0.00589635 s.
+[2] info: [time] Assembly took 0.00589633 s.
+[0] info: [time] Applying Dirichlet BCs took 0.000100729 s.
+[1] info: [time] Applying Dirichlet BCs took 9.7956e-05 s.
+[2] info: [time] Applying Dirichlet BCs took 0.000100732 s.
+
+================================================
+Linear solver bcgs with mg preconditioner using PRECONDITIONED
+converged in 15 iterations (relative convergence criterion fulfilled).
+================================================
+[0] info: [time] Linear solver took 0.0031392 s.
+[2] info: [time] Linear solver took 0.0031265 s.
+[2] info: [time] Iteration #1 took 0.00918299 s.
+[1] info: [time] Linear solver took 0.0031268 s.
+[1] info: [time] Iteration #1 took 0.00917942 s.
+[0] info: [time] Iteration #1 took 0.00918262 s.
+[0] info: [time] Solving process #0 took 0.00926697 s in time step #1
+[1] info: [time] Solving process #0 took 0.00926667 s in time step #1
+[2] info: [time] Solving process #0 took 0.00926656 s in time step #1
+[2] info: [time] Time step #1 took 0.0093221 s.
+[0] info: [time] Time step #1 took 0.00931536 s.
+[1] info: [time] Time step #1 took 0.00932201 s.
+[2] info: [time] Output of timestep 1 took 0.00231362 s.
+[2] info: The whole computation of the time stepping took 1 steps, in which
+	 the accepted steps are 1, and the rejected steps are 0.
+
+[1] info: [time] Output of timestep 1 took 0.00231531 s.
+[1] info: The whole computation of the time stepping took 1 steps, in which
+	 the accepted steps are 1, and the rejected steps are 0.
+
+[1] info: [time] Execution took 0.0873399 s.
+[0] info: [time] Output of timestep 1 took 0.00231734 s.
+[0] info: The whole computation of the time stepping took 1 steps, in which
+	 the accepted steps are 1, and the rejected steps are 0.
+
+[0] info: [time] Execution took 0.0873461 s.
+[2] info: [time] Execution took 0.087346 s.
+[0] info: [time] Output of XDMF to cube_1e3_np3_cube_1x1x1_hex_1e3.xdmf took 8.7512e-05 s.
+[1] info: OGS terminated on 2024-03-21 13:17:34+0100.
+[0] info: OGS terminated on 2024-03-21 13:17:34+0100.
+[2] info: OGS terminated on 2024-03-21 13:17:34+0100.
diff --git a/ogstools/logparser/log_parser.py b/ogstools/logparser/log_parser.py
index 0be73a428b76f15119b34366df3740eae4bb4424..a07430f6a43494e71f0b32c2168f5e677ea5d94e 100644
--- a/ogstools/logparser/log_parser.py
+++ b/ogstools/logparser/log_parser.py
@@ -5,7 +5,7 @@
 
 import re
 from pathlib import Path
-from typing import Any, Optional, Union
+from typing import Any, Callable, Optional, Union
 
 from ogstools.logparser.ogs_regexes import ogs_regexes
 
@@ -80,6 +80,7 @@ def parse_file(
     file_name: Union[str, Path],
     maximum_lines: Optional[int] = None,
     force_parallel: bool = False,
+    ogs_res: Callable = ogs_regexes,
 ) -> list[Any]:
     """
     Parses a log file from OGS, applying regex patterns to extract specific information,
@@ -99,7 +100,6 @@ def parse_file(
     if isinstance(file_name, str):
         file_name = Path(file_name)
     file_name = Path(file_name)
-    ogs_res = ogs_regexes()
     parallel_log = force_parallel or mpi_processes(file_name) > 1
 
     if parallel_log:
@@ -113,7 +113,7 @@ def parse_file(
         return lambda regex: re.compile(mpi_process_regex + regex)
 
     compile_re = compile_re_fn(process_regex)
-    patterns = [(compile_re(k), v) for k, v in ogs_res]
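+    # ogs_res is a callable (default: ogs_regexes) that returns the
+    # (regex, record type) pairs to match against; passing a custom callable
+    # allows parsing with a reduced pattern set.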
+    patterns = [(compile_re(k), v) for k, v in ogs_res()]
 
     number_of_lines_read = 0
     with file_name.open() as file: