diff --git a/docs/examples/howto_logparser/plot_logparser_advanced.py b/docs/examples/howto_logparser/plot_logparser_advanced.py
index 9e6e415a090fe9fc45d093bd3d441d5375c3ed2b..6c7c3579e98a30ad044739e41b7b91644efc8508 100644
--- a/docs/examples/howto_logparser/plot_logparser_advanced.py
+++ b/docs/examples/howto_logparser/plot_logparser_advanced.py
@@ -1,12 +1,12 @@
 """
 Advanced topics
-==================================
+===============
 
 We cover:
 
 1. Logs from parallel computation (OGS with MPI runs)
 
-2. Performance tuning
+2. Reduce computation time to process logs
 
 3. Custom analyses
 
@@ -32,23 +32,21 @@ from ogstools.logparser.examples import (
 
 # %%
 # 1. Logs from parallel computations (with MPI)
-# ----------------------------------------------------------------
+# ---------------------------------------------
 # The log file to be investigated in this example is the result of a mpirun (-np 3) from https://gitlab.opengeosys.org/ogs/ogs/-/blob/master/Tests/Data/EllipticPETSc/cube_1e3_XDMF_np3.prj
 
 
-log = parallel_log
-records = parse_file(log)
+records = parse_file(parallel_log)
 df_records = pd.DataFrame(records)
 df_parallel = fill_ogs_context(df_records)
-print(df_parallel.columns)
 df_parallel  # noqa: B018
 
 df_ts = analysis_time_step(df_parallel)
 # For each mpi_process and each time_step we get the measurements (e.g. output_time)
 df_ts  # noqa: B018
-# %%
+# %% [markdown]
 # 1.1. Aggregate measurements over all MPI processes
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 # If you are not particularly interested in the performance of each MPI_process pandas gives you all you need to further process data. However, for performance measurement it is recommended to consider always the slowest MPI_process for meaningful interpretation of overall performance (because of synchronization barriers in the evaluation scheme of OGS).
 # Then the resulting DataFrame has the same structure like a DataFrame gained from serial OGS log.
 df_ts.groupby("time_step").max()
@@ -56,46 +54,52 @@ df_ts.groupby("time_step").max()
 # %%
 df_ts[["output_time", "assembly_time"]].boxplot()
 
-# %%
-# 2. Performance tuning
-# ----------------------------------------------------------------
+# %% [markdown]
+# 2. Reduce computation time to process logs
+# ------------------------------------------
 #
-# You can either (2.1) Reduce set of regular expressions when you exactly know what you final analysis will need AND / OR
+# To reduce the computation time needed to evaluate the logs you can either
+# (2.1) reduce the set of regular expressions, when you know exactly
+# what your final analysis will need
+
+# AND / OR
+
 # (2.2.) Save and load the pandas.DataFrame for the records.
 #
 # 2.1. Reduce regular expression
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 # The logparser tries to find matching regular expressions for each line. By default it iterates over all entries specified in :py:mod:`ogstools.logparser.ogs_regexes`.
 # You can reduce it to the amount of entries you are actually interested in.
 # For this example we are only interested in the number of iterations per time step.
-# Because the parsing process is expensive, it is feasible to store the records to a file.
+# Because the parsing process is expensive, it is useful to store the records to a file.
+# According to :py:mod:`ogstools.logparser.parse_file`
+# via parameter `regexes` a list of reduced or custom regexes can be provided.
 
 
-# %%
+# %% [markdown]
 # 2.2. Save and load records
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~
 # We recommend to save the records by any of these methodes http://pandas.pydata.org/pandas-docs/stable/user_guide/io.html.
-
+# For example with hdf:
+# ```python
 # df_records.to_hdf("anyfilename.csv")
 # pd.read_hdf("anyfilename.csv")
-
+# ```
 
 # %%
 # 3. Custom analyses
-# ----------------------------------------------------------------
+# ------------------
 # 3.1. Introduction into functions of the logparser
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# The function :py:mod:`ogstools.logparser.parse_file` iterates over all lines in the log file. For a specific set of regular expressions it finds it creates a new entry into a list (here named records)
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# The function :py:mod:`ogstools.logparser.parse_file` iterates over all lines in the log file. For each line matching one of a specific set of regular expressions it creates a new entry in a list (here named records)
 #
-log = const_viscosity_thermal_convection_log
 
 # Let us print the content of the log file in this example first.
-with Path(log).open() as log_file:
+with Path(const_viscosity_thermal_convection_log).open() as log_file:
     print(log_file.read())
 
-# ToDo link to documentation
-records = parse_file(log)
-# The list of records can directly be transformed into a pandas.DataFrame for further inspections. It is the raw presentation of a filtered ogs log in pandas DataFrame format.
+records = parse_file(const_viscosity_thermal_convection_log)
+# The list of records can directly be transformed into a pandas.DataFrame for further inspections. It is the raw representation of a filtered ogs log in pandas DataFrame format.
 df_records = pd.DataFrame(records)
 # The logparser is able to find the following entries:
 print(df_records.columns)
@@ -106,13 +110,12 @@ df_records  # noqa: B018
 # %%
 
 # For each information (e.g. a time measurement or numerical metric) we need to know to which timestep, iteration_number, process, component it belongs.
-# ToDo link to documentation, add this information to the table
 df_log = fill_ogs_context(df_records)
 df_log  # noqa: B018
 
 # %%
 # 3.2. Custom analyses - example
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 # We create a pivot_table where for each time step we see the step_size and the number of iterations.
 
 
diff --git a/docs/examples/howto_logparser/plot_logparser_analyses.py b/docs/examples/howto_logparser/plot_logparser_analyses.py
index 2a330091f5580dcc6abbc73d0c7b464d2c7c074f..66dc4ef10dabc8c9bdba0294b5f2c9340f36b489 100644
--- a/docs/examples/howto_logparser/plot_logparser_analyses.py
+++ b/docs/examples/howto_logparser/plot_logparser_analyses.py
@@ -1,14 +1,16 @@
 """
 Predefined Analyses
-=======================================
+===================
 
-Here we shows the different predefined analysis available in the logparser.
-We uses the project file from the following benchmark:
-`ogs: Constant viscosity (Hydro-Thermal)
-<https://www.opengeosys.org/docs/benchmarks/hydro-thermal/constant-viscosity/>` with
-`<t_end> 1e8 </t_end>`
-and for the staggered scheme the variant taken from
-`Tests/Data/Parabolic/HT/StaggeredCoupling/HeatTransportInStationaryFlow/HeatTransportInStationaryFlow.prj`
+Here we show the different predefined analyses available in the logparser.
+We use the project file from the following benchmark:
+ogs: Constant viscosity (Hydro-Thermal)
+<https://www.opengeosys.org/docs/benchmarks/hydro-thermal/constant-viscosity/>
+
+with `<t_end> 1e8 </t_end>`
+
+and for the **staggered scheme** the variant taken from
+Tests/Data/Parabolic/HT/StaggeredCoupling/HeatTransportInStationaryFlow/HeatTransportInStationaryFlow.prj
 
 """
 
@@ -29,6 +31,9 @@ from ogstools.logparser.examples import (
 )
 
 # %%
+# The log preprocessing is the same for all examples and is explained in
+# :ref:`sphx_glr_auto_examples_howto_logparser_plot_logparser_advanced.py`.
+
 log = const_viscosity_thermal_convection_log
 records = parse_file(log)
 df_records = pd.DataFrame(records)
@@ -36,38 +41,57 @@ df_log = fill_ogs_context(df_records)
 
 # %%
 # Iterations per time step
-# -------------------------
+# ------------------------
+# Please see explanation in logparser
+# :ref:`sphx_glr_auto_examples_howto_logparser_plot_logparser_intro.py`.
+# (Section: Use predefined analyses)
+#
 # :py:mod:`ogstools.logparser.analysis_time_step`
 df_ts_it = time_step_vs_iterations(df_log)
 df_ts_it  # noqa: B018
 
 
 # %%
-# Performance of in separate parts by time step
-# ----------------------------------------------------------------
-# :py:mod:`ogstools.logparser.analysis_time_step`
+# Performance in separate parts by time step
+# ------------------------------------------
+# The resulting table presents the performance metrics for separate parts of the simulation,
+# organized by time step. It uses :py:mod:`ogstools.logparser.analysis_time_step`.
+# Each row corresponds to a specific time step, displaying metrics such
+# as output time [s], step size [s], time step solution time [s], assembly time [s],
+# Dirichlet time [s], and linear solver time [s].
+
 df_ts = analysis_time_step(df_log)
 df_ts = df_ts.loc[0]
-# log of serial so we can remove MPI_process (index=0) from result (all are 0) - see advanced
+# log of serial so we can remove MPI_process (index=0) from result (all are 0)
+# - see advanced
 df_ts  # noqa: B018
 # %%
 # Performance of in separate parts by time step - plot
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# We select only some metrics (3) and use pandas plot function.
 df_ts[["assembly_time", "dirichlet_time", "linear_solver_time"]].plot(
     logy=True, grid=True
 )
 
 # %%
 # Analysis of convergence criteria - Newton iterations
-# ----------------------------------------------------------------
-# :py:mod:`ogstools.logparser.analysis_convergence_newton_iteration`
+# ----------------------------------------------------
+# The :py:mod:`ogstools.logparser.analysis_convergence_newton_iteration`
+# function allows for the analysis of convergence criteria based on
+# Newton iterations. The resulting table provides convergence criteria for monolithic processes.
+# Each row represents convergence metrics such as `global component norm |x|`, `change of global component norm |dx|` (change between two iterations of the nonlinear solver)
+# and `relative change of global component |dx|/|x|` at different time steps, processes and nonlinear solver iterations.
 analysis_convergence_newton_iteration(df_log)
 
 
 # %%
 # Staggered
-# ----------------------------------------------------------------
+# ---------
+# The resulting table provides convergence criteria for staggered coupled processes.
+# Each row represents convergence metrics such as `global component norm |x|`, `change of global component norm |dx|` (change between two iterations of the nonlinear solver)
+# and `relative change of global component |dx|/|x|` at different time steps and coupling
+# iterations.
+
 # :py:mod:`ogstools.logparser.analysis_convergence_coupling_iteration`
 # We use the logs generated when running
 # https://gitlab.opengeosys.org/ogs/ogs/-/blob/master/Tests/Data/Parabolic/HT/HeatTransportInStationaryFlow/HeatTransportInStationaryFlow.prj
diff --git a/docs/examples/howto_logparser/plot_logparser_intro.py b/docs/examples/howto_logparser/plot_logparser_intro.py
index f88448c3c0972e966fede29a56e96e0534e71326..721d36cd672a528da8956efc64267559accbcad6 100644
--- a/docs/examples/howto_logparser/plot_logparser_intro.py
+++ b/docs/examples/howto_logparser/plot_logparser_intro.py
@@ -1,6 +1,6 @@
 """
 Introduction
-================================
+============
 
 This basic example shows a how to analyse the OGS log output to get information
 about performance of different parts of ogs.
@@ -21,39 +21,48 @@ from ogstools.logparser.examples import (
     const_viscosity_thermal_convection_log,
 )
 
-# %%
+# %% [markdown]
 # The log file
 # -------------
 # `log` is a str representing the location of the ogs log file.
-# Make sure the log file does not contain ANSI escape (e.g.color) code. https://en.wikipedia.org/wiki/ANSI_escape_code
+# Make sure the log file does not contain ANSI escape (e.g. color) codes.
+# https://en.wikipedia.org/wiki/ANSI_escape_code
 # Only if: You can remove it: ``cat ogs.log | sed 's/\x1b\[[0-9;]*m//g' > ogs.log```
-log = const_viscosity_thermal_convection_log
+
 
 # %%
 # Parsing steps
-# ----------------------------
-# The functions :py:mod:`ogstools.logparser.parse_file`  and :py:mod:`ogstools.logparser.fill_ogs_context` are explained in :ref:`sphx_glr_auto_examples_howto_logparser_plot_logparser_advanced.py`.
+# -------------
+# The functions :py:mod:`ogstools.logparser.parse_file` and
+# :py:mod:`ogstools.logparser.fill_ogs_context` are explained in
+# :ref:`sphx_glr_auto_examples_howto_logparser_plot_logparser_advanced.py`.
 # All predefined analyses need the result of fill_ogs_context.
-records = parse_file(log)
+records = parse_file(const_viscosity_thermal_convection_log)
 df_records = pd.DataFrame(records)
 df_log = fill_ogs_context(df_records)
 
 # %%
 # Use predefined analyses
-# ----------------------------------------------------------------
-# :py:mod:`ogstools.logparser.time_step_vs_iterations` is one of many predined analyses. All possibilities are shown here:
+# -----------------------
+# :py:mod:`ogstools.logparser.time_step_vs_iterations` is one of many predefined
+# analyses. All possibilities are shown here:
 # :ref:`sphx_glr_auto_examples_howto_logparser_plot_logparser_analyses.py`.
 #
-# Here we are interested in every time step of the simulation and how many iterations have been needed.
-# For analysis runs only with log of log-level `ogs -l info` or `ogs - l debug` according to
+# Here we are interested in every time step of the simulation and how many
+# iterations have been needed.
+# The analysis runs only with logs of log-level `ogs -l info` or `ogs -l debug`
+# according to
 # (see: https://www.opengeosys.org/docs/devguide/advanced/log-and-debug-output)
 
 df_ts_it = time_step_vs_iterations(df_log)
-# The result is a pandas.DataFrame. You may manipulate the dataframe to your needs with pandas functionality.
+# The result is a pandas.DataFrame. You may manipulate the dataframe to your
+# needs with pandas functionality.
 df_ts_it  # noqa: B018
 
 # %%
 # Pandas to plot
-# -------------------
-# You can directly use pandas plot https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html function from the resulting DataFrame.
+# --------------
+# You can directly use pandas plot
+# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html
+# function from the resulting DataFrame.
 df_ts_it.plot(grid=True)
diff --git a/ogstools/logparser/common_ogs_analyses.py b/ogstools/logparser/common_ogs_analyses.py
index 885e6cdf3ff7ef22c0207593b36f9b51d93999f8..c6608c7319d92845988308733364decd6e55a41d 100644
--- a/ogstools/logparser/common_ogs_analyses.py
+++ b/ogstools/logparser/common_ogs_analyses.py
@@ -46,13 +46,11 @@ def pre_post_check(interest: list[str], context: list[str]) -> Callable:
     It checks the DataFrame against specified 'interest' and 'context' criteria both
     before and after the function is called.
 
-    Parameters:
-    - interest (List[str]): A list of strings indicating the columns of interest in the DataFrame.
-    - context (List[str]): A list of strings indicating the context columns in the DataFrame
-                           that should be checked.
 
-    Returns:
-    - A decorator function that takes a function accepting a pandas DataFrame and
+    :param interest: indicates the columns of interest in the DataFrame.
+    :param context: indicates the context columns in the DataFrame that should be checked.
+
+    :return: A decorator function that takes a function accepting a pandas DataFrame and
       returns a modified DataFrame, wrapping it with pre-check and post-check logic
       based on the specified 'interest' and 'context'.
     """
@@ -195,32 +193,26 @@ def analysis_simulation_termination(df: pd.DataFrame):
     return pd.DataFrame()
 
 
-def fill_ogs_context(df_raw_log: pd.DataFrame):
+def fill_ogs_context(df_raw_log: pd.DataFrame) -> pd.DataFrame:
     """
     Fill missing values in OpenGeoSys (OGS) log DataFrame by context.
+    This function fills missing values in an OpenGeoSys (OGS) log DataFrame by context.
 
-    This function fills missing values in an OpenGeoSys (OGS) log DataFrame by context. Some logs do not contain information about time_step and iteration. The information must be collected by context, by surrounding log lines from the same MPI process. Logs are grouped by MPI process to get only surrounding log lines from the same MPI process. It is assumed that all following lines belong to the same time step until the next collected value of the time step. Some columns that contain actual integer values are converted to float.
-
-    Parameters:
-    - df (pd.DataFrame): DataFrame containing the raw OGS log data. Usually, the result of pd.DataFrame(parse_file(file))
+    :param df_raw_log: DataFrame containing the raw OGS log data. Usually, the result of pd.DataFrame(parse_file(file))
 
-    Returns:
-    - pd.DataFrame: DataFrame with missing values filled by context.
+    :return: pd.DataFrame with missing values filled by context.
 
     References:
-    - Pandas documentation : https://pandas.pydata.org/pandas-docs/stable/user_guide/
-
-    Todo:
-    - List of columns with integer values are known from regular expression.
+    Pandas documentation : https://pandas.pydata.org/pandas-docs/stable/user_guide/
 
     Notes:
-    - Some logs do not contain information about time_step and iteration. The information must be collected by context (by surrounding log lines from same mpi_process)
-      Logs are grouped by mpi_process to get only surrounding log lines from same mpi_process
-      There are log lines that give the current time step (when time step starts).
-      It can be assumed that in all following lines belong to this time steps, until next collected value of time step
-      Some columns that contain actual integer values are converted to float
-      See https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
-      ToDo list of columns with integer values are known from regular expression
+    Some logs do not contain information about time_step and iteration. The information must be collected by context (by surrounding log lines from same mpi_process)
+    Logs are grouped by mpi_process to get only surrounding log lines from same mpi_process
+    There are log lines that give the current time step (when time step starts).
+    It can be assumed that all following lines belong to this time step, until the next collected value of the time step
+    Some columns that contain actual integer values are converted to float
+    See https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
+    ToDo list of columns with integer values are known from regular expression
 
     """
     int_columns = [