Commit bc8bd563 authored by Tobias Meisel

[logparser] Improve documentation

parent 702d9740
1 merge request: !130 Logparser
@@ -36,6 +36,7 @@ extensions = [
    "sphinx_gallery.gen_gallery",
]
templates_path = ["_templates"]
exclude_patterns = [
    "_build",
...
How to use logparser
=============================

.. sectionauthor:: Tobias Meisel (Helmholtz Centre for Environmental Research GmbH - UFZ)

The following jupyter notebooks provide some examples of how to use the logparser.
""" """
Log parser - Advanced topics Advanced topics
================================== ==================================
This example shows how to analyse the OGS log output to get information We cover:
about performance of different parts of ogs.
It uses a log file generated by ogs with project file from the following benchmark:
1. Logs from parallel computation (OGS with MPI runs)
2. Performance tuning
3. Custom analyses
Although these topics are presented together they do not depend on each other and can be used separately.
""" """
# %%
from pathlib import Path

import numpy as np
import pandas as pd

from ogstools.logparser import (
    analysis_time_step,
    fill_ogs_context,
    parse_file,
)
from ogstools.logparser.examples import (
    const_viscosity_thermal_convection_log,
    parallel_log,
)
# %%
# 1. Logs from parallel computations (with MPI)
# =================================================================
# The log file investigated in this example is the result of an mpirun (-np 3)
# of the project file
# https://gitlab.opengeosys.org/ogs/ogs/-/blob/master/Tests/Data/EllipticPETSc/cube_1e3_XDMF_np3.prj
log = parallel_log
records = parse_file(log)
df_records = pd.DataFrame(records)
df_parallel = fill_ogs_context(df_records)
print(df_parallel.columns)
df_parallel  # noqa: B018

df_ts = analysis_time_step(df_parallel)
# For each mpi_process and each time_step we get the measurements (e.g. output_time).
df_ts  # noqa: B018
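# For instance, since the result is indexed by mpi_process and time_step, the
# measurements of a single process can be inspected with (a sketch):
# df_ts.loc[0]  # MPI process 0 only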
# %%
# 1.1. Aggregate measurements over all MPI processes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# If you are not particularly interested in the performance of individual MPI
# processes, pandas gives you all you need to further process the data.
# However, for meaningful performance measurements it is recommended to always
# consider the slowest MPI process, because of the synchronization barriers in
# the evaluation scheme of OGS. The resulting DataFrame then has the same
# structure as a DataFrame obtained from a serial OGS log.
df_ts.groupby("time_step").max()
# %%
df_ts[["output_time", "assembly_time"]].boxplot()
# %%
# 2. Performance tuning
# =================================================================
#
# You can either (2.1) reduce the set of regular expressions if you know
# exactly what your final analysis will need, and/or (2.2) save and load the
# pandas.DataFrame of the records.
#
# 2.1. Reduce regular expressions
# ----------------------------------------------------------------
# The logparser tries to find a matching regular expression for each line. By
# default it iterates over all entries specified in
# :py:mod:`ogstools.logparser.ogs_regexes`. You can reduce this set to the
# entries you are actually interested in. For this example we are only
# interested in the number of iterations per time step.
# Because parsing is expensive, it is also worthwhile to store the resulting
# records to a file.
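# A sketch of such a reduction (parse_file accepts a callable ogs_res that
# returns (pattern, record_type) pairs, like ogs_regexes; the keyword filter
# below is hypothetical and must match the pattern texts you need):
#
# from ogstools.logparser.ogs_regexes import ogs_regexes
#
# def reduced_regexes():
#     return [
#         (pattern, record_type)
#         for pattern, record_type in ogs_regexes()
#         if "iteration" in pattern
#     ]
#
# records = parse_file(log, ogs_res=reduced_regexes)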
# %%
# 2.2. Save and load records
# ----------------------------------------------------------------
# We recommend saving the records with one of the methods described in
# https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html, e.g. HDF5
# (requires the PyTables package):
# df_records.to_hdf("anyfilename.h5", key="records")
# df_records = pd.read_hdf("anyfilename.h5", "records")
# %%
# 3. Custom analyses
# =====================
# 3.1. Introduction to the functions of the logparser
# ----------------------------------------------------------------
# The function :py:mod:`ogstools.logparser.parse_file` iterates over all lines
# of the log file. For each line matching one of a given set of regular
# expressions it creates a new entry in a list (here named records).
#
log = const_viscosity_thermal_convection_log
# Let us first print the content of the log file used in this example.
with Path(log).open() as log_file:
    print(log_file.read())

# ToDo link to documentation
records = parse_file(log)
# The list of records can directly be transformed into a pandas.DataFrame for
# further inspection. It is the raw representation of a filtered OGS log in
# pandas DataFrame format.
df_records = pd.DataFrame(records)
# The logparser is able to find the following entries:
print(df_records.columns)
# For each entry, :py:mod:`ogstools.logparser.ogs_regexes` has added the type
# (corresponding to the OGS log level) and the value found to the resulting
# DataFrame.
df_records  # noqa: B018
# %%
# For each piece of information (e.g. a time measurement or numerical metric)
# we need to know the time_step, iteration_number, process and component it
# belongs to.
# ToDo link to documentation, add this information to the table
df_log = fill_ogs_context(df_records)
df_log  # noqa: B018
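# %%
# With the context filled in, you can slice the log by these columns, e.g. to
# get all records of the first time step (a small sketch using the column
# names shown above):
df_log[df_log["time_step"] == 1]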
# %%
# 3.2. Custom analyses - example
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# We create a pivot table in which we see, for each time step, the step_size
# and the number of iterations.
df_custom = df_records.pivot_table(
    ["step_size", "iteration_number"], ["time_step"], aggfunc=np.max
)
df_custom  # noqa: B018
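# %%
# For a quick visual check, the pivot table can be plotted directly with
# pandas (a sketch; one subplot per column):
df_custom.plot(subplots=True, sharex=True, grid=True)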
""" """
Log parser - Predefined Analyses Predefined Analyses
======================================= =======================================
Here we shows the different predefined analysis available in the log parser. Here we shows the different predefined analysis available in the logparser.
We uses the project file from the following benchmark: We uses the project file from the following benchmark:
`ogs: Constant viscosity (Hydro-Thermal) `ogs: Constant viscosity (Hydro-Thermal)
<https://www.opengeosys.org/docs/benchmarks/hydro-thermal/constant-viscosity/>` with <https://www.opengeosys.org/docs/benchmarks/hydro-thermal/constant-viscosity/>` with
@@ -35,36 +35,47 @@ df_records = pd.DataFrame(records)
df_log = fill_ogs_context(df_records)
# %%
# Iterations per time step
# -------------------------
# :py:mod:`ogstools.logparser.time_step_vs_iterations`
df_ts_it = time_step_vs_iterations(df_log)
df_ts_it  # noqa: B018
# %%
# Performance of separate parts by time step
# ----------------------------------------------------------------
# :py:mod:`ogstools.logparser.analysis_time_step`
df_ts = analysis_time_step(df_log)
# The log is from a serial run, so we can drop the MPI_process index level
# (all values are 0) - see the advanced example.
df_ts = df_ts.loc[0]
df_ts  # noqa: B018
# %%
# Performance of separate parts by time step - plot
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
df_ts[["assembly_time", "dirichlet_time", "linear_solver_time"]].plot(
    logy=True, grid=True
)
# %%
# Analysis of convergence criteria - Newton iterations
# ----------------------------------------------------------------
# :py:mod:`ogstools.logparser.analysis_convergence_newton_iteration`
analysis_convergence_newton_iteration(df_log)
# %%
# Staggered
# ----------------------------------------------------------------
# :py:mod:`ogstools.logparser.analysis_convergence_coupling_iteration`
# We use the logs generated when running
# https://gitlab.opengeosys.org/ogs/ogs/-/blob/master/Tests/Data/Parabolic/HT/HeatTransportInStationaryFlow/HeatTransportInStationaryFlow.prj
#
log = staggered_log
records = parse_file(log)
df_records = pd.DataFrame(records)
df_log = fill_ogs_context(df_records)
# Only for staggered coupled processes!
analysis_convergence_coupling_iteration(df_log)
""" """
Log parser - Introduction Introduction
================================ ================================
This basic example shows a how to analyse the OGS log output to get information This basic example shows a how to analyse the OGS log output to get information
about performance of different parts of ogs. about performance of different parts of ogs.
It uses the project file from the following benchmark: It uses the project file from the following benchmark:
`ogs: Constant viscosity (Hydro-Thermal) ogs: Constant viscosity (Hydro-Thermal) https://www.opengeosys.org/docs/benchmarks/hydro-thermal/constant-viscosity with
<https://www.opengeosys.org/docs/benchmarks/hydro-thermal/constant-viscosity/>` with
`<t_end> 1e8 </t_end>` `<t_end> 1e8 </t_end>`
""" """
@@ -23,20 +22,38 @@ from ogstools.logparser.examples import (
)
# %%
# The log file
# -------------
# `log` is a str representing the location of the OGS log file.
# Make sure the log file does not contain ANSI escape codes (e.g. color codes,
# see https://en.wikipedia.org/wiki/ANSI_escape_code).
# If it does, you can remove them with:
# ``sed 's/\x1b\[[0-9;]*m//g' ogs_color.log > ogs.log``
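# A Python alternative to the sed command above (a sketch using the same
# regular expression; file names are hypothetical):
#
#   import re
#   from pathlib import Path
#   raw = Path("ogs_color.log").read_text()
#   Path("ogs.log").write_text(re.sub(r"\x1b\[[0-9;]*m", "", raw))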
log = const_viscosity_thermal_convection_log
# %%
# Parsing steps
# ----------------------------
# The functions :py:mod:`ogstools.logparser.parse_file` and
# :py:mod:`ogstools.logparser.fill_ogs_context` are explained in
# :ref:`sphx_glr_auto_examples_howto_logparser_plot_logparser_advanced.py`.
# All predefined analyses need the result of fill_ogs_context.
records = parse_file(log)
df_records = pd.DataFrame(records)
df_log = fill_ogs_context(df_records)
# %%
# Use predefined analyses
# ----------------------------------------------------------------
# :py:mod:`ogstools.logparser.time_step_vs_iterations` is one of many
# predefined analyses. All possibilities are shown in
# :ref:`sphx_glr_auto_examples_howto_logparser_plot_logparser_analyses.py`.
#
# Here we are interested in every time step of the simulation and how many
# iterations were needed. The analysis requires a log of log level `info` or
# `debug` (run `ogs -l info` or `ogs -l debug`; see
# https://www.opengeosys.org/docs/devguide/advanced/log-and-debug-output).
df_ts_it = time_step_vs_iterations(df_log)
# The result is a pandas.DataFrame. You may manipulate the DataFrame to your
# needs with pandas functionality.
df_ts_it  # noqa: B018
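# %%
# For example, pandas' describe() gives summary statistics of the iteration
# counts over all time steps (a generic sketch, independent of column names):
df_ts_it.describe()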
# %%
# Plotting with pandas
# --------------------
# You can directly use the pandas plot function
# (https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html)
# on the resulting DataFrame.
df_ts_it.plot(grid=True)
@@ -195,11 +195,34 @@ def analysis_simulation_termination(df: pd.DataFrame):
    return pd.DataFrame()
def fill_ogs_context(df_raw_log: pd.DataFrame):
    """
    Fill missing values in an OpenGeoSys (OGS) log DataFrame by context.

    Some log lines do not contain information about the time step and
    iteration. This information must be collected from the context, i.e. from
    surrounding log lines of the same MPI process, so the log is grouped by
    MPI process. There are log lines that state the current time step (when
    the time step starts); all following lines are assumed to belong to that
    time step until the next time step value is collected.

    Parameters:
    - df_raw_log (pd.DataFrame): DataFrame containing the raw OGS log data,
      usually the result of pd.DataFrame(parse_file(file)).

    Returns:
    - pd.DataFrame: DataFrame with missing values filled by context.

    References:
    - Pandas documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/

    Todo:
    - The list of columns with integer values is known from the regular
      expressions.

    Notes:
    - Some columns that contain actual integer values are converted to
      pandas' nullable Int64, see
      https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
    """
    int_columns = [
        "line",
        "mpi_process",
@@ -210,10 +233,10 @@ def fill_ogs_context(df: pd.DataFrame):
"component", "component",
"process", "process",
] ]
    for column in df_raw_log.columns:
        if column in int_columns:
            try:
                df_raw_log[column] = df_raw_log[column].astype("Int64")
            except ValueError:
                print(
                    f"Could not convert column '{column}' to integer due to value error"
@@ -223,36 +246,29 @@ def fill_ogs_context(df: pd.DataFrame):
f"Could not convert column '{column}' to integer due to type error" f"Could not convert column '{column}' to integer due to type error"
) )
    df_raw_log["time_step"] = (
        df_raw_log.groupby("mpi_process")[["time_step"]]
        .fillna(method="ffill")
        .fillna(value=0)
    )
    # Back fill, because the iteration number appears in the log at the END
    # of the iteration.
    df_raw_log["iteration_number"] = df_raw_log.groupby("mpi_process")[
        ["iteration_number"]
    ].fillna(method="bfill")
    if "component" in df_raw_log:
        df_raw_log["component"] = df_raw_log.groupby("mpi_process")[
            ["component"]
        ].fillna(value=-1)
    # Back fill, because the process number is printed at the end of the
    # solving step and applies to the preceding lines.
    if "process" in df_raw_log:
        df_raw_log["process"] = df_raw_log.groupby("mpi_process")[
            ["process"]
        ].fillna(method="bfill")
    # Attention - the coupling iteration applies to the successor line and to
    # all other predecessors - it needs further processing for specific
    # analyses.
    if "coupling_iteration_process" in df_raw_log:
        df_raw_log["coupling_iteration_process"] = df_raw_log.groupby(
            "mpi_process"
        )[["coupling_iteration_process"]].fillna(method="ffill", limit=1)
    return df_raw_log
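# A minimal usage sketch (assuming a serial OGS log "ogs.log"; parse_file is
# defined alongside this function in ogstools.logparser):
#
#   df_log = fill_ogs_context(pd.DataFrame(parse_file("ogs.log")))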
info: This is OpenGeoSys-6 version 6.5.0-286-ge7ef7067.
info: OGS started on 2024-03-19 14:15:14+0100.
info: Reading project file /home/meisel/o/s/Tests/Data/Parabolic/LiquidFlow/SimpleSynthetics/PrimaryVariableConstraintDirichletBC/cuboid_1x1x1_hex_1000_Dirichlet_Dirichlet_1.prj.
info: readRasters ...
info: readRasters done
info: ConstantParameter: p0
info: ConstantParameter: part_of_left_boundary_Dirichlet
info: ConstantParameter: part_of_right_boundary_Dirichlet
info: ConstantParameter: constant_porosity_parameter
info: ConstantParameter: kappa1
info: ConstantParameter: p_spatial
info: ConstantParameter: zero
info: ConstantParameter: p_Dirichlet_top
info: No source terms for process variable 'pressure' found.
info: LiquidFlow process is set to be linear.
info: Initialize processes.
info: [time] Output of timestep 0 took 0.00556133 s.
info: OpenGeoSys is now initialized.
info: OGS started on 2024-03-19 14:15:14+0100.
info: Solve processes.
info: === Time stepping at step #1 and time 43200 with step size 43200
info: [time] Assembly took 0.0763653 s.
info: [time] Applying Dirichlet BCs took 0.00057093 s.
info: ------------------------------------------------------------------
info: *** Eigen solver compute()
info: -> compute with Eigen iterative linear solver CG (precon DIAGONAL)
info: ------------------------------------------------------------------
info: *** Eigen solver solve()
info: -> solve with Eigen iterative linear solver CG (precon DIAGONAL)
info: iteration: 47/10000
info: residual: 5.835249e-21
info: [time] Linear solver took 0.0120602 s.
info: [time] Iteration #1 took 0.0891769 s.
info: [time] Solving process #0 took 0.0892044 s in time step #1
info: [time] Time step #1 took 0.0896693 s.
info: [time] Output of timestep 1 took 0.00817603 s.
info: === Time stepping at step #2 and time 86400 with step size 43200
info: [time] Assembly took 0.0627239 s.
info: [time] Applying Dirichlet BCs took 0.000460932 s.
info: ------------------------------------------------------------------
info: *** Eigen solver compute()
info: -> compute with Eigen iterative linear solver CG (precon DIAGONAL)
info: ------------------------------------------------------------------
info: *** Eigen solver solve()
info: -> solve with Eigen iterative linear solver CG (precon DIAGONAL)
info: iteration: 55/10000
info: residual: 7.003839e-21
info: [time] Linear solver took 0.00256477 s.
info: [time] Iteration #1 took 0.065935 s.
info: [time] Solving process #0 took 0.0659825 s in time step #2
info: [time] Time step #2 took 0.0663716 s.
info: [time] Output of timestep 2 took 0.0160351 s.
info: The whole computation of the time stepping took 2 steps, in which
the accepted steps are 2, and the rejected steps are 0.
info: [time] Execution took 0.18038 s.
info: OGS terminated on 2024-03-19 14:15:14+0100.
from importlib import resources

_prefix = resources.files(__name__)
liquid_flow_log = _prefix / "ogs.log"
const_viscosity_thermal_convection_log = (
    _prefix / "ConstViscosityThermalConvection.log"
)
staggered_log = _prefix / "staggered_heat_transport_in_stationary_flow.log"
parallel_log = _prefix / "steady_state_diffusion_parallel.log"
info: This is OpenGeoSys-6 version 6.5.0-167-gd7633c76.dirty.
info: OGS started on 2024-03-21 13:17:33+0100.
info: This is OpenGeoSys-6 version 6.5.0-167-gd7633c76.dirty.
info: OGS started on 2024-03-21 13:17:33+0100.
info: This is OpenGeoSys-6 version 6.5.0-167-gd7633c76.dirty.
info: OGS started on 2024-03-21 13:17:33+0100.
[2] info: Reading project file /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3_XDMF_np3.prj.
[0] info: Reading project file /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3_XDMF_np3.prj.
[1] info: Reading project file /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3_XDMF_np3.prj.
[0] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/steady_state_diffusion.include into project file.
[1] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/steady_state_diffusion.include into project file.
[2] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/steady_state_diffusion.include into project file.
[0] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3.include into project file.
[2] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3.include into project file.
[1] info: Including /home/meisel/o/s/Tests/Data/EllipticPETSc/cube_1e3.include into project file.
[0] info: Reading corresponding part of mesh data from binary file cube_1x1x1_hex_1e3 ...
[2] info: Reading corresponding part of mesh data from binary file cube_1x1x1_hex_1e3 ...
[1] info: Reading corresponding part of mesh data from binary file cube_1x1x1_hex_1e3 ...
[0] warning: Could not open file 'cube_1x1x1_hex_1e3_partitioned_integration_point_properties_cfg3.bin'.
You can ignore this warning if the mesh does not contain integration_point-wise property data.
[2] warning: Could not open file 'cube_1x1x1_hex_1e3_partitioned_integration_point_properties_cfg3.bin'.
You can ignore this warning if the mesh does not contain integration_point-wise property data.
[1] warning: Could not open file 'cube_1x1x1_hex_1e3_partitioned_integration_point_properties_cfg3.bin'.
You can ignore this warning if the mesh does not contain integration_point-wise property data.
[1] info: [time] Reading the mesh took 0.007251 s.
[2] info: [time] Reading the mesh took 0.007835 s.
[0] info: [time] Reading the mesh took 0.008025 s.
[2] info: readRasters ...
[2] info: readRasters done
[2] info: ConstantParameter: p0
[2] info: ConstantParameter: p_Dirichlet_left
[2] info: ConstantParameter: p_Dirichlet_right
[1] info: readRasters ...
[1] info: readRasters done
[1] info: ConstantParameter: p0
[2] info: No source terms for process variable 'pressure' found.
[1] info: ConstantParameter: p_Dirichlet_left
[1] info: ConstantParameter: p_Dirichlet_right
[0] info: readRasters ...
[0] info: readRasters done
[1] info: No source terms for process variable 'pressure' found.
[0] info: ConstantParameter: p0
[0] info: ConstantParameter: p_Dirichlet_left
[0] info: ConstantParameter: p_Dirichlet_right
[0] info: No source terms for process variable 'pressure' found.
[1] info: Initialize processes.
[2] info: Initialize processes.
[0] info: Initialize processes.
[0] info: HDF5: Using a single chunk for dataset geometry .
[2] info: HDF5: Using a single chunk for dataset geometry .
[1] info: HDF5: Using a single chunk for dataset geometry .
[2] info: HDF5: Using a single chunk for dataset geometry .
[2] info: HDF5: Using a single chunk for dataset topology .
[1] info: HDF5: Using a single chunk for dataset geometry .
[1] info: HDF5: Using a single chunk for dataset topology .
[0] info: HDF5: Using a single chunk for dataset geometry .
[0] info: HDF5: Using a single chunk for dataset topology .
[0] info: HDF5: Using a single chunk for dataset pressure .
[1] info: HDF5: Using a single chunk for dataset pressure .
[2] info: HDF5: Using a single chunk for dataset pressure .
[0] info: HDF5: Using a single chunk for dataset v .
[2] info: HDF5: Using a single chunk for dataset v .
[1] info: HDF5: Using a single chunk for dataset v .
[1] info: [time] Output of timestep 0 took 0.00447257 s.
[2] info: [time] Output of timestep 0 took 0.00447257 s.
[2] info: Solve processes.
[0] info: [time] Output of timestep 0 took 0.00449248 s.
[0] info: Solve processes.
[1] info: Solve processes.
[1] info: === Time stepping at step #1 and time 0.1 with step size 0.1
[2] info: === Time stepping at step #1 and time 0.1 with step size 0.1
[0] info: === Time stepping at step #1 and time 0.1 with step size 0.1
[1] info: [time] Assembly took 0.0058952 s.
[0] info: [time] Assembly took 0.00589635 s.
[2] info: [time] Assembly took 0.00589633 s.
[0] info: [time] Applying Dirichlet BCs took 0.000100729 s.
[1] info: [time] Applying Dirichlet BCs took 9.7956e-05 s.
[2] info: [time] Applying Dirichlet BCs took 0.000100732 s.
================================================
Linear solver bcgs with mg preconditioner using PRECONDITIONED
converged in 15 iterations (relative convergence criterion fulfilled).
================================================
[0] info: [time] Linear solver took 0.0031392 s.
[2] info: [time] Linear solver took 0.0031265 s.
[2] info: [time] Iteration #1 took 0.00918299 s.
[1] info: [time] Linear solver took 0.0031268 s.
[1] info: [time] Iteration #1 took 0.00917942 s.
[0] info: [time] Iteration #1 took 0.00918262 s.
[0] info: [time] Solving process #0 took 0.00926697 s in time step #1
[1] info: [time] Solving process #0 took 0.00926667 s in time step #1
[2] info: [time] Solving process #0 took 0.00926656 s in time step #1
[2] info: [time] Time step #1 took 0.0093221 s.
[0] info: [time] Time step #1 took 0.00931536 s.
[1] info: [time] Time step #1 took 0.00932201 s.
[2] info: [time] Output of timestep 1 took 0.00231362 s.
[2] info: The whole computation of the time stepping took 1 steps, in which
the accepted steps are 1, and the rejected steps are 0.
[1] info: [time] Output of timestep 1 took 0.00231531 s.
[1] info: The whole computation of the time stepping took 1 steps, in which
the accepted steps are 1, and the rejected steps are 0.
[1] info: [time] Execution took 0.0873399 s.
[0] info: [time] Output of timestep 1 took 0.00231734 s.
[0] info: The whole computation of the time stepping took 1 steps, in which
the accepted steps are 1, and the rejected steps are 0.
[0] info: [time] Execution took 0.0873461 s.
[2] info: [time] Execution took 0.087346 s.
[0] info: [time] Output of XDMF to cube_1e3_np3_cube_1x1x1_hex_1e3.xdmf took 8.7512e-05 s.
[1] info: OGS terminated on 2024-03-21 13:17:34+0100.
[0] info: OGS terminated on 2024-03-21 13:17:34+0100.
[2] info: OGS terminated on 2024-03-21 13:17:34+0100.
@@ -5,7 +5,7 @@
import re
from pathlib import Path
from typing import Any, Callable, Optional, Union

from ogstools.logparser.ogs_regexes import ogs_regexes

@@ -80,6 +80,7 @@ def parse_file(
    file_name: Union[str, Path],
    maximum_lines: Optional[int] = None,
    force_parallel: bool = False,
    ogs_res: Callable = ogs_regexes,
) -> list[Any]:
    """
    Parses a log file from OGS, applying regex patterns to extract specific information,

@@ -99,7 +100,6 @@ def parse_file(
    if isinstance(file_name, str):
        file_name = Path(file_name)
    file_name = Path(file_name)

    parallel_log = force_parallel or mpi_processes(file_name) > 1
    if parallel_log:

@@ -113,7 +113,7 @@ def parse_file(
        return lambda regex: re.compile(mpi_process_regex + regex)

    compile_re = compile_re_fn(process_regex)
    patterns = [(compile_re(k), v) for k, v in ogs_res()]
    number_of_lines_read = 0
    with file_name.open() as file:
...