Commits (10)
......@@ -72,3 +72,9 @@ Zhang Ning <zhangning737@gmail.com> Rooney <43574869+zhangning737@users.noreply.
Zhang Ning <zhangning737@gmail.com> zhangning737 <zhangning737@gmail.com>
dependabot[bot] <no-email> dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
dependabot[bot] <no-email> dependabot[bot] <dependabot[bot]@users.noreply.github.com>
Robert Habel <Robert.Habel@student.tu-freiberg.de> Robert Habel <robert.habel@student.tu-freiberg.de>
Linda Günther <linda.guenther@student.tu-freiberg.de> Linda Günther <Linda.Guenther@student.tu-freiberg.de>
Sophia Einspänner <sophia.einspaenner@student.tu-freiberg.de> Sophia Einspänner <Sophia.Einspaenner@student.tu-freiberg.de>
Jakob Randow <jakob.randow@web.de> jrandow <jakob.randow@htwk-leipzig.de>
Jakob Randow <jakob.randow@web.de> jrandow <jakob.randow@web.de>
Olaf Kolditz <olaf.kolditz@ufz.de>
......@@ -10,9 +10,9 @@ Packages = Hugo
BasedOnStyles = Vale, write-good, ogs
Vale.Spelling = None
write-good.ThereIs = suggestion
write-good.Passive = suggestion
write-good.TooWordy = suggestion
write-good.Weasel = suggestion
write-good.Passive = None
write-good.TooWordy = None
write-good.Weasel = None
write-good.E-Prime = None
TokenIgnores = \
......
......@@ -10,8 +10,11 @@ license: BSD-3-Clause
doi: 10.5281/zenodo.591265
# The following needs to be adapted on releases:
version: "6.4.3"
date-released: "2022-04-01"
date-released: "2022-09-19"
identifiers:
- type: doi
description: "Zenodo DOI for 6.4.3"
value: 10.5281/zenodo.7092676
- type: doi
description: "Zenodo DOI for 6.4.2"
value: 10.5281/zenodo.6405711
......
......@@ -277,7 +277,7 @@ AddTest(
REQUIREMENTS NOT OGS_USE_MPI
DIFF_DATA
expected_square_1e2_UC_late_ts_10_t_1000.000000.vtu square_1e2_UC_late_ts_10_t_1000.000000.vtu displacement displacement 1e-13 1e-16
expected_square_1e2_UC_late_ts_10_t_1000.000000.vtu square_1e2_UC_late_ts_10_t_1000.000000.vtu pressure pressure 1e-13 1e-16
expected_square_1e2_UC_late_ts_10_t_1000.000000.vtu square_1e2_UC_late_ts_10_t_1000.000000.vtu pressure pressure 2e-13 1e-16
)
AddTest(
......
......@@ -16,7 +16,7 @@
#include "BaseLib/DynamicSpan.h"
#include "MathLib/KelvinVector.h"
#include "MathLib/LinAlg/Eigen/EigenMapTools.h"
#include "TransposeInPlace.h"
namespace ProcessLib
{
......@@ -65,17 +65,25 @@ std::vector<double> const& getIntegrationPointKelvinVectorData(
return cache;
}
//! Overload without \c cache argument.
//!
//! \note This function returns the data in transposed storage order compared to
//! the overloads that have a \c cache argument.
template <int DisplacementDim, typename IntegrationPointDataVector,
typename MemberType>
std::vector<double> getIntegrationPointKelvinVectorData(
IntegrationPointDataVector const& ip_data_vector, MemberType member)
{
std::vector<double> ip_kelvin_vector_values;
getIntegrationPointKelvinVectorData<DisplacementDim>(
ip_data_vector, member, ip_kelvin_vector_values);
constexpr int kelvin_vector_size =
MathLib::KelvinVector::kelvin_vector_dimensions(DisplacementDim);
return ip_kelvin_vector_values;
return transposeInPlace<kelvin_vector_size>(
[&](std::vector<double>& values)
{
return getIntegrationPointKelvinVectorData<DisplacementDim>(
ip_data_vector, member, values);
;
});
}
template <int DisplacementDim, typename IntegrationPointDataVector,
......@@ -207,9 +215,9 @@ std::vector<double> getIntegrationPointDataMaterialStateVariables(
std::vector<double> result;
result.reserve(ip_data_vector.size() * n_components);
for (auto& ip_data_vector : ip_data_vector)
for (auto& ip_data : ip_data_vector)
{
auto const values_span = get_values_span(*(ip_data_vector.*member));
auto const values_span = get_values_span(*(ip_data.*member));
assert(values_span.size() == static_cast<std::size_t>(n_components));
result.insert(end(result), values_span.begin(), values_span.end());
......@@ -230,7 +238,7 @@ std::size_t setIntegrationPointDataMaterialStateVariables(
auto const n_integration_points = ip_data_vector.size();
std::size_t position = 0;
for (auto& ip_data : ip_data_vector)
for (auto const& ip_data : ip_data_vector)
{
auto const values_span = get_values_span(*(ip_data.*member));
std::copy_n(values + position, values_span.size(), values_span.begin());
......
......@@ -41,7 +41,7 @@
<convergence_criterion>
<type>DeltaX</type>
<norm_type>NORM2</norm_type>
<reltol>1e-14</reltol>
<reltol>1e-13</reltol>
</convergence_criterion>
<time_discretization>
<type>BackwardEuler</type>
......
/**
* \file
* \copyright
* Copyright (c) 2012-2022, OpenGeoSys Community (http://www.opengeosys.org)
* Distributed under a Modified BSD License.
* See accompanying file LICENSE.txt or
* http://www.opengeosys.org/project/license
*/
#include <gmock/gmock-matchers.h>
#include <gtest/gtest.h>
#include "ProcessLib/Utils/SetOrGetIntegrationPointData.h"
// Minimal integration-point (IP) data record used by the tests below:
// one Kelvin vector and one scalar value per integration point.
template <int DisplacementDim>
struct IPData
{
    // Kelvin vector type of the size matching DisplacementDim.
    using KV = MathLib::KelvinVector::KelvinVectorType<DisplacementDim>;

    KV kelvin;      // Kelvin-vector-valued IP quantity (e.g., a stress).
    double scalar;  // scalar IP quantity.
};
// Typed test fixture providing reference integration-point data for the
// getter/setter free functions tested below. \c Dim is a
// std::integral_constant<int, 2 or 3>.
template <class Dim>
struct ProcessLib_IPDataAccess : ::testing::Test
{
    static constexpr int dim = Dim::value;
    static constexpr int kv_size =
        MathLib::KelvinVector::kelvin_vector_dimensions(dim);

    // The expected-data helpers below cover only the 2D and 3D cases.
    static_assert(dim == 2 || dim == 3, "Only 2D and 3D are supported.");

    // Creates IP data with easily recognizable values: at IP i the Kelvin
    // vector components are 10*i, 10*i + 1, ... and the scalar is 100 + i.
    static std::vector<IPData<Dim::value>> getIPData()
    {
        using KV = typename IPData<dim>::KV;
        constexpr int off_diag_size = dim == 2 ? 1 : 3;
        constexpr std::size_t num_int_pts = 10;

        std::vector<IPData<dim>> ip_data(num_int_pts);

        for (std::size_t i = 0; i < num_int_pts; ++i)
        {
            ip_data[i].kelvin =
                KV::Constant(10. * i) + KV::LinSpaced(0., kv_size - 1.);

            // compensate Kelvin vector <-> symmetric tensor conversion
            ip_data[i].kelvin.template tail<off_diag_size>() *= std::sqrt(2.0);

            ip_data[i].scalar = 10. * num_int_pts + i;
        }

        return ip_data;
    }

    // Creates IP data filled entirely with NaNs. Used as a canary for the
    // setter tests: any value the setter fails to overwrite stays NaN.
    static std::vector<IPData<Dim::value>> getIPDataNaNs()
    {
        using KV = typename IPData<dim>::KV;
        constexpr std::size_t num_int_pts = 10;
        constexpr double nan = std::numeric_limits<double>::quiet_NaN();

        std::vector<IPData<dim>> ip_data(num_int_pts);

        for (std::size_t i = 0; i < num_int_pts; ++i)
        {
            ip_data[i].kelvin = KV::Constant(nan);
            ip_data[i].scalar = nan;
        }

        return ip_data;
    }

    // Scalar values matching getIPData(), one entry per integration point.
    static std::vector<double> getScalarData()
    {
        return {100, 101, 102, 103, 104, 105, 106, 107, 108, 109};
    }

    // Kelvin vector values matching getIPData() in component-major storage
    // order: all IP values of the 1st component first, and so on.
    static std::vector<double> getKVDataDefaultOrder()
    {
        if constexpr (dim == 2)
        {
            return {0, 10, 20, 30, 40, 50, 60, 70, 80, 90,   // 1st comp
                    1, 11, 21, 31, 41, 51, 61, 71, 81, 91,   // 2nd comp
                    2, 12, 22, 32, 42, 52, 62, 72, 82, 92,   // 3rd comp
                    3, 13, 23, 33, 43, 53, 63, 73, 83, 93};  // 4th comp
        }
        else  // dim == 3, guaranteed by the static_assert above
        {
            return {0, 10, 20, 30, 40, 50, 60, 70, 80, 90,  //
                    1, 11, 21, 31, 41, 51, 61, 71, 81, 91,  //
                    2, 12, 22, 32, 42, 52, 62, 72, 82, 92,  //
                    3, 13, 23, 33, 43, 53, 63, 73, 83, 93,  //
                    4, 14, 24, 34, 44, 54, 64, 74, 84, 94,  //
                    5, 15, 25, 35, 45, 55, 65, 75, 85, 95};
        }
    }

    // Kelvin vector values matching getIPData() in IP-major ("transposed")
    // storage order: all components of the 1st IP first, and so on.
    static std::vector<double> getKVDataTransposedOrder()
    {
        if constexpr (dim == 2)
        {
            return {0,  1,  2,  3,   // 1st IP
                    10, 11, 12, 13,  // 2nd IP
                    20, 21, 22, 23,  // 3rd IP
                    30, 31, 32, 33,  // ...
                    40, 41, 42, 43,  //
                    50, 51, 52, 53,  //
                    60, 61, 62, 63,  //
                    70, 71, 72, 73,  //
                    80, 81, 82, 83,  //
                    90, 91, 92, 93};
        }
        else  // dim == 3, guaranteed by the static_assert above
        {
            return {0,  1,  2,  3,  4,  5,   //
                    10, 11, 12, 13, 14, 15,  //
                    20, 21, 22, 23, 24, 25,  //
                    30, 31, 32, 33, 34, 35,  //
                    40, 41, 42, 43, 44, 45,  //
                    50, 51, 52, 53, 54, 55,  //
                    60, 61, 62, 63, 64, 65,  //
                    70, 71, 72, 73, 74, 75,  //
                    80, 81, 82, 83, 84, 85,  //
                    90, 91, 92, 93, 94, 95};
        }
    }
};
// Instantiate the typed test fixture for the 2D and 3D cases.
using ProcessLib_IPDataAccess_TestCases =
    ::testing::Types<std::integral_constant<int, 2>,
                     std::integral_constant<int, 3>>;

TYPED_TEST_SUITE(ProcessLib_IPDataAccess, ProcessLib_IPDataAccess_TestCases);
// Reading scalar IP data into a cache yields one value per IP, in IP order.
TYPED_TEST(ProcessLib_IPDataAccess, GetScalarData)
{
    constexpr int dim = TypeParam::value;

    auto const ip_data = this->getIPData();

    std::vector<double> actual;
    ProcessLib::getIntegrationPointScalarData(
        ip_data, &IPData<dim>::scalar, actual);

    auto const expected = this->getScalarData();
    ASSERT_THAT(actual, testing::Pointwise(testing::DoubleEq(), expected));
}
// The cache-based overload stores Kelvin vector data in component-major
// (default) order.
TYPED_TEST(ProcessLib_IPDataAccess, GetKelvinVectorDataDefaultOrder)
{
    constexpr int dim = TypeParam::value;

    auto const ip_data = this->getIPData();

    std::vector<double> actual;
    ProcessLib::getIntegrationPointKelvinVectorData<dim>(
        ip_data, &IPData<dim>::kelvin, actual);

    auto const expected = this->getKVDataDefaultOrder();
    ASSERT_THAT(actual, testing::Pointwise(testing::DoubleEq(), expected));
}
// Pretty subtle: the overload WITHOUT a cache argument returns the data in
// transposed (IP-major) storage order.
TYPED_TEST(ProcessLib_IPDataAccess, GetKelvinVectorDataTransposedOrder)
{
    constexpr int dim = TypeParam::value;

    auto const ip_data = this->getIPData();

    auto const actual = ProcessLib::getIntegrationPointKelvinVectorData<dim>(
        ip_data, &IPData<dim>::kelvin);

    auto const expected = this->getKVDataTransposedOrder();
    ASSERT_THAT(actual, testing::Pointwise(testing::DoubleEq(), expected));
}
// Writing scalar IP data reads one value per IP and reports the number of
// IPs processed.
TYPED_TEST(ProcessLib_IPDataAccess, SetScalarData)
{
    constexpr int dim = TypeParam::value;

    // Start from all-NaN data so that unwritten values would be noticed.
    auto ip_data = this->getIPDataNaNs();

    auto const values = this->getScalarData();
    auto const num_read = ProcessLib::setIntegrationPointScalarData(
        values.data(), ip_data, &IPData<dim>::scalar);

    ASSERT_EQ(ip_data.size(), num_read);

    auto const ip_data_expected = this->getIPData();
    for (std::size_t i = 0; i < ip_data_expected.size(); ++i)
    {
        EXPECT_DOUBLE_EQ(ip_data_expected[i].scalar, ip_data[i].scalar)
            << "Values at integration point " << i << " differ.";
    }
}
// Writing Kelvin vector IP data consumes the values in transposed (IP-major)
// order and reports the number of IPs processed.
TYPED_TEST(ProcessLib_IPDataAccess, SetKelvinVectorData)
{
    constexpr int dim = TypeParam::value;

    // Start from all-NaN data so that unwritten values would be noticed.
    auto ip_data = this->getIPDataNaNs();

    auto const values = this->getKVDataTransposedOrder();
    auto const num_read = ProcessLib::setIntegrationPointKelvinVectorData<dim>(
        values.data(), ip_data, &IPData<dim>::kelvin);

    ASSERT_EQ(ip_data.size(), num_read);

    auto const ip_data_expected = this->getIPData();
    for (std::size_t i = 0; i < ip_data_expected.size(); ++i)
    {
        EXPECT_THAT(
            ip_data[i].kelvin,
            testing::Pointwise(testing::DoubleEq(), ip_data_expected[i].kelvin))
            << "Values at integration point " << i << " differ.";
    }
}
......@@ -32,6 +32,7 @@ ci_images:
- |
echo "CONTAINER_GCC_IMAGE=${CONTAINER_REGISTRY}/gcc:${CONTAINER_TAG}" >> build.env
echo "CONTAINER_GCC_GUI_IMAGE=${CONTAINER_REGISTRY}/gcc-gui:${CONTAINER_TAG}" >> build.env
echo "CONTAINER_GCC_PM_OFF_IMAGE=${CONTAINER_REGISTRY}/gcc-pm-off:${CONTAINER_TAG}" >> build.env
echo "CONTAINER_CLANG_IMAGE=${CONTAINER_REGISTRY}/clang:${CONTAINER_TAG}" >> build.env
echo "XUNIT_TO_JUNIT_IMAGE=${CONTAINER_REGISTRY}/xunit-to-junit:${CONTAINER_TAG}" >> build.env
echo "PRECOMMIT_IMAGE=${CONTAINER_REGISTRY}/pre-commit:${CONTAINER_TAG}" >> build.env
......@@ -55,6 +56,13 @@ ci_images:
--tag $CONTAINER_GCC_GUI_IMAGE --upload
--cpu-target $CPU_TARGET
--packages build-essential
- poetry run ogscm compiler.py mpi.py ogs.py --build --ogs off
--compiler_version 11
--pm off --ccache
--version_file ../../web/data/versions.json
--tag $CONTAINER_GCC_PM_OFF_IMAGE --upload
--cpu-target $CPU_TARGET
--packages build-essential jq moreutils
- poetry run ogscm compiler.py ogs.py --build --ogs off
--compiler clang --compiler_version 11 --ccache
--version_file ../../web/data/versions.json
......
create cpm cache:
stage: package
dependencies: [meta]
tags: [shell, envinf]
tags: [docker]
needs: [meta, ci_images]
image: $CONTAINER_GCC_PM_OFF_IMAGE
rules:
- if: $CI_COMMIT_TITLE == "[versions] Updated cpm cache info."
when: never
......@@ -47,9 +48,11 @@ create cpm cache:
# Remove merge commit:
if [ $(git show --no-patch --format="%P" HEAD | wc -w) -eq 2 ]; then git checkout HEAD^2; fi
git diff
git config --global user.name "project_120_bot2"
git config --global user.email "project_120_bot2-no-reply@opengeosys.org"
git add ${_versions_file}
git commit -m "[versions] Updated cpm cache info."
git push git@${CI_SERVER_HOST}:${CI_MERGE_REQUEST_SOURCE_PROJECT_PATH}.git HEAD:${CI_COMMIT_REF_NAME}
git commit -n -m "[versions] Updated cpm cache info."
git push "https://project_120_bot2:${REPO_WRITE_KEY}@${CI_SERVER_HOST}/${CI_MERGE_REQUEST_SOURCE_PROJECT_PATH}" HEAD:${CI_COMMIT_REF_NAME}
exit 255 # marks with warning
fi
# external dependencies:
......@@ -70,11 +73,11 @@ create cpm cache:
offline configure:
stage: package
needs:
- job: meta
tags: [shell, envinf]
tags: [docker]
needs: [meta, ci_images]
image: $CONTAINER_GCC_PM_OFF_IMAGE
rules:
- if: $CI_COMMIT_TITLE == "[versions] Updated cpm cache info."
# For testing: remove rules-directive
- if: '$CI_COMMIT_BRANCH == "master"'
variables:
BUILD_DIR: "../build/offline"
......@@ -91,10 +94,8 @@ offline configure:
wget --no-verbose --header "JOB-TOKEN: $CI_JOB_TOKEN" \
${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/external-dependencies/${EXT_VERSION}/ext.tar.gz
- tar xf ext.tar.gz
# Disable network access for CPM CMake run, does not work with petsc.
- |
firejail --noprofile --net=none --blacklist=/usr/bin/mfront --read-write=${CI_BUILDS_DIR} \
cmake ../../ogs -B . -G Ninja --preset release \
cmake ../../ogs -B . -G Ninja --preset release \
-DOGS_USE_PYTHON=OFF -DOGS_DOWNLOAD_CPM_CACHE=ON
rm CMakeCache.txt
cmake ../../ogs -B . -G Ninja --preset release-petsc \
......
......@@ -212,7 +212,9 @@ string(REPLACE "." "_" HDF5_TAG ${ogs.tested_version.hdf5})
set(_hdf5_source GIT_REPOSITORY https://github.com/HDFGroup/hdf5.git GIT_TAG
hdf5-${HDF5_TAG}
)
set(_hdf5_source_file ${OGS_EXTERNAL_DEPENDENCIES_CACHE}/hdf5-${HDF5_TAG}.zip)
set(_hdf5_source_file
${OGS_EXTERNAL_DEPENDENCIES_CACHE}/hdf5-${ogs.tested_version.hdf5}.zip
)
if(EXISTS ${_hdf5_source_file})
set(_hdf5_source URL ${_hdf5_source_file})
elseif(NOT OGS_BUILD_HDF5)
......
......@@ -90,6 +90,7 @@ if($ENV{CI_COMMIT_BRANCH} MATCHES "master|^v[0-9]\.[0-9]\.[0-9]")
set(DOXYGEN_SEARCHENGINE_URL
"https://doxysearch.opengeosys.org/$ENV{CI_COMMIT_BRANCH}/doxysearch.cgi"
)
set(DOXYGEN_SEARCHENGINE YES)
message(STATUS "Doxygen search server: ${DOXYGEN_SEARCHENGINE_URL}")
endif()
if(EXISTS ${PROJECT_BINARY_DIR}/cpp-dependencies.svg)
......
......@@ -27,7 +27,7 @@ weight = 1080
- Check if a [Zenodo release](https://zenodo.org/account/settings/github/repository/ufz/ogs#) is automatically issued
- Issue a scan on [Software Heritage Archive](https://archive.softwareheritage.org/browse/origin/directory/?origin_url=https://gitlab.opengeosys.org/ogs/ogs.git)
- Update `CITATION.cff` and `web/content/publications/_index.md` with new Zenodo DOI
- Update `CITATION.cff` author list (`git shortlog -sne`) and corresponding bibtex-entry in publications web page
- Update `CITATION.cff` author list (`git shortlog -sne 6.4.3...6.4.2`) and corresponding bibtex-entry in publications web page
- Create bugfix branch
- Create new netlify site (in an empty directory)
<!-- vale off -->
......
......@@ -12,22 +12,36 @@ weight = 3
### Cite the software
```bibtex
@software{ogs:6.4.2,
author = {Naumov, Dmitry Yu. and Bilke, Lars and Fischer, Thomas and
Rink, Karsten and Wang, Wenqing and Watanabe, Norihiro and
Lu, Renchao and Grunwald, Norbert and Zill, Florian and
Buchwald, Jörg and Huang, Yonghui and Bathmann, Jasper and
Chen, Chaofan and Chen, Shuang and Meng, Boyan and
Shao, Haibing and Kern, Dominik and Yoshioka, Keita and
Garibay Rodriguez, Jaime and Miao, Xingyuan and
Parisio, Francesco and Silbermann, Christian and Thiedau, Jan and
Walther, Marc and Kaiser, Sonja and Boog, Johannes and
Zheng, Tianyuan and Meisel, Tobias and Ning, Zhang},
doi = {10.5281/zenodo.6405711},
month = {4},
title = {OpenGeoSys},
url = {https://www.opengeosys.org},
year = {2022}
@software{ogs:6.4.3,
author = {Lars Bilke and
Thomas Fischer and
Dmitri Naumov and
Christoph Lehmann and
Wenqing Wang and
Renchao Lu and
Boyan Meng and
Karsten Rink and
Norbert Grunwald and
Jörg Buchwald and
Christian Silbermann and
Robert Habel and
Linda Günther and
Mostafa Mollaali and
Tobias Meisel and
Jakob Randow and
Sophia Einspänner and
Haibing Shao and
Kata Kurgyis and
Olaf Kolditz and
Jaime Garibay},
title = {OpenGeoSys},
month = apr,
year = 2022,
note = {{If you use this software, please cite it using these metadata.}},
publisher = {Zenodo},
version = {6.4.3},
doi = {10.5281/zenodo.7092676},
url = {https://doi.org/10.5281/zenodo.7092676}
}
```
......
......@@ -71,8 +71,8 @@
}
},
"cpm": {
"package_file_id": 182,
"package_file_sha256": "00d7dea24754ad415e7003535b36a7d5b4e7224701341f5ca587f93e42b63563"
"package_file_id": 193,
"package_file_sha256": "999f770315795d9de91b6a97e5e1cdee9c0256cd338977ff9a5ec9ff79b7fd0e"
},
"ext": {
"cache_hash": "e6f3f1f4c29c6c5f096f89785e6e245bdf39ac1a"
......