diff --git a/MathLib/LinAlg/LinAlg.h b/MathLib/LinAlg/LinAlg.h
index 0dee45dd1353c2afce2853a24bd2edbe6a1dd95b..154d8b45a80118f40162828c2477f9a3715927d0 100644
--- a/MathLib/LinAlg/LinAlg.h
+++ b/MathLib/LinAlg/LinAlg.h
@@ -149,7 +149,7 @@ namespace LinAlg
 // Vector

 /// Set local accessible vector in order to get entries.
-/// Call this before call operator[] or get(...).
+/// Call this before calling operator[] or get(...) of x.
 void setLocalAccessibleVector(PETScVector const& x);

 void set(PETScVector& x, double const a);
@@ -210,8 +210,14 @@ namespace LinAlg
 // Vector

-/// Set local accessible vector in order to get entries.
-/// Call this before call operator[] or get(...). Not used for EIGEN
+/**
+   Set a local accessible vector in order to get its entries. Call this
+   before calling operator[] or get(...) of x.
+   The function performs computation only if DDC (e.g. parallel
+   computing) is applied. So far, Eigen vectors are not used as
+   global vectors in parallel computing; therefore this function is
+   currently empty.
+*/
 void setLocalAccessibleVector(EigenVector const& x);

 void set(EigenVector& x, double const a);
diff --git a/MathLib/LinAlg/PETSc/PETScLinearSolver.cpp b/MathLib/LinAlg/PETSc/PETScLinearSolver.cpp
index e4fa6ee8052e066854a8e7fe16ed6c1524906a4c..a563e4e47c9f9cdb117f7aed1ba991216c3f9853 100644
--- a/MathLib/LinAlg/PETSc/PETScLinearSolver.cpp
+++ b/MathLib/LinAlg/PETSc/PETScLinearSolver.cpp
@@ -83,7 +83,7 @@ bool PETScLinearSolver::solve(PETScMatrix& A, PETScVector &b, PETScVector &x)
     KSPSetOperators(_solver, A.getRawMatrix(), A.getRawMatrix(), DIFFERENT_NONZERO_PATTERN);
 #endif

-    KSPSolve(_solver, b.getData(), x.getData());
+    KSPSolve(_solver, b.getRawVector(), x.getRawVector());

     KSPConvergedReason reason;
     KSPGetConvergedReason(_solver, &reason);
diff --git a/MathLib/LinAlg/PETSc/PETScVector.cpp b/MathLib/LinAlg/PETSc/PETScVector.cpp
index ebd841c61a258bfe7114f36160a72da0875037aa..0ef9641355cd7f7df3fd309c657471c946600df0 100644
--- a/MathLib/LinAlg/PETSc/PETScVector.cpp
+++ b/MathLib/LinAlg/PETSc/PETScVector.cpp
@@ -200,13 +200,10 @@ PetscScalar PETScVector::get(const PetscInt idx) const
     {
         return _entry_array[getLocalIndex(idx)];
     }
-    else
-    {
-        // Ghost entries, and its original index is 0.
-        const PetscInt id_p = (idx == -_size) ? 0 : std::abs(idx);
-        return _entry_array[id_p];
-    }
-    return 0; // avoid warning.
+
+    // Ghost entries, and its original index is 0.
+    const PetscInt id_p = (idx == -_size) ? 0 : std::abs(idx);
+    return _entry_array[id_p];
 }
diff --git a/MathLib/LinAlg/PETSc/PETScVector.h b/MathLib/LinAlg/PETSc/PETScVector.h
index 3a936f11ee6d21f0d6f3dfce8fcdc994ebb5eac2..706377cfa4d4d5af9fc2d21608253d2edf671638 100644
--- a/MathLib/LinAlg/PETSc/PETScVector.h
+++ b/MathLib/LinAlg/PETSc/PETScVector.h
@@ -17,7 +17,6 @@
 #pragma once

 #include <map>
-#include <memory>
 #include <string>
 #include <vector>

@@ -157,6 +156,14 @@ class PETScVector
         VecSetValues(_v, e_idxs.size(), &e_idxs[0], &sub_vec[0], INSERT_VALUES);
     }

+    // TODO preliminary
+    void setZero() { VecSet(_v, 0.0); }
+
+    /// Disallow moving.
+    /// \todo This operator should be implemented properly when doing a
+    ///       general cleanup of all matrix and vector classes.
+    PETScVector& operator = (PETScVector &&) = delete;
+
     /// Set local accessible vector in order to get entries.
     /// Call this before call operator[] or get(...).
     void setLocalAccessibleVector() const;
@@ -178,12 +185,6 @@ class PETScVector
     */
     void getGlobalVector(std::vector<PetscScalar>& u) const;

-    /*!
-       Copy local entries including ghost ones to an array
-       \param u Preallocated vector for the values of local entries.
-    */
-    void copyValues(std::vector<double>& u) const;
-
     /* Get an entry value. This is an expensive operation,
        and it only get local value. Use it for only test purpose
        Get the value of an entry by [] operator.
     */
     PetscScalar get(const PetscInt idx) const;

-    // TODO eliminate in favour of getRawVector()
-    /// Get PETsc vector. Use it only for test purpose
-    const PETSc_Vec &getData() const
-    {
-        return _v;
-    }
-
-    // TODO preliminary
-    void setZero() { VecSet(_v, 0.0); }
-
-    /// Disallow moving.
-    /// \todo This operator should be implemented properly when doing a
-    ///       general cleanup of all matrix and vector classes.
-    PETScVector& operator = (PETScVector &&) = delete;
-
     //! Exposes the underlying PETSc vector.
     PETSc_Vec getRawVector() { return _v; }

@@ -217,6 +203,11 @@ class PETScVector
     */
     PETSc_Vec getRawVector() const { return _v; }

+    /*!
+       Copy local entries including ghost ones to an array
+       \param u Preallocated vector for the values of local entries.
+    */
+    void copyValues(std::vector<double>& u) const;
     /*! View the global vector for test purpose. Do not use it for output a big vector.
        \param file_name File name for output
diff --git a/NumLib/DOF/MeshComponentMap.cpp b/NumLib/DOF/MeshComponentMap.cpp
index 160c30b8b9119677eef5f4ecd37687395cacf52b..c9be0069205f1c602c74e2cf08106a1e6a9c8aad 100644
--- a/NumLib/DOF/MeshComponentMap.cpp
+++ b/NumLib/DOF/MeshComponentMap.cpp
@@ -84,9 +84,6 @@ MeshComponentMap::MeshComponentMap(
             OGS_FATAL("Global index in the system of equations"
                       " can only be numbered by the oder type"
                       " of ComponentOrder::BY_LOCATION");
-            // _num_global_dof is used as the global index offset
-            //global_id = static_cast<GlobalIndexType>(
-            //    _num_global_dof + mesh.getGlobalNodeID(j));
         }

         const bool is_ghost = mesh.isGhostNode(mesh.getNode(j)->getID());
diff --git a/ProcessLib/Process.cpp b/ProcessLib/Process.cpp
index f003e2f42e0ff233280f4b87a3823b2c025262bc..25e265394bbc0180413bf33b88b40bec6bbc273e 100644
--- a/ProcessLib/Process.cpp
+++ b/ProcessLib/Process.cpp
@@ -122,8 +122,6 @@ void Process::assemble(const double t, GlobalVector const& x, GlobalMatrix& M,
                        GlobalMatrix& K, GlobalVector& b,
                        StaggeredCouplingTerm const& coupling_term)
 {
-    // The function only has computation if DDC is appied,
-    // e.g. Parallel comuting.
     MathLib::LinAlg::setLocalAccessibleVector(x);

     assembleConcreteProcess(t, x, M, K, b, coupling_term);
@@ -138,8 +136,6 @@ void Process::assembleWithJacobian(const double t, GlobalVector const& x,
                                    GlobalVector& b, GlobalMatrix& Jac,
                                    StaggeredCouplingTerm const& coupling_term)
 {
-    // The function only has computation if DDC is appied,
-    // e.g. Parallel comuting.
     MathLib::LinAlg::setLocalAccessibleVector(x);
     MathLib::LinAlg::setLocalAccessibleVector(xdot);
@@ -244,8 +240,6 @@ void Process::computeSparsityPattern()
 void Process::preTimestep(GlobalVector const& x, const double t,
                           const double delta_t)
 {
-    // The function only has computation if DDC is appied,
-    // e.g. Parallel comuting.
     MathLib::LinAlg::setLocalAccessibleVector(x);
     preTimestepConcreteProcess(x, t, delta_t);
     _boundary_conditions.preTimestep(t);
@@ -253,8 +247,6 @@
 void Process::postTimestep(GlobalVector const& x)
 {
-    // The function only has computation if DDC is appied,
-    // e.g. Parallel comuting.
     MathLib::LinAlg::setLocalAccessibleVector(x);
     postTimestepConcreteProcess(x);
 }
@@ -263,8 +255,6 @@ void Process::computeSecondaryVariable(const double t, GlobalVector const& x,
                                        StaggeredCouplingTerm const& coupled_term)
 {
-    // The function only has computation if DDC is appied,
-    // e.g. Parallel comuting.
     MathLib::LinAlg::setLocalAccessibleVector(x);

     computeSecondaryVariableConcrete(t, x, coupled_term);
@@ -277,16 +267,12 @@ void Process::preIteration(const unsigned iter, const GlobalVector &x)
         cached_var->expire();
     }

-    // The function only has computation if DDC is appied,
-    // e.g. Parallel comuting.
     MathLib::LinAlg::setLocalAccessibleVector(x);
     preIterationConcreteProcess(iter, x);
 }

 NumLib::IterationResult Process::postIteration(const GlobalVector &x)
 {
-    // The function only has computation if DDC is appied,
-    // e.g. Parallel comuting.
     MathLib::LinAlg::setLocalAccessibleVector(x);
     return postIterationConcreteProcess(x);
 }
diff --git a/ProcessLib/TES/TESProcess.cpp b/ProcessLib/TES/TESProcess.cpp
index ffdeb5dd6beac2b80ee9bef2cb59ff29b36e0489..db778b975b82bc62ee2f695cc836e2a551a9153e 100644
--- a/ProcessLib/TES/TESProcess.cpp
+++ b/ProcessLib/TES/TESProcess.cpp
@@ -285,8 +285,6 @@ NumLib::IterationResult TESProcess::postIterationConcreteProcess(
     std::vector<double> local_x_cache;
     std::vector<double> local_x_prev_ts_cache;

-    // The function only has computation if DDC is appied,
-    // e.g. Parallel comuting.
     MathLib::LinAlg::setLocalAccessibleVector(*_x_previous_timestep);

     auto check_variable_bounds = [&](std::size_t id,
@@ -344,7 +342,6 @@ TESProcess::computeVapourPartialPressure(
         auto const x_nV = Adsorption::AdsorptionReaction::getMolarFraction(
             x_mV, _assembly_params.M_react, _assembly_params.M_inert);

-        // TODO Problems with PETSc? (local vs. global index)
         result_cache->set(node_id, p * x_nV);
     }
@@ -382,7 +379,6 @@ TESProcess::computeRelativeHumidity(
         auto const p_S =
             Adsorption::AdsorptionReaction::getEquilibriumVapourPressure(T);

-        // TODO Problems with PETSc? (local vs. global index)
         result_cache->set(node_id, p * x_nV / p_S);
     }
@@ -423,7 +419,6 @@ TESProcess::computeEquilibriumLoading(
             : _assembly_params.react_sys->getEquilibriumLoading(
                   p_V, T, _assembly_params.M_react);

-        // TODO Problems with PETSc? (local vs. global index)
         result_cache->set(node_id, C_eq);
     }
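
Reviewer note, not part of the patch: a minimal usage sketch of the calling
convention this patch documents for setLocalAccessibleVector(). It assumes the
usual OGS GlobalVector typedef (PETScVector or EigenVector, depending on the
build configuration); readFirstEntry is a hypothetical helper, not code from
this repository.

    #include "MathLib/LinAlg/LinAlg.h"
    #include "NumLib/NumericsConfig.h"  // assumed location of the GlobalVector typedef

    double readFirstEntry(GlobalVector const& x)
    {
        // Refresh the locally accessible copy of x first. Under the PETSc
        // backend with domain decomposition this collects local and ghost
        // entries; under the Eigen backend the call is currently a no-op.
        MathLib::LinAlg::setLocalAccessibleVector(x);

        // Only after the call above is read access via get() or operator[]
        // safe on all ranks.
        return x.get(0);
    }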