Commit c3de0745 authored by Lukas Riedel

Merge branch 'feature/parallel-solve-ovlp-only'

parents 3ad6640e 58f23ee8
# File for module specific CMake tests.
# find all required packages
-FIND_PACKAGE (HDF5 REQUIRED COMPONENTS C)
-if(HDF5_IS_PARALLEL)
-message(STATUS "Parallel HDF5 library found")
-add_definitions(-DHDF5_PARALLEL)
+FIND_PACKAGE (HDF5 REQUIRED)
+if(NOT HDF5_IS_PARALLEL)
+message(SEND_ERROR "Parallel HDF5 must be installed!")
 endif()
+add_definitions(-DHDF5_PARALLEL)
FIND_PACKAGE (FFTW REQUIRED)
FIND_PACKAGE (SuperLU REQUIRED)
FIND_PACKAGE (MPI REQUIRED)
......
......@@ -46,8 +46,10 @@
#include <dune/pdelab/common/function.hh>
#include <dune/pdelab/constraints/p0.hh>
#include <dune/pdelab/constraints/p0ghost.hh>
#include <dune/pdelab/constraints/conforming.hh>
#include <dune/pdelab/constraints/common/constraints.hh>
#include <dune/pdelab/finiteelementmap/qkdg.hh>
#include <dune/pdelab/finiteelementmap/p0fem.hh>
#include <dune/pdelab/finiteelementmap/pkfem.hh>
#include <dune/pdelab/gridfunctionspace/gridfunctionspace.hh>
#include <dune/pdelab/gridfunctionspace/localfunctionspace.hh>
......
......@@ -11,17 +11,18 @@ Simulation<Traits>::Simulation (Dune::MPIHelper& _helper, std::shared_ptr<Grid>
verbose(inifile.get<int>("output.verbose"))
{
Dune::Timer timer;
const int verbose_low = helper.rank() == 0 ? verbose : 0;
// --- Grid Function Space ---
gfs = std::make_unique<GFS>(GFSHelper::create(gv));
gfs->name("matric_head");
cc = std::make_unique<CC>();
Dune::PDELab::constraints(*gfs,*cc,true);
Dune::PDELab::constraints(*gfs,*cc,false);
// --- Parameter class ---
InterpolatorFactory intfac(inifile,helper,verbose);
InterpolatorFactory intfac(inifile,helper,verbose_low);
interp = intfac.create();
ParameterizationFactory pfac(inifile,helper,*interp,verbose);
ParameterizationFactory pfac(inifile,helper,*interp,verbose_low);
param = pfac.create();
// --- Operator Helper Classes ---
......@@ -63,20 +64,44 @@ void Simulation<Traits>::operator_setup ()
go1 = std::make_unique<GO1>(*gfs,*cc,*gfs,*cc,*tlop,mbe_tlop);
igo = std::make_unique<IGO>(*go0,*go1);
// --- Solvers ---
ls = std::make_unique<LS>(5000,0);
pdesolver = std::make_unique<PDESOLVER>(*igo,*ls);
pdesolver->setParameters(inifile.sub("NewtonParameters"));
pdesolver->setVerbosityLevel(verbose);
// build parallel solvers
if (helper.size() > 1) {
// --- Solvers ---
lsgfs = std::make_unique<LSGFS>(LSGFSHelper::create(gv));
lscc = std::make_unique<LSCC>();
ls_par = std::make_unique<LSPar>(*igo,*cc,*lsgfs,*lscc,1000,0,true,true);
pdesolver_par = std::make_unique<PDESOLVERPar>(*igo,*ls_par);
pdesolver_par->setParameters(inifile.sub("NewtonParameters"));
pdesolver_par->setVerbosityLevel(verbose);
// --- Time Step Operators ---
Dune::PDELab::OneStepMethodResult osm_result;
if(osm_par){
osm_result = osm_par->result(); // cache old result
}
osm_par = std::make_unique<OSMPar>(alex2,*igo,*pdesolver_par);
osm_par->setResult(osm_result);
osm_par->setVerbosityLevel(verbose+1);
}
// build sequential solvers
else {
ls_seq = std::make_unique<LSSeq>(5000,0);
pdesolver_seq = std::make_unique<PDESOLVERSeq>(*igo,*ls_seq);
pdesolver_seq->setParameters(inifile.sub("NewtonParameters"));
pdesolver_seq->setVerbosityLevel(verbose);
// --- Time Step Operators ---
Dune::PDELab::OneStepMethodResult osm_result;
if(osm){
osm_result = osm->result(); // cache old result
// --- Time Step Operators ---
Dune::PDELab::OneStepMethodResult osm_result;
if(osm_seq){
osm_result = osm_seq->result(); // cache old result
}
osm_seq = std::make_unique<OSMSeq>(alex2,*igo,*pdesolver_seq);
osm_seq->setResult(osm_result);
osm_seq->setVerbosityLevel(verbose+1);
}
osm = std::make_unique<OSM>(alex2,*igo,*pdesolver);
osm->setResult(osm_result);
osm->setVerbosityLevel(verbose+1);
gfs->update();
......@@ -102,11 +127,22 @@ bool Simulation<Traits>::compute_time_step ()
{
const RF t = controller->getTime();
const RF dt = controller->getDT();
pdesolver->setMaxIterations(controller->getIterations());
bool exception = false;
try
{
osm->apply(t,dt,*uold,*unew);
// solve in parallel
if (helper.size() > 1) {
pdesolver_par->setMaxIterations(controller->getIterations());
dwarn.push(false);
osm_par->apply(t,dt,*uold,*unew);
dwarn.pop();
}
// solve sequentially
else {
pdesolver_seq->setMaxIterations(controller->getIterations());
osm_seq->apply(t,dt,*uold,*unew);
}
*uold = *unew;
}
catch (Dune::PDELab::NewtonError &e){
......
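Note on the dwarn.push(false)/dwarn.pop() pair above: Dune's debug streams can be deactivated and reactivated in a stack-like fashion, which mutes the per-rank solver warnings during the parallel solve. A minimal sketch of that guard pattern (the solve call is a placeholder, not the actual Simulation member):

#include <dune/common/stdstreams.hh>

// Sketch only: mute Dune's warning stream around a noisy parallel call
void apply_muted()
{
    Dune::dwarn.push(false);  // deactivate dwarn; activation state is stacked
    // osm_par->apply(t, dt, *uold, *unew);  // placeholder for the solve
    Dune::dwarn.pop();        // restore the previous activation state
}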
......@@ -23,6 +23,14 @@ protected:
/// GFS Constraints Container
using CC = typename GFSHelper::CC;
/// LSGFS Helper
using LSGFSHelper = Dune::Dorie::GridFunctionSpaceHelper<GV,RF,0,Traits::GridGeometryType>;
/// Linear solver GFS
using LSGFS = typename LSGFSHelper::Type;
/// LSGFS Constraints Container
using LSCC = typename LSGFSHelper::CC;
// -- DORiE Class Definitions -- //
/// Parameter Interpolator
using Interpolator = Dune::Dorie::InterpolatorBase<Traits,Traits::dim>;
......@@ -56,14 +64,18 @@ protected:
using IGO = Dune::PDELab::OneStepGridOperator<GO0,GO1>;
/// Solution vector type
using U = typename IGO::Traits::Domain;
/// Linear solver type
using LS = Dune::PDELab::ISTLBackend_SEQ_SuperLU;
/// Non-linear solver type
using PDESOLVER = Dune::PDELab::Newton<IGO,LS,U>;
/// Linear solver types
using LSSeq = Dune::PDELab::ISTLBackend_SEQ_SuperLU;
using LSPar = Dune::PDELab::ISTLBackend_OVLP_AMG_4_DG<IGO,CC,LSGFS,LSCC,
Dune::PDELab::CG2DGProlongation,Dune::SeqSSOR,Dune::BiCGSTABSolver>;
/// Non-linear solver types
using PDESOLVERSeq = Dune::PDELab::Newton<IGO,LSSeq,U>;
using PDESOLVERPar = Dune::PDELab::Newton<IGO,LSPar,U>;
/// Time stepping scheme
using TimeStepScheme = Dune::PDELab::Alexander2Parameter<RF>;
/// Method computing the time step
using OSM = Dune::PDELab::OneStepMethod<RF,IGO,PDESOLVER,U,U>;
/// Methods computing the time step
using OSMSeq = Dune::PDELab::OneStepMethod<RF,IGO,PDESOLVERSeq,U,U>;
using OSMPar = Dune::PDELab::OneStepMethod<RF,IGO,PDESOLVERPar,U,U>;
// -- Utility Class Definitions -- //
/// VTK Output writer base class
......@@ -82,6 +94,9 @@ protected:
std::unique_ptr<GFS> gfs;
std::unique_ptr<CC> cc;
std::unique_ptr<LSGFS> lsgfs;
std::unique_ptr<LSCC> lscc;
std::unique_ptr<Interpolator> interp;
std::unique_ptr<Parameters> param;
std::unique_ptr<FlowBoundary> fboundary;
......@@ -96,9 +111,12 @@ protected:
std::unique_ptr<GO0> go0;
std::unique_ptr<GO1> go1;
std::unique_ptr<IGO> igo;
std::unique_ptr<LS> ls;
std::unique_ptr<PDESOLVER> pdesolver;
std::unique_ptr<OSM> osm;
std::unique_ptr<LSSeq> ls_seq;
std::unique_ptr<LSPar> ls_par;
std::unique_ptr<PDESOLVERSeq> pdesolver_seq;
std::unique_ptr<PDESOLVERPar> pdesolver_par;
std::unique_ptr<OSMSeq> osm_seq;
std::unique_ptr<OSMPar> osm_par;
TimeStepScheme alex2;
std::unique_ptr<U> uold;
......
......@@ -36,6 +36,71 @@ template<typename GridView, typename RF, int order, Dune::GeometryType::BasicTyp
struct GridFunctionSpaceHelper
{};
template<typename GridView, typename RF>
struct GridFunctionSpaceHelper<GridView,RF,0,Dune::GeometryType::BasicType::simplex>
{
private:
static constexpr int dim = GridView::dimension;
using DF = typename GridView::Grid::ctype;
public:
/// Entity set of the GFS
using ES = Dune::PDELab::OverlappingEntitySet<GridView>;
/// FiniteElementMap type of GFS
using FEM = typename Dune::PDELab::P0LocalFiniteElementMap<DF,RF,dim>;
/// Constraints type of the GFS
using CON = Dune::PDELab::P0ParallelConstraints;
/// GFS type
using Type = typename Dune::PDELab::GridFunctionSpace<ES,FEM,CON,
Dune::PDELab::istl::VectorBackend<Dune::PDELab::istl::Blocking::fixed>>;
/// GFS Constraints Container type
using CC = typename Type::template ConstraintsContainer<RF>::Type;
/// create GFS from GridView
static Type create (const GridView& gv)
{
ES es(gv);
Dune::GeometryType geo;
geo.makeSimplex(dim);
auto fem = std::make_shared<FEM>(geo);
auto con = std::make_shared<CON>();
return Type(es,fem,con);
}
};
/// Provide types and construction of the GridFunctionSpace
template<typename GridView, typename RF>
struct GridFunctionSpaceHelper<GridView,RF,0,Dune::GeometryType::BasicType::cube>
{
private:
static constexpr int dim = GridView::dimension;
using DF = typename GridView::Grid::ctype;
public:
/// Entity set of the GFS
using ES = Dune::PDELab::OverlappingEntitySet<GridView>;
/// FiniteElementMap type of GFS
using FEM = typename Dune::PDELab::P0LocalFiniteElementMap<DF,RF,dim>;
/// Constraints type of the GFS
using CON = Dune::PDELab::P0ParallelConstraints;
/// GFS type
using Type = typename Dune::PDELab::GridFunctionSpace<ES,FEM,CON,
Dune::PDELab::istl::VectorBackend<Dune::PDELab::istl::Blocking::fixed> >;
/// GFS Constraints Container type
using CC = typename Type::template ConstraintsContainer<RF>::Type;
/// create GFS from GridView
static Type create (const GridView& gv)
{
ES es(gv);
Dune::GeometryType geo;
geo.makeCube(dim);
auto fem = std::make_shared<FEM>(geo);
auto con = std::make_shared<CON>();
return Type(es,fem,con);
}
};
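A minimal usage sketch of these helpers, mirroring the Simulation constructor above; the grid view type GV and the double range field type are assumptions for illustration:

// Sketch only: build a P0 grid function space plus constraints via the helper
using Helper = Dune::Dorie::GridFunctionSpaceHelper<
    GV, double, 0, Dune::GeometryType::BasicType::cube>;
auto gfs = std::make_unique<typename Helper::Type>(Helper::create(gv));
auto cc  = std::make_unique<typename Helper::CC>();
Dune::PDELab::constraints(*gfs, *cc, false);  // assemble parallel constraints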
/// Provide types and construction of the GridFunctionSpace
/* \todo add constraints specialization depending on grid
*/
......@@ -46,21 +111,25 @@ private:
using DF = typename GridView::Grid::ctype;
public:
/// Entity set of the GFS
using ES = Dune::PDELab::OverlappingEntitySet<GridView>;
/// FiniteElementMap type of GFS
using FEM = typename Dune::PDELab::PkLocalFiniteElementMap<GridView,DF,RF,order>;
using FEM = typename Dune::PDELab::PkLocalFiniteElementMap<ES,DF,RF,order>;
/// Constraints type of the GFS
using CON = Dune::PDELab::P0ParallelConstraints;
/// GFS type
using Type = typename Dune::PDELab::GridFunctionSpace<GridView,FEM,
Dune::PDELab::P0ParallelConstraints,
Dune::PDELab::istl::VectorBackend<
Dune::PDELab::istl::Blocking::fixed> >;
using Type = typename Dune::PDELab::GridFunctionSpace<ES,FEM,CON,
Dune::PDELab::istl::VectorBackend<Dune::PDELab::istl::Blocking::fixed>>;
/// GFS Constraints Container type
using CC = typename Type::template ConstraintsContainer<RF>::Type;
/// create GFS from GridView
static Type create (const GridView& gv)
{
auto fem = std::make_shared<FEM>(gv);
return Type(gv,fem);
ES es(gv);
auto fem = std::make_shared<FEM>(es);
auto con = std::make_shared<CON>();
return Type(es,fem,con);
}
};
......@@ -73,21 +142,25 @@ private:
using DF = typename GridView::Grid::ctype;
public:
/// Entity set of the GFS
using ES = Dune::PDELab::OverlappingEntitySet<GridView>;
/// FiniteElementMap type of GFS
using FEM = typename Dune::PDELab::QkDGLocalFiniteElementMap<DF,RF,order,dim>;
/// Constraints type of the GFS
using CON = Dune::PDELab::P0ParallelConstraints;
/// GFS type
using Type = typename Dune::PDELab::GridFunctionSpace<GridView,FEM,
Dune::PDELab::P0ParallelGhostConstraints,
Dune::PDELab::istl::VectorBackend<
Dune::PDELab::istl::Blocking::fixed> >;
using Type = typename Dune::PDELab::GridFunctionSpace<ES,FEM,CON,
Dune::PDELab::istl::VectorBackend<Dune::PDELab::istl::Blocking::fixed> >;
/// GFS Constraints Container type
using CC = typename Type::template ConstraintsContainer<RF>::Type;
/// create GFS from GridView
static Type create (const GridView& gv)
{
ES es(gv);
auto fem = std::make_shared<FEM>();
return Type(gv,fem);
auto con = std::make_shared<CON>();
return Type(es,fem,con);
}
};
......
......@@ -219,11 +219,8 @@ namespace Dune {
/// Opens H5 array for reading
void open_array(std::string groupName)
{
if(helper.rank()==0)
{
h5file.open(groupName);
array_open = true;
}
h5file.open(groupName);
array_open = true;
}
/// Reads parameter array for Parameter p from H5 file
......@@ -232,12 +229,9 @@ namespace Dune {
if (!array_open)
DUNE_THROW(Exception,"Array file must be opened before reading a parameter");
if(helper.rank()==0)
{
Array fieldArray;
h5file.read_dataset(fieldArray, fieldCells, p.name);
p.set_values(fieldArray);
}
Array fieldArray;
h5file.read_dataset(fieldArray, fieldCells, p.name);
p.set_values(fieldArray);
}
/// Closes H5 file
......@@ -246,11 +240,8 @@ namespace Dune {
if (!array_open)
DUNE_THROW(Exception,"Array file already closed");
if(helper.rank()==0)
{
h5file.close();
array_open = false;
}
h5file.close();
array_open = false;
}
};
......
......@@ -4,6 +4,54 @@
namespace Dune {
namespace Dorie {
/// Check if parallel execution is allowed
/** Currently, we only allow parallel execution on YaspGrid due to
* constraints imposed by the linear solver
*/
template<typename GridType>
void check_parallel_allowed (const Dune::MPIHelper& helper)
{
if(helper.size() > 1
&& !std::is_same<YaspGrid<GridType::dimension>,GridType>::value) {
DUNE_THROW(Dune::Exception,"DORiE does not support parallel execution with this grid configuration!");
}
}
/// Specialized constructor for parallel YaspGrid construction
/** \param extensions Extensions of the grid
* \param elements Number of elements in each direction
* \return Shared pointer to the grid
*/
template<typename GridType>
std::enable_if_t<
std::is_same<YaspGrid<GridType::dimension>,GridType>::value,
std::shared_ptr<YaspGrid<GridType::dimension>> >
grid_cube_construction (const Dune::FieldVector<double,GridType::dimension>& extensions,
const std::array<unsigned int,GridType::dimension>& elements)
{
std::array<int,GridType::dimension> elements_;
std::copy(elements.cbegin(),elements.cend(),elements_.begin());
return std::make_shared<YaspGrid<GridType::dimension>>(extensions,elements_);
}
/// Default rectangular grid constructor. Calls Dune::StructuredGridFactory
/** \param extensions Extensions of the grid
* \param elements Number of elements in each direction
* \return Shared pointer to the grid
*/
template<typename GridType>
std::enable_if_t<
!std::is_same<YaspGrid<GridType::dimension>,GridType>::value,
std::shared_ptr<GridType> >
grid_cube_construction (const Dune::FieldVector<double,GridType::dimension>& extensions,
const std::array<unsigned int,GridType::dimension>& elements)
{
const Dune::FieldVector<double,GridType::dimension> origin(.0);
return Dune::StructuredGridFactory<GridType>::createCubeGrid(origin,extensions,elements);
}
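How the two overloads dispatch, as a sketch: for YaspGrid the std::enable_if_t condition selects the specialized parallel constructor at compile time, while any other grid type falls back to the StructuredGridFactory overload. Assuming a 2D setup for illustration:

// Sketch only: compile-time overload selection (2D, names assumed)
using Yasp = Dune::YaspGrid<2>;
Dune::FieldVector<double, 2> extensions(1.0);   // unit square
std::array<unsigned int, 2> elements{{16, 16}};
auto grid = Dune::Dorie::grid_cube_construction<Yasp>(extensions, elements);
// grid is a std::shared_ptr<YaspGrid<2>>; e.g. a UGGrid<2> would instead be
// built via Dune::StructuredGridFactory<UGGrid<2>>::createCubeGrid.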
/// Read simplex Gmsh file in 2D or 3D, create a grid, and return a shared pointer to the grid
/** Refine the grid globally and call default load balancing for multi-processing support.
* \tparam GridType Type of the grid. Must be supported by Dune::GridFactory
......@@ -13,13 +61,14 @@ namespace Dorie {
template<class GridType>
std::shared_ptr<GridType> build_grid_gmsh (const Dune::ParameterTree& inifile, const Dune::MPIHelper& helper)
{
check_parallel_allowed<GridType>(helper);
Dune::Timer timer;
ios_base_all_saver restore(std::cout);
enum { dim = GridType::dimension };
const int verbose = inifile.get<int>("output.verbose");
const int level = inifile.get<int>("grid.initialLevel");
const std::string meshfilename = inifile.get<std::string>("grid.gridFile");
typedef std::vector<int> GmshIndexMap;
if(verbose>0 && helper.rank()==0){
std::cout << "GRID SETUP:" << std::endl;
......@@ -29,14 +78,17 @@ std::shared_ptr<GridType> build_grid_gmsh (const Dune::ParameterTree& inifile, c
}
auto grid = std::make_shared<GridType>();
GmshIndexMap boundary_index_map;
GmshIndexMap element_index_map;
Dune::GridFactory<GridType> factory(grid.get());
Dune::GmshReader<GridType>::read(factory,meshfilename,boundary_index_map,element_index_map,verbose>2?true:false,false);
factory.createGrid();
grid->globalRefine(level);
if(helper.rank() == 0){
std::vector<int> boundary_index_map;
std::vector<int> element_index_map;
Dune::GmshReader<GridType>::read(factory,meshfilename,boundary_index_map,element_index_map,verbose>2?true:false,false);
}
factory.createGrid();
grid->loadBalance();
grid->globalRefine(level);
if(verbose>0 && helper.rank()==0){
if (verbose>2) std::cout << "--- GMSH READER OUTPUT END ---" << std::endl;
......@@ -59,6 +111,8 @@ std::shared_ptr<GridType> build_grid_gmsh (const Dune::ParameterTree& inifile, c
template<class GridType>
std::shared_ptr<GridType> build_grid_cube (const Dune::ParameterTree& inifile, const Dune::MPIHelper& helper)
{
check_parallel_allowed<GridType>(helper);
Dune::Timer timer;
ios_base_all_saver restore(std::cout);
enum { dim = GridType::dimension };
......@@ -68,7 +122,7 @@ std::shared_ptr<GridType> build_grid_cube (const Dune::ParameterTree& inifile, c
const Dune::FieldVector<double,dim> upperRight = inifile.get<Dune::FieldVector<double,dim>>("grid.extensions");
const Dune::array<unsigned int,dim> elements = inifile.get<Dune::array<unsigned int,dim>>("grid.cells");
auto grid = Dune::StructuredGridFactory<GridType>::createCubeGrid(lowerLeft,upperRight,elements);
auto grid = grid_cube_construction<GridType>(upperRight,elements);
grid->globalRefine(level);
grid->loadBalance();
......
......@@ -30,14 +30,22 @@ namespace Dune {
if(fileOpen)
close();
// set up property list for collective I/O
hid_t h5_plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(h5_plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
assert(h5_plist_id > -1);
// open the file for reading
h5_file_id = H5Fopen(filePath.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
h5_file_id = H5Fopen(filePath.c_str(), H5F_ACC_RDONLY, h5_plist_id);
assert(h5_file_id > -1);
// open the group
h5_group_id = H5Gopen(h5_file_id, group_name.c_str(), H5P_DEFAULT);
assert(h5_group_id > -1);
// release properties
H5Pclose(h5_plist_id);
fileOpen = true;
}
......@@ -97,14 +105,15 @@ namespace Dune {
local_data.resize(local_size);
// set up the collective transfer properties list
hid_t xferPropList = H5Pcreate(H5P_DATASET_XFER);
assert(xferPropList > -1);
hid_t h5_plist_id = H5Pcreate(H5P_DATASET_XFER);
H5Pset_dxpl_mpio(h5_plist_id, H5FD_MPIO_COLLECTIVE);
assert(h5_plist_id > -1);
status = H5Dread(dataset_id
, H5T_NATIVE_DOUBLE //image.DataType
, memspace_id
, dataspace_id // H5S_ALL //
, xferPropList //H5P_DEFAULT //
, h5_plist_id
, &(local_data[0])
);
assert(status>-1);
......@@ -116,6 +125,7 @@ namespace Dune {
H5Dclose(dataset_id);
H5Sclose(dataspace_id);
H5Sclose(memspace_id);
H5Pclose(h5_plist_id);
}
void close()
......
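The pattern introduced in this file, condensed: open the file with an MPI-IO file-access property list, read datasets with a collective transfer property list, and release both lists afterwards. A self-contained sketch under the same assumptions as the diff (MPI_COMM_WORLD, asserts instead of full error handling); the function name and path are placeholders:

#include <cassert>
#include <hdf5.h>
#include <mpi.h>

// Sketch only: collective HDF5 read, executed on all ranks
void read_collective(const char* path)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    hid_t file = H5Fopen(path, H5F_ACC_RDONLY, fapl);
    assert(file > -1);
    H5Pclose(fapl);  // access list no longer needed once the file is open

    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    // ... H5Dread(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, dxpl, buffer);
    H5Pclose(dxpl);
    H5Fclose(file);
}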
......@@ -19,7 +19,14 @@ def evaluate(iniinfo,runtime):
raise ValueError("The reference evaluator assumes the key _reference.path to be existent in the inifile")
if not os.path.isfile(reference):
raise RuntimeError("Reference file {} does not exist".format(reference))
try:
reference_new = iniinfo["_reference.path"] + "/" + iniinfo["_reference.name"] + ".pvd"
except KeyError:
raise ValueError("Since there is not a single reference file found that matches the output filename, "
"The reference evaluator assumes the keys _reference.path and _reference.name to be existent in the inifile")
vtks_ref = pvd_reader.read(reference_new)
reference = vtks_ref[-1].path
vtkfile_run = vtkfile.VTKFile(vtks[-1].path)
vtkfile_run.read()
......
......@@ -12,6 +12,26 @@ class BaseGrid(object):
_cellValues = () #: values of the cell variables
_dataDict = None #: dictionary holding variable data as key-value pairs
def merge(self, other):
"""
Merge two grids into a new grid instance
"""
if self.dim != other.dim:
raise RuntimeError("Grid dimensions do not match!")
self._calcCellCenters()
other._calcCellCenters()
new = self._merge_derived(other)
new._calcCellCenters()
for var in self._cellVariables:
data = np.append(self.data()[var],other.data()[var],axis=0)
new.add_data(var,data)
return new
def cellCenters(self):
"""
:returns: The cell centers of the grid.
......@@ -56,6 +76,10 @@ class BaseGrid(object):
def _calcCellCenters(self):
raise NotImplementedError
def _merge_derived(self,other):
raise NotImplementedError
class RegularGrid(BaseGrid):
"""
Class implementing a grid interface for given cell centers on a regular grid.
......@@ -76,6 +100,12 @@ class RegularGrid(BaseGrid):
self._cellCenters = cellCenters
def _merge_derived(self,other):
"""
Merge this grid with another into a new regular grid instance
"""
cellCenters = np.append(self.cellCenters(),other.cellCenters(),axis=0)
return RegularGrid(cellCenters)
class UnstructuredVTKGrid(BaseGrid):
......@@ -124,6 +154,19 @@ class UnstructuredVTKGrid(BaseGrid):
self._numCells = int(len(self.connectivity) / self.ppc)
def _merge_derived(self, other):
"""
Merge this grid with another into a new unstructured grid instance
"""
if self.ppc != other.ppc:
raise RuntimeError("Using different numbers of points per grid cell")
points_n = np.append(self.points,other.points,axis=0)
connectivity_n = np.append(self.connectivity,other.connectivity + len(self.points),axis=0)
return UnstructuredVTKGrid(points_n,connectivity_n,self.ppc)
def triangulation(self):
"""
:returns: Cell point coordinates x, y, z and connectivity c as (x, y, z), c
......
......@@ -6,6 +6,7 @@ import numpy as np
import warnings
from dorie.utilities.grid import UnstructuredVTKGrid
from dorie.utilities.grid import BaseGrid
from dorie.utilities.warnings import VTKWarning
from dorie.utilities import check_path
......@@ -56,14 +57,42 @@ class VTKFile:
grid = self._read_vtr(xml_root, variables)
elif vtk_type == "UnstructuredGrid":
grid = self._read_vtu(xml_root, variables)
elif vtk_type == "PUnstructuredGrid":
grid = self._read_pvtu(xml_root, variables)
else:
raise RuntimeError("Problem reading {0}: XML root attribute 'type' must "
"either be 'StructuredGrid' or 'UnstructuredGrid' "
"(is: {1})".format(self.path,root.attrib["type"]))
"(is: {1})".format(self.path,xml_root.attrib["type"]))
self.grid = grid
return grid
def _read_pvtu(self, root, variables):
"""
Reads and merges the piece files given by a parallel collection (.pvtu) file.
This function assumes that the grids can simply be stacked.
Calls VTKFile.read() for every piece found.
"""
out = None
for grid in root:
for layer in grid:
if layer.tag == "Piece":
filename = os.path.join(os.path.split(self.path)[0],layer.attrib["Source"])
if os.path.isfile(filename):
print("adding parallel piece {} to file collection".format(filename))
piece_file = VTKFile(filename)
if out is None:
out = piece_file.read(variables)
else: