Commit eb60c8c1 authored by Dion Häfner's avatar Dion Häfner

Merge branch 'feature/testing' into 'master'

Construct tests that actually pass

To-do:
---
- [x] Re-write ODE evaluator to use parameter field files
- [x] Create parameter fields for reference and muPhi evaluators
- [x] Finish correlation evaluator
- [x] Remove or enhance convergence evaluator
- [x] Write parfield evaluator
- [ ] Adjust `dorie.testtools` documentation
- [x] Add flux test to ODE evaluator
- [x] Test evaporation
- [x] Add tests for `dorie create` and `dorie plot`
- [x] Test interpolators
- [x] Check for warnings
- [x] Test with Python 2.7 and 3.x
- [x] Test parallel RFG
- [x] Hunt warnings
- [x] ODE tests for evaporation

See merge request !4
parents f317223b 6e583827
......@@ -12,7 +12,7 @@ stages:
build:main:
script:
- ./dune-common/bin/dunecontrol --only=dorie all
- MAKE_FLAGS="-j 2" ./dune-common/bin/dunecontrol --only=dorie all
- export PATH=/opt/dune/dorie/build-cmake/bin:$PATH
- ARGS="--output-on-failure -j 2" ./dune-common/bin/dunecontrol --only=dorie make test
- ./dune-common/bin/dunecontrol --only=dorie make doc
......@@ -23,15 +23,33 @@ build:main:
- $CI_PROJECT_DIR/build-cmake/doc/html/
expire_in: 1 week
build:python3:
script:
- cd /opt/dune/venv
- virtualenv dorie3 --py=/usr/bin/python3
- source /opt/dune/venv/dorie3/bin/activate
- python -m pip install virtualenv
- cd /opt/dune
- ./dune-common/bin/dunecontrol exec "rm -rf build-cmake"
- MAKE_FLAGS="-j 2" ./dune-common/bin/dunecontrol all
- export PATH=/opt/dune/dorie/build-cmake/bin:$PATH
- ARGS="--output-on-failure -j 2" ./dune-common/bin/dunecontrol --only=dorie make test
stage: build
build:update_dune:
script:
- ./dune-common/bin/dunecontrol update || true
- ./dune-common/bin/dunecontrol exec "rm -rf build-cmake"
- ./dune-common/bin/dunecontrol all
- MAKE_FLAGS="-j 2" ./dune-common/bin/dunecontrol all
- export PATH=/opt/dune/dorie/build-cmake/bin:$PATH
- ARGS="--output-on-failure -j 2" ./dune-common/bin/dunecontrol --only=dorie make test
stage: build
build:debug:
script:
- CMAKE_FLAGS="-DCMAKE_BUILD_TYPE=Debug" ./dune-common/bin/dunecontrol --only=dorie all
stage: build
deploy:docs:
script:
- cd $CI_PROJECT_DIR && bash .deploy_docs
......
......@@ -10,7 +10,7 @@ string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER)
if(CMAKE_BUILD_TYPE_UPPER MATCHES DEBUG)
set(CMAKE_VERBOSE_MAKEFILE ON)
endif()
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Wall")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Wall -Werror")
#
if(NOT (dune-common_DIR OR dune-common_ROOT OR
......
......@@ -25,7 +25,11 @@ MPIEXEC_POSTFLAGS = "@MPIEXEC_POSTFLAGS@"
DORIE_EXECUTABLE = os.path.join(DORIEDIR, "dune/dorie/dorie")
PARAMETERDIR = os.path.join(DORIEDIR, "doc/default_files")
DORIE_PYTHON = os.path.join(DORIEDIR, "dune-env")
MPIRUN = lambda nproc, exe, *args: [k for k in [MPIEXEC,MPIEXEC_NUMPROC_FLAG,str(nproc),MPIEXEC_PREFLAG,str(exe),MPIEXEC_POSTFLAGS] + list(args) if k]
def MPIRUN(nproc, exe, *args, **kwargs):
    """Build the mpirun invocation as an argument list for subprocess.

    Empty components (e.g. unset pre-/post-flags substituted by CMake)
    are dropped so the resulting list can be executed directly.

    :param nproc: number of MPI processes to launch
    :param exe: executable to run under mpirun
    :param args: additional arguments appended after the executable
    :param kwargs: may contain ``mpi_flags``, a list of extra mpirun flags
    """
    extra_flags = kwargs.get("mpi_flags") or []
    command = [MPIEXEC, MPIEXEC_NUMPROC_FLAG, str(nproc)]
    command.extend(extra_flags)
    command.extend([MPIEXEC_PREFLAG, str(exe), MPIEXEC_POSTFLAGS])
    command.extend(args)
    # filter out empty strings left by unset CMake template variables
    return [part for part in command if part]
def run(args):
if not os.path.isfile(args["config"]):
......@@ -34,7 +38,8 @@ def run(args):
if args["parallel"] == 1:
subprocess.check_call([DORIE_EXECUTABLE, args["config"]])
else:
subprocess.check_call(MPIRUN(args["parallel"],DORIE_EXECUTABLE,args["config"]))
subprocess.check_call(MPIRUN(args["parallel"], DORIE_EXECUTABLE,
args["config"], mpi_flags=args["mpi_flags"]))
except subprocess.CalledProcessError:
print("Error while running DORiE")
sys.exit(1)
......@@ -52,18 +57,31 @@ def create(args):
def pfg(args):
    """Run the parameter field generator (pf_from_file.py) inside the
    DORiE Python environment.

    :param args: dict of parsed command line options; uses the keys
        "config", "parallel" and "mpi_flags"
    :raises IOError: if the configuration file does not exist
    """
    if not os.path.isfile(args["config"]):
        raise IOError("Configuration file {} not found".format(args["config"]))
    try:
        if args["parallel"] == 1:
            subprocess.check_call([DORIE_PYTHON, "pf_from_file.py", args["config"]])
        else:
            # forward any user-supplied mpirun flags to the generator script
            if args["mpi_flags"]:
                mpi_flags = ["--mpi-flags=" + f for f in args["mpi_flags"]]
            else:
                mpi_flags = []
            subprocess.check_call([DORIE_PYTHON, "pf_from_file.py", args["config"],
                                   "--parallel", str(args["parallel"])] + mpi_flags)
    except subprocess.CalledProcessError:
        # message fixed: this command runs the PFG, not the DORiE solver itself;
        # exit nonzero so callers and the test suite see the failure
        print("Error while running the parameter field generator")
        sys.exit(1)
def plot(args):
    """Plot a preview of a VTK file via plot_vtk.py inside the DORiE
    Python environment.

    :param args: dict of parsed command line options; uses the keys
        "vtk" and "var"
    :raises IOError: if the VTK file does not exist
    """
    if not os.path.isfile(args["vtk"]):
        raise IOError("File {} not found".format(args["vtk"]))
    try:
        if args["var"]:
            # restrict the plot to the user-selected variables
            subprocess.check_call([DORIE_PYTHON, "plot_vtk.py", "-f", args["vtk"],
                                   "--var", args["var"]])
        else:
            subprocess.check_call([DORIE_PYTHON, "plot_vtk.py", "-f", args["vtk"]])
    except subprocess.CalledProcessError:
        # message fixed: this command plots output, it does not run the solver;
        # exit nonzero so callers and the test suite see the failure
        print("Error while plotting VTK file")
        sys.exit(1)
if __name__ == "__main__": # parse command line and call command handler
try:
......@@ -83,7 +101,7 @@ if __name__ == "__main__": # parse command line and call command handler
parser_pfg = subparsers.add_parser('pfg', help="Start parameter field generator.",
description="Start parameter field generator.",
usage="%(prog)s <config> [-h] [-p [N]]")
usage="%(prog)s <config> [-h] [-p [N]] [-m=MPI_FLAGS]")
parser_pfg.add_argument('config',
help="Configuration file for the parameter field generator. "
"Can be created with 'dorie create'.")
......@@ -91,15 +109,21 @@ if __name__ == "__main__": # parse command line and call command handler
const=multiprocessing.cpu_count(), type=int, required=False,
help="Run in parallel on N processes. "
"If N is not specified, run on all available CPU threads.")
parser_pfg.add_argument('-m','--mpi-flags', action="append", required=False,
help="Additional flags that are passed to mpirun when run in parallel. "
"May be specified multiple times.")
parser_pfg.set_defaults(func=pfg)
parser_run = subparsers.add_parser('run', help="Run DORiE.", description="Run DORiE.",
usage="%(prog)s <config> [-h] [-p [N]]")
usage="%(prog)s <config> [-h] [-p [N]] [-m=MPI_FLAGS]")
parser_run.add_argument('config',help="DORiE configuration file. Can be created with 'dorie create'.")
parser_run.add_argument('-p','--parallel', metavar='N', nargs='?', default=1,
const=multiprocessing.cpu_count(), type=int, required=False,
help="Run in parallel on N processes. "
"If N is not specified, run on all available CPU threads.")
parser_run.add_argument('-m','--mpi-flags', action="append", required=False,
help="Additional flags that are passed to mpirun when run in parallel. "
"May be specified multiple times.")
parser_run.set_defaults(func=run)
parser_plot = subparsers.add_parser('plot', help="Plot a preview of a VTK file created by DORiE.",
......@@ -110,7 +134,10 @@ if __name__ == "__main__": # parse command line and call command handler
help="Plot only given variables. If not specified, all variables are plotted.")
parser_plot.set_defaults(func=plot)
args = parser.parse_args()
try:
args = parser.parse_args()
except SystemExit:
sys.exit(1)
args.func(vars(args))
except Exception as e:
print("dorie failed with ({0}) warning(s) and ({1}) error(s)".format(len(warn),1))
......
......@@ -170,6 +170,15 @@ adding an empty line, make text **bold** or ``monospaced``.
<suggestion> false </suggestion>
</parameter>
<parameter name="dimensions">
<definition>
Spatial dimension of the created field. 3-dimensional fields are only supported
by the fft and hdf5 generators.
</definition>
<values> 2, 3 </values>
<suggestion> 2 </suggestion>
</parameter>
<parameter name="variance">
<definition>
Variance of the resulting field, if ``millerSimilarity`` is used.
......@@ -188,14 +197,6 @@ adding an empty line, make text **bold** or ``monospaced``.
</category>
<category name="generator.fft">
<parameter name="dimensions">
<definition>
Spatial dimension of the created field.
</definition>
<values> 2, 3 </values>
<suggestion> 2 </suggestion>
</parameter>
<parameter name="outputPath">
<definition>
Path to the output folder of the generated field. Note that this only accepts
......
......@@ -16,11 +16,6 @@
#ifndef DUNE_GEOINVERSION_HDF5_TOOLS_HH
#define DUNE_GEOINVERSION_HDF5_TOOLS_HH
#include <dune/pdelab/common/geometrywrapper.hh>
#include <assert.h>
#include <sstream>
namespace Dune {
namespace Dorie {
class H5Tools{
......
......@@ -13,11 +13,6 @@
#ifndef DUNE_GEOINVERSION_HDF5_TOOLS_HH
#define DUNE_GEOINVERSION_HDF5_TOOLS_HH
#include <dune/pdelab/common/geometrywrapper.hh>
#include <assert.h>
#include <sstream>
namespace Dune {
namespace Dorie {
class H5Tools{
......
......@@ -17,13 +17,20 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <assert.h>
#include <sstream>
// DUNE includes
// Do not treat DUNE warnings as errors
#pragma GCC diagnostic push
#pragma GCC diagnostic warning "-Wall"
#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/parametertreeparser.hh>
#include <dune/common/fvector.hh>
#include <dune/geometry/type.hh>
#include <dune/grid/common/gridenums.hh>
#include <dune/pdelab/common/geometrywrapper.hh>
#pragma GCC diagnostic pop
// dorie-rfg includes
#include <dune/dorie-rfg/datatypes.hh>
......@@ -56,7 +63,7 @@ int main(int argc, char** argv)
Dune::ParameterTree inifile;
Dune::ParameterTreeParser ptreeparser;
ptreeparser.readINITree(inifilename,inifile);
const unsigned int dim = inifile.get<unsigned int>("generator.fft.dimensions");
const unsigned int dim = inifile.get<unsigned int>("generator.dimensions");
// Attempt to create output directory
const std::string outputPath = inifile.get<std::string>("generator.fft.outputPath");
......
......@@ -105,7 +105,7 @@ int main(int argc, char** argv)
const bool adaptivity = inifile.get<bool>("adaptivity.useAdaptivity");
// Attempt to create output directory
int status = mkdir(outputPath.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
mkdir(outputPath.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
int result = access(outputPath.c_str(), W_OK);
if (result != 0)
DUNE_THROW(Dune::IOError,"Output folder " << outputPath << " not writable");
......
......@@ -2,6 +2,7 @@
#define DORIE_HH
#include <iostream>
#include <iomanip>
#include <vector>
#include <map>
#include <string>
......@@ -15,6 +16,12 @@
#include <unistd.h>
#include <hdf5.h>
// Do not treat DUNE warnings as errors
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wall"
#if __GNUC__ > 5
#pragma GCC diagnostic ignored "-Wmisleading-indentation"
#endif
#include <dune/common/fvector.hh>
#include <dune/common/fmatrix.hh>
#include <dune/common/ios_state.hh>
......@@ -44,8 +51,10 @@
#include <dune/pdelab/gridoperator/gridoperator.hh>
#include <dune/pdelab/gridoperator/onestep.hh>
#include <dune/pdelab/instationary/onestep.hh>
#include <dune/pdelab/instationary/onestepparameter.hh>
#include <dune/pdelab/newton/newton.hh>
#include <dune/pdelab/stationary/linearproblem.hh>
#pragma GCC diagnostic pop
#include <dune/dorie/solver.hh>
#include <dune/dorie/interface.hh>
......
......@@ -86,7 +86,7 @@ public:
}
/// Adapt the grid according to the estimated flux error and the strategy applied
/**
/**
* \param grid Grid to adapt
* \param gv Leaf grid view of the grid
* \param gfs Solution GridFunctionSpace
......@@ -110,9 +110,8 @@ public:
Dune::Timer timer3;
float t_setup,t_est,t_sqrt,t_strtgy,t_mark,t_adapt;
if(verbose>1 /*&& strategy == "targetTolerance"*/){
if(verbose>1)
std::cout << " Refinement Step " << multiRefinementCounter << ": ";
}
double eta_alpha(0.0);
double eta_beta(0.0);
......@@ -138,12 +137,8 @@ public:
native(eta)[i] = sqrt(native(eta)[i]); // eta contains squares
if (native(eta)[i] > maxeta) maxeta = native(eta)[i];
}
if (verbose>1) {
if (verbose>1)
std::cout << "Largest Local Error: " << maxeta << " " << std::endl;
if (maxeta < adaptivityThreshold) {
std::cout << " Max local error smaller than threshold " << adaptivityThreshold << ". Skipping grid refinement." << std::endl;
}
}
t_sqrt = timer3.elapsed();
timer3.reset();
......@@ -157,9 +152,9 @@ public:
// Apply marking strategy
if (strategy == "elementFraction")
Dune::PDELab::element_fraction( eta, alpha, beta, eta_alpha, eta_beta, verbose-1 );
Dune::PDELab::element_fraction(eta, alpha, beta, eta_alpha, eta_beta, verbose-1);
else if (strategy == "errorFraction")
Dune::PDELab::error_fraction( eta, alpha, beta, eta_alpha, eta_beta, verbose-1 );
Dune::PDELab::error_fraction(eta, alpha, beta, eta_alpha, eta_beta, verbose-1);
else{ //((strategy == "threshold") || (strategy == "targetTolerance")) {
eta_alpha = alpha;
eta_beta = beta; }
......@@ -168,15 +163,18 @@ public:
timer3.reset();
// Skip refinement if threshold is met, but still allow coarsening
if (maxeta < adaptivityThreshold)
if (maxeta < adaptivityThreshold) {
eta_alpha = maxeta + 1; // Only refine elements with error > maxerror + 1
if (verbose>1)
std::cout << " Max local error smaller than threshold " << adaptivityThreshold << ". Skipping grid refinement." << std::endl;
}
Dune::PDELab::mark_grid( grid, eta, eta_alpha, eta_beta, minLevel, maxLevel, verbose-1);
Dune::PDELab::mark_grid(grid, eta, eta_alpha, eta_beta, minLevel, maxLevel, verbose-1);
t_mark = timer3.elapsed();
timer3.reset();
Dune::PDELab::adapt_grid( grid, gfs, uold, unew, (order) * 2 );
Dune::PDELab::adapt_grid(grid, gfs, uold, unew, 2*order);
t_adapt = timer3.elapsed();
timer3.reset();
......@@ -228,7 +226,7 @@ private:
public:
/// Create the factory, taking the parameters for building an AdaptivityHandler
/**
/**
* \param _inifile Parameter file parser
* \param _grid Grid to adapt (reference is not saved)
*/
......
......@@ -4,12 +4,6 @@
#ifndef DUNE_DORIE_INSTATIONARY_IMPLICITONESTEP_HH
#define DUNE_DORIE_INSTATIONARY_IMPLICITONESTEP_HH
#include <iostream>
#include <iomanip>
#include <dune/common/ios_state.hh>
#include <dune/pdelab/instationary/onestepparameter.hh>
namespace Dune {
namespace Dorie {
using namespace Dune::PDELab;
......
......@@ -130,10 +130,9 @@ public:
Dune::PDELab::NumericalJacobianSkeleton<RichardsDGSpatialOperator<Traits,Parameter,Boundary,SourceTerm,FEM,adjoint> >(1.e-7),
Dune::PDELab::NumericalJacobianBoundary<RichardsDGSpatialOperator<Traits,Parameter,Boundary,SourceTerm,FEM,adjoint> >(1.e-7),
param(param_), boundary(boundary_), sourceTerm(sourceTerm_), method(method_), weights(weights_),
penalty_factor(config.get<RF>("dg.penaltyFactor")),
penalty_factor(config.get<RF>("dg.penaltyFactor")), mapper(view_),
intorderadd(intorderadd_), quadrature_factor(quadrature_factor_),
cache(20),
mapper(view_)
cache(20)
{
theta = 1.;
if (method == RichardsDGMethod::SIPG) theta = -1.;
......
......@@ -201,7 +201,7 @@ namespace Dune {
template<typename OUT>
struct H5File::AttributeReader {
static OUT read(const hid_t attr_id, const hid_t atype_mem, const hid_t rank, const std::vector<hsize_t> dims)
static OUT read(const hid_t attr_id, const hid_t atype_mem, const hsize_t rank, const std::vector<hsize_t> dims)
{
std::vector<hsize_t>::size_type flat_size = 1;
for(std::vector<hsize_t>::size_type i=0; i<rank; i++)
......@@ -218,7 +218,7 @@ namespace Dune {
template<>
struct H5File::AttributeReader<std::string> {
static std::string read(const hid_t attr_id, const hid_t atype_mem, const hid_t rank, const std::vector<hsize_t> dims)
static std::string read(const hid_t attr_id, const hid_t atype_mem, const hsize_t rank, const std::vector<hsize_t> dims)
{
if(rank > 1)
DUNE_THROW(Exception,"Cannot convert attribute with rank " << rank << " to string");
......@@ -233,7 +233,7 @@ namespace Dune {
template<typename A>
struct H5File::AttributeReader<std::vector<A>> {
static std::vector<A> read(const hid_t attr_id, const hid_t atype_mem, const hid_t rank, const std::vector<hsize_t> dims)
static std::vector<A> read(const hid_t attr_id, const hid_t atype_mem, const hsize_t rank, const std::vector<hsize_t> dims)
{
std::vector<hsize_t>::size_type flat_size = 1;
for(std::vector<hsize_t>::size_type i=0; i<rank; i++)
......@@ -250,7 +250,7 @@ namespace Dune {
template<typename A, int B>
struct H5File::AttributeReader<Dune::FieldVector<A,B>> {
static Dune::FieldVector<A,B> read(const hid_t attr_id, const hid_t atype_mem, const hid_t rank, const std::vector<hsize_t> dims)
static Dune::FieldVector<A,B> read(const hid_t attr_id, const hid_t atype_mem, const hsize_t rank, const std::vector<hsize_t> dims)
{
std::vector<hsize_t>::size_type flat_size = 1;
for(std::vector<hsize_t>::size_type i=0; i<rank; i++)
......@@ -267,7 +267,7 @@ namespace Dune {
template<typename A, int B>
struct H5File::AttributeReader<std::array<A,B>> {
static std::array<A,B> read(const hid_t attr_id, const hid_t atype_mem, const hid_t rank, const std::vector<hsize_t> dims)
static std::array<A,B> read(const hid_t attr_id, const hid_t atype_mem, const hsize_t rank, const std::vector<hsize_t> dims)
{
std::vector<hsize_t>::size_type flat_size = 1;
for(std::vector<hsize_t>::size_type i=0; i<rank; i++)
......
......@@ -9,6 +9,7 @@ except NameError: # Python 3.x
raw_input = input
from dorie.parfield import configparser
from dorie.parfield.parameter_file import write_parameter_file
from dorie.parfield.parameterization import parameterizations
from dorie.utilities.text_to_bool import text_to_bool
......@@ -41,6 +42,7 @@ class BaseConverter(object):
out_path = self.read_parameter('general','outputFile')
overwrite = text_to_bool(self.read_parameter('general','overwrite'))
millerSimilarity = text_to_bool(self.read_parameter('generator','millerSimilarity'))
self.dim = int(self.read_parameter('generator','dimensions'))
self.data_dict = self.read_data(input_file,millerSimilarity)
self.write(out_path,overwrite)
......@@ -54,42 +56,16 @@ class BaseConverter(object):
raise NotImplementedError
def write(self,out_path,overwrite=False):
"""
Write the data in :attr:`data_dict` to an HDF5 file. Also appends an
attribute to the HDF5 file containing the name of the used parameterization.
Generated file hierarchy:
::
Dataset /parameters/<parameterizationName>/<parameterName>
Attribute /parameters/parameterization
:param out_path: Path to the created h5 file
:param overwrite: If true, overwrite an existing file without asking
"""
if not self.data_dict:
raise RuntimeError("Encountered empty data array (call read_data first)")
if os.path.isfile(out_path) and not overwrite:
ow = raw_input("The output file {} already exists. Overwrite? [y/N] ".format(out_path))
if not ow.lower() in ["y","yes"]:
raise RuntimeError("Aborting")
if len(set(arr.shape for arr in self.data_dict.values())) != 1:
raise RuntimeError("All parameter arrays must have the same shape")
# assemble attributes
for v in self.data_dict.values():
if v.ndim != self.dim:
raise ValueError("Data shape ({}) does not match field dimensions ({})".format(", ".join(v.shape), self.dim))
h5_attrs = self._get_attributes()
# write HDF5 file
with h5py.File(out_path, 'w') as f:
g = f.create_group("parameters")
g.attrs.update(h5_attrs)
h = g.create_group(self.ParClass.classname)
for key in self.data_dict.keys():
h.create_dataset(key, data=self.data_dict[key])
groupname = self.ParClass.classname
write_parameter_file(groupname, self.data_dict, out_path,
attrs=h5_attrs, overwrite=overwrite)
def create_parclass(self,n):
"""
......@@ -115,10 +91,12 @@ class BaseConverter(object):
raise RuntimeError("Missing option {}.{} in parameter file".format(section,key))
def _get_attributes(self):
attr_dict = {
"parameterization": np.string_(self.ParClass.classname),
extensions = np.array(self.read_parameter("generator","extensions").split(), dtype=np.float64)
if not extensions.size == self.dim:
raise ValueError("Extensions do not match field dimensions")
attr = {
"millerSimilarity": np.bool(text_to_bool(self.read_parameter('generator','millerSimilarity'))),
"variance": np.float64(self.read_parameter("generator","variance")),
"extensions": np.array(self.read_parameter("generator","extensions").split(), dtype=np.float64)
"extensions": extensions
}
return attr_dict
return attr
......@@ -39,21 +39,25 @@ class H5Converter(BaseConverter):
if "/rawField" in f:
raw_array = np.array(f.get("/rawField"), dtype=np.float64)
else:
raw_array = np.zeros_like(media_array, dtype=np.float64)
raw_array = np.zeros_like(media_array)
# check how many different media we are dealing with
uq_media = np.sort(np.unique(media_array.astype(int)))
if uq_media != range(1,np.max(uq_media) + 1):
if not np.array_equal(uq_media,np.arange(1,np.max(uq_media) + 1)):
raise IOError("Media numbering must be continuous, starting from 1")
p_dict = {i: self.create_parclass(i).parameters() for i in uq_media}
p = [self.create_parclass(i).parameters() for i in uq_media]
# create parameter fields
parField = {"raw_field": raw_array}
for pn in self.ParClass.names:
parField[pn] = np.zeros_like(media_array, dtype=float)
parField[pn] = np.zeros_like(media_array)
for i in uq_media:
mask = np.logical_and(media_array >= i, media_array < i + 1)
parField[pn][mask] = p[i][pn] * (media_array[mask] - i) \
+ p[i+1][pn] * (i + 1 - media_array[mask])
mask = media_array == i
parField[pn][mask] = p[i-1][pn]
mask = np.logical_and(media_array > i, media_array < i + 1)
if np.any(mask):
parField[pn][mask] = p[i-1][pn] * (media_array[mask] - i) \
+ p[i][pn] * (i + 1 - media_array[mask])
return parField
import os
import h5py
import numpy as np
def read_parameter_file(in_path):
    """Read a DORiE parameter field HDF5 file.

    Expects the hierarchy written by :func:`write_parameter_file`:
    datasets under ``/parameters/<parameterization>/`` and attributes
    (including ``parameterization``) on the ``/parameters`` group.

    :param in_path: path to the HDF5 file to read
    :returns: tuple ``(data_dict, attrs)`` of parameter arrays and
        decoded group attributes
    """
    data = {}
    with h5py.File(in_path, 'r') as f:
        attrs = dict(f["parameters"].attrs)
        # h5py returns byte strings for string attributes; decode them
        for key, value in attrs.items():
            if isinstance(value, bytes):
                attrs[key] = value.decode("utf-8")
        group = attrs["parameterization"]
        for name in f["parameters/{}".format(group)].keys():
            data[name] = np.array(f["parameters/{}/{}".format(group, name)])
    return data, attrs
def write_parameter_file(group, data_dict, out_path, attrs=None, overwrite=False):
    """
    Write the data in :attr:`data_dict` to an HDF5 file. Also appends an
    attribute to the HDF5 file containing the name of the used parameterization.

    Generated file hierarchy:
    ::

        Dataset /parameters/<parameterizationName>/<parameterName>
        Attributes /parameters/<attributeName>

    :param group: Name of the parameterization (used as HDF5 group name)
    :param data_dict: Mapping of parameter names to array-like data
    :param out_path: Path to the created h5 file
    :param attrs: Optional mapping of additional attributes written to the
        ``/parameters`` group
    :param overwrite: If true, overwrite an existing file without asking
    :raises RuntimeError: if ``data_dict`` is empty, the arrays have
        inconsistent shapes, or the user declines to overwrite
    """
    if not data_dict:
        # guard explicitly; otherwise the shape check below raises a
        # misleading "same shape" error for an empty mapping
        raise RuntimeError("Encountered empty data array")
    if os.path.isfile(out_path) and not overwrite:
        try:
            # Python 2 compatibility: raw_input was renamed to input in Python 3
            prompt = raw_input
        except NameError:
            prompt = input
        ow = prompt("The output file {} already exists. Overwrite? [y/N] ".format(out_path))
        if not ow.lower() in ["y", "yes"]:
            raise RuntimeError("Aborting")
    if len(set(arr.shape for arr in data_dict.values())) != 1:
        raise RuntimeError("All parameter arrays must have the same shape")
    # write HDF5 file
    with h5py.File(out_path, 'w') as f:
        g = f.create_group("parameters")
        g.attrs["parameterization"] = np.string_(group)
        if attrs:
            g.attrs.update(attrs)
        h = g.create_group(group)
        for key in data_dict.keys():
            h.create_dataset(key, data=data_dict[key], compression="gzip")
......@@ -25,7 +25,11 @@ MPIEXEC_POSTFLAGS = "@MPIEXEC_POSTFLAGS@"
#
RFG_EXEC = os.path.join(DORIEDIR, "dune/dorie-rfg/dorie-rfg")
MPIRUN = lambda nproc, exe, *args: [k for k in [MPIEXEC,MPIEXEC_NUMPROC_FLAG,str(nproc),MPIEXEC_PREFLAG,str(exe),MPIEXEC_POSTFLAGS] + list(args) if k]
def MPIRUN(nproc, exe, *args, **kwargs):
    """Assemble the mpirun command line as a list, skipping components
    that are empty strings (unset CMake template variables).

    :param nproc: number of MPI processes to launch
    :param exe: executable to run under mpirun
    :param args: additional arguments appended after the executable
    :param kwargs: may contain ``mpi_flags``, a list of extra mpirun flags
    """
    pieces = [MPIEXEC, MPIEXEC_NUMPROC_FLAG, str(nproc)]
    pieces += kwargs.get("mpi_flags") or []
    pieces += [MPIEXEC_PREFLAG, str(exe), MPIEXEC_POSTFLAGS]
    pieces += list(args)
    # drop empty entries so subprocess does not receive blank arguments
    return list(filter(None, pieces))
if __name__ == "__main__":
try: # catch all exceptions so we can output an error message
......@@ -35,6 +39,9 @@ if __name__ == "__main__":
parser.add_argument('param',help='The configuration file holding all parameterization information')
parser.add_argument('-p','--parallel', metavar='N', nargs='?', default=1, const=multiprocessing.cpu_count(), type=int, required=False,
help="Run in parallel on N processes. If N is not specified, run on all available CPU threads.")
parser.add_argument('-m','--mpi-flags', action="append", required=False,
help="Additional flags that are passed to mpirun when run in parallel. "
"May be specified multiple times.")