Commit ff94bcf2 authored by Dion Haefner

split random field generator in parallel and sequential components

parent 38d5508c
configure_file(dorie.in dorie @ONLY)
install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/dorie DESTINATION ${CMAKE_INSTALL_BINDIR})
configure_file(dorie.in ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/dorie @ONLY)
# copy the temporary file into the final destination, setting the permissions
file(COPY ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/dorie
     DESTINATION ${CMAKE_CURRENT_BINARY_DIR}
     FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ
                      GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/dorie
        DESTINATION ${CMAKE_INSTALL_BINDIR}
        PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ
                    GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
@@ -5,8 +5,8 @@ DORIEDIR=@CMAKE_BINARY_DIR@
MPIRUN=@MPIEXEC@
############################
DORIE_EXECUTABLE=$DORIEDIR/src/dorie
PARAMETERDIR=$DORIEDIR/doc/parameters
DORIE_EXECUTABLE=$DORIEDIR/dune/dorie/dorie
PARAMETERDIR=$DORIEDIR/doc/default_files
DORIE_PYTHON=$DORIEDIR/dune-env
run_dorie_sequential()
@@ -30,8 +30,10 @@ initialize_folder()
echo "###################"
echo "INITIALIZING FOLDER"
echo "###################"
cp -iv $PARAMETERDIR/default.ini parameter.ini
cp -iv $PARAMETERDIR/default_bc.dat bc.dat
cp -iv $PARAMETERDIR/default.ini $PWD/config.ini
cp -iv $PARAMETERDIR/default_pf.ini $PWD/parfield.ini
cp -iv $PARAMETERDIR/2d_infiltr.bcdat $PWD/2d_infiltr.bcdat
cp -iv $PARAMETERDIR/3d_infiltr.bcdat $PWD/3d_infiltr.bcdat
echo "Initialization done"
}
@@ -40,6 +42,11 @@ plot_vtk()
$DORIE_PYTHON plot_vtk.py --vtk $1 --var $2
}
pf_from_file()
{
$DORIE_PYTHON pf_from_file.py --param $1 --debug
}
###################
# CHECK IF DORIE WAS INSTALLED
@@ -51,6 +58,7 @@ fi
RUN=0
PLOT=0
CREATE=0
PFG=0
PARALLEL=0
# NO ARGUMENTS SUPPLIED
@@ -62,8 +70,7 @@ fi
# PARSE EACH ARGUMENT
case $1 in
run)
run) # run dorie
RUN=1
shift
if [[ $# -gt 0 ]]; then
@@ -97,6 +104,16 @@ case $1 in
esac
fi
if [[ -e $INIFILE && -f $INIFILE && -s $INIFILE ]]; then
if [[ $PARALLEL -gt 0 ]]; then
run_dorie_parallel $INIFILE $PARALLEL
else
run_dorie_sequential $INIFILE
fi
else
echo "Error: Invalid parameter file $INIFILE"
fi
exit
;;
plot)
@@ -125,20 +142,36 @@ case $1 in
shift
done
fi
plot_vtk "$VTKFILE" "$VARIABLES"
exit
;;
create)
CREATE=1
initialize_folder
exit
;;
pfg)
shift
if [[ $# = 1 ]]; then
pf_from_file $1
exit
else
echo "Usage: dorie pfg INIFILE. For more information run 'dorie help'"
exit
fi
;;
help)
echo -e "Usage: dorie COMMAND [COMMAND-OPTIONS]\n"
echo -e "COMMANDS:"
echo -e " 'run INIFILE [OPTIONS]' starts DORiE with parameter file INIFILE"
echo -e " OPTIONS:"
echo -e " '--parallel NPROC' starts DORiE in parallel with NPROC processes"
echo -e " '--sequential' starts DORiE in sequential mode (default)"
echo -e " OPTIONS:"
echo -e " '--parallel NPROC' starts DORiE in parallel with NPROC processes"
echo -e " '--sequential' starts DORiE in sequential mode (default)"
echo -e " 'create' creates dummy parameter files in the current directory"
echo -e " 'pfg INIFILE' runs the parameter field generator"
echo -e " 'plot VTK [--var VAR]' plots the variables VAR (default: all variables) from the VTK file(s) VTK"
echo -e " 'help' displays this message"
@@ -150,31 +183,3 @@ case $1 in
exit
;;
esac
# RUN DORIE
if [[ $RUN = 1 ]]; then
if [[ -e $INIFILE && -f $INIFILE && -s $INIFILE ]]; then
if [[ $PARALLEL > 0 ]]; then
run_dorie_parallel $INIFILE $PARALLEL
else
run_dorie_sequential $INIFILE
fi
else
echo "Error: Invalid parameter file $INIFILE"
fi
exit
fi
# INITIALIZE FOLDER
if [[ $CREATE = 1 ]]; then
initialize_folder
exit
fi
# PLOT VTK FILES
if [[ $PLOT = 1 ]]; then
plot_vtk "$VTKFILE" "$VARIABLES"
exit
fi
# File for module specific CMake tests.
# find all required packages
FIND_PACKAGE (HDF5 REQUIRED)
FIND_PACKAGE (HDF5 REQUIRED COMPONENTS C)
if(HDF5_IS_PARALLEL)
message(STATUS "Parallel HDF5 library found")
add_definitions(-DHDF5_PARALLEL)
endif()
FIND_PACKAGE (FFTW REQUIRED)
FIND_PACKAGE (SuperLU REQUIRED)
FIND_PACKAGE (MPI REQUIRED)
/*
* File: HDF5Tools.hh
* Author: Adrian Ngo
*
* This requires the MPI version of HDF5.
*
* Overview of HDF5 methods:
* h5_Write : sequential write of multi-dim array to HDF5 file
* h5g_pWrite : parallel write of grid data to HDF5 file
* h5g_pRead : parallel read of grid data from HDF5 file
* h5_Read : sequential read of multi-dim array from HDF5 file
* h5_pAppend : parallel append of multi-dim array to existing HDF5 file
* h5_pWrite1d: parallel write of local 1d arrays to a global 1d array in one contiguous, disjointly hyperslabbed HDF5 file
* h5_pRead1d : parallel read of local 1d arrays from a global 1d array in one contiguous, disjointly hyperslabbed HDF5 file
*
*/
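/*
* Usage sketch for the sequential writer listed above (illustrative only;
* UINT, Vector and the flat row-major data layout are assumed from the
* surrounding codebase, e.g. "datatypes.hh"):
*
*   std::vector<double> data( nx * ny, 0.0 );  // flat 2-d field, x running fastest
*   Vector<UINT> nCells( 2 );
*   nCells[0] = nx;  // cells in x-direction
*   nCells[1] = ny;  // cells in y-direction
*   Dune::Dorie::H5Tools::h5_Write( data, "field.h5", "/YField", nCells );
*/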
#ifndef DUNE_GEOINVERSION_HDF5_TOOLS_HH
#define DUNE_GEOINVERSION_HDF5_TOOLS_HH
#include <dune/pdelab/common/geometrywrapper.hh>
#include <assert.h>
#include <sstream>
namespace Dune {
namespace Dorie {
class H5Tools{
private:
H5Tools(){};
public:
/*
* Note that in hdf5, all array indices are ordered the other way round!
*
* So far, we had this:
*
* nCells[0] = number of cells in x-direction
* nCells[1] = number of cells in y-direction
* nCells[2] = number of cells in z-direction
*
* But storing data in HDF5 requires defining a vector 'dims' (in 3d) with
*
* dims[2] = (hsize_t) ( nCells[0] ); // x-direction
* dims[1] = (hsize_t) ( nCells[1] ); // y-direction
* dims[0] = (hsize_t) ( nCells[2] ); // z-direction
*
* and in 2d:
*
* dims[1] = (hsize_t) ( nCells[0] ); // x-direction
* dims[0] = (hsize_t) ( nCells[1] ); // y-direction
*
*/
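/*
* Example (illustrative numbers): a 2-d field with nCells[0] = 100 (x) and
* nCells[1] = 50 (y) is therefore written with dims = {50, 100} -- the
* slowest-varying index comes first, matching the row-major layout of the
* flat data vector.
*/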
/** h5_Write(): Write multi-dim. array in sequential mode to HDF5 file.
*
*
*
\param [in] data: vector containing the data to be stored
\param [in] filename: HDF5 filename
\param [in] groupname: HDF5 group
\param [in] nCells: number of cells in each dimension
*
*
*
See
http://www.hdfgroup.org/HDF5/doc/RM/RM_H5F.html
for documentation of the HDF5 API.
*
*/
template<typename IVEC,typename VEC>
static void h5_Write( const VEC & data
, const std::string & filename
, const std::string & groupname
, const IVEC & nCells
)
{
std::cout << "h5_Write: " << filename << std::endl;
UINT dim = nCells.size();
//std::cout << std::endl << "h5_Write: dim = " << dim << std::endl;
std::cout << "h5_Write: groupname = " << groupname << std::endl;
/* Create a new file using default properties. */
hid_t file_id = H5Fcreate( filename.c_str() // name of the file to be created
, H5F_ACC_TRUNC // if you are trying to create a file that exists already, the existing file will be truncated, i.e., all data stored on the original file will be erased
, H5P_DEFAULT // default file creation property list
, H5P_DEFAULT // default file access property list
);
assert( file_id > -1 );
//std::cout << "h5_Write: h5-file created: " << filename.c_str() << std::endl;
hsize_t mdims[1];
mdims[0] = data.size();
hid_t memspace_id = H5Screate_simple( // H5Screate_simple creates a new simple dataspace and opens it for access, returning a dataspace identifier.
1 // rank=1 is the number of dimensions used in the dataspace
, mdims // mdims is a one-dimensional array of size rank specifying the size of each dimension of the dataspace.
, NULL // maxdims is an array of the same size specifying the upper limit on the size of each dimension. maxdims may be NULL, in which case maxdims == mdims.
); // must be released with H5Sclose or resource leaks will occur
assert( memspace_id > -1 );
//std::cout << "h5_Write: memspace_id created." << std::endl;
/* Create the dataspace for the dataset.
* The dataspace describes the dimensions of the dataset array. */
hsize_t mindims = 50000;
hsize_t dims[ dim ];
for(UINT i=0;i<dim;i++){
dims[dim-i-1] = (hsize_t) ( nCells[i] );
mindims = std::min( mindims, dims[dim-i-1] );
}
//std::cout << "dims [0] = "<< dims[0]<<std::endl;
//std::cout << "dims [1] = "<< dims[1]<<std::endl;
hid_t dataspace_id = H5Screate_simple(
dim // number of dimensions = rank
, dims // vector containing sizes per dimension
, NULL // maxdims == dims
);
assert( dataspace_id > -1 );
//std::cout << "h5_Write: dataspace_id created." << std::endl;
hid_t plist_id = H5Pcreate( // The new property list is initialized with default values for the specified class.
H5P_DATASET_CREATE // Properties for dataset creation
);
assert( plist_id > -1 );
//std::cout << "h5_Write: plist_id created." << std::endl;
herr_t status;
hsize_t maxnchunks = 1;
hsize_t chunk_dims[ dim ];
for(UINT i=0;i<dim;i++)
chunk_dims[i] = (hsize_t) 1;
status = H5Pset_chunk(
plist_id
, dim // must be == rank of the dataset
, chunk_dims // The values of the chunk_dims array define the size of the chunks to store the dataset's raw data. The unit of measure for chunk_dims values is dataset elements.
);
assert( status > -1 );
//std::cout << "h5_Write: H5Pset_chunk() o.k." << std::endl;
status = H5Pset_shuffle( plist_id ); // Sets the shuffle filter, H5Z_FILTER_SHUFFLE, in the dataset creation property list. This re-orders data to simplify compression.
assert( status > -1 );
//std::cout << "h5_Write: H5Pset_shuffle() o.k." << std::endl;
status = H5Pset_deflate( plist_id, 1 ); // Sets deflate (GNU gzip) compression method and compression level (0-9: lower = faster, but less compression)
assert( status > -1 );
//std::cout << "h5_Write: H5Pset_deflate() o.k." << std::endl;
/* Create the dataset. */
hid_t dataset_id = H5Dcreate1( file_id,
groupname.c_str(),
HDF5_DATA_TYPE, // 32-bit or 64-bit, "datatypes.hh"
dataspace_id,
plist_id
);
assert( dataset_id > -1 );
//std::cout << "h5_Write: dataset_id created." << std::endl;
/* Write the dataset. */
status = H5Dwrite( // Writes raw data from a buffer to a dataset.
dataset_id // dataset identifier
, H5T_NATIVE_DOUBLE // memory datatype id
, memspace_id // specifies the memory dataspace and the selection within it
, H5S_ALL // specifies the selection within the file dataset's dataspace. H5S_ALL indicates that the entire file dataspace, as defined by the current dimensions of the dataset, is to be selected
, H5P_DEFAULT // Identifier of a transfer property list for this I/O operation. H5P_DEFAULT: The default data transfer properties are used.
, &(data[0]) // application memory buffer
);
assert( status > -1 );
//std::cout << "h5_Write: H5Dwrite( " <<groupname.c_str()<<" ) o.k." << std::endl;
status = H5Sclose( dataspace_id );
assert( status > -1 );
//std::cout << "h5_Write: dataspace_id closed." << std::endl;
status = H5Sclose( memspace_id );
assert( status > -1 );
//std::cout << "h5_Write: memspace_id closed." << std::endl;
status = H5Dclose( dataset_id );
assert( status > -1 );
//std::cout << "h5_Write: dataset_id closed." << std::endl;
/* Close the property list. */
status = H5Pclose( plist_id );
assert( status > -1 );
//std::cout << "h5_Write: H5Pclose(plist_id) done." << std::endl;
/* Close the file. */
status = H5Fclose( file_id );
assert( status > -1 );
//std::cout << "h5_Write: H5Fclose( file_id ) done." << std::endl;
return;
}
/** h5g_pWrite(): Write grid data in parallel mode to an HDF5 file.
*
* \param [in] datavector: grid data from the local partition of the grid, written to the current hyperslab
* \param [in] filename: HDF5 filename
* \param [in] groupname: HDF5 group
* \param [in] gv: grid view
* \param [in] gridsizes: grid spacing in each dimension, used to map cell centers to virtual grid indices
* \param [in] nCells: the global number of cells for each dimension
* \param [in] blocksizePerDimension: number of data blocks per cell and dimension (default 1)
* \param [in] femtype: FEMType::CG for vertex-based data (one extra entry per dimension), FEMType::DG for cell-based data
* \param [in] preserve_structure: if false, cells on the overlap are skipped
*
See
http://www.hdfgroup.org/HDF5/doc/RM/RM_H5F.html
for documentation of the HDF5 API.
*
*/
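/*
* Call sketch (illustrative only; gv is a Dune grid view and datavector
* holds one value per grid cell, matching the defaults
* blocksizePerDimension=1 and femtype=FEMType::DG):
*
*   Dune::Dorie::H5Tools::h5g_pWrite( datavector, "yfield.h5", "/YField",
*                                     gv, gridsizes, nCells );
*
* Every rank of gv.comm() must take part in the call, since the file is
* created and written collectively (H5FD_MPIO_COLLECTIVE below).
*/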
template<typename GV,typename VEC>
static void h5g_pWrite( const VEC &datavector,
const std::string& filename,
const std::string& groupname,
const GV& gv,
const VEC &gridsizes,
const Vector<UINT> &nCells,
const int blocksizePerDimension = 1,
const FEMType::Type femtype=FEMType::DG,
const int current_grid_level=0,
const bool preserve_structure=true
)
{
std::cout << "h5g_pWrite: ... " << filename << std::endl;
//get the communicator of the dune grid
//const Dune::MPIHelper::MPICommunicator &comm = gv.comm();
// Info variable needed for HDF5
MPI_Info mpiInfo = MPI_INFO_NULL;
// some needed variables and typedefs
const UINT dim = GV::dimension; // dimensionality of the problem (2-d or 3-d)
// DO NOT CHANGE THIS EVER: Leave it to be "Dune::All_Partition".
// Since datavector contains ALL elements of the vector backend,
// we must iterate over ALL cells. Neglecting overlap cells would
// shift the solution in the HDF5 file, leading to wrong inversion results!
const Dune::PartitionIteratorType partitiontype
= Dune::All_Partition;
typedef typename GV::template Codim<0>::template Partition<partitiontype>::Iterator LeafIterator;
//typedef typename GV::Grid GRIDTYPE;
//variables for defining the offset and the count in the different dimensions
Vector<UINT> max_index( dim, 0 );
Vector<UINT> min_index( dim, 100000 );
/*
1.) Loop over the current hyperslab (hyperslab = all the leaf grid-cells that belong to the current processor) and get the global coordinate 'x' of each grid-cell center.
Remark: From now on, we work solely with the virtual Y-field grid, using virtual indices. Later, in the evaluate2d() and evaluate3d() member functions of the class kfieldgenerator, we do the very same conversion from a global coordinate to the virtual global index. That's why it can work independently of the grid type!
2.) Translate this global coordinate 'x' into its corresponding Yfield-tensor-index 'global_index'. This requires only the virtual gridsize of the virtual Yfield grid. So 'global_index' is just a virtual index.
3.) 'min_index' corresponds to the virtual cell with the lowest coordinate values in the current hyperslab. It is the offset (= translational displacement) from the very first virtual cell (0,0).
4.) 'count[i]' is the number of virtual cells in dimension 'i' of the current hyperslab.
Remark: The volume of the current hyperslab is 'nAllLocalCells'. 'mappersize' == 'nAllLocalCells' only if the resolution of the yaspgrid == resolution of the virtual grid!
*/
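/*
Worked example (2-d, illustrative numbers, with the defaults femtype=DG and
blocksizePerDimension=1): if this processor owns cells whose centers map to
virtual indices x in 4..7 and y in 0..3, then min_index = {4,0} and
max_index = {7,3}, giving offset = {0,4} and count = {4,4} in the reversed
HDF5 ordering, i.e. nAllLocalCells = 16.
*/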
std::cout << "h5g_pWrite: gridsizes = "
<< gridsizes << std::endl;
for (LeafIterator it = gv.template begin<0,partitiontype> ()
; it != gv.template end<0,partitiontype> ()
; ++it) {
if( !preserve_structure ){
// Do not consider cells on overlap!
if( it->partitionType() != Dune::InteriorEntity ){
continue;
}
}
// Get the local coordinate:
Dune::FieldVector<CTYPE,dim> x = it->geometry().center();
std::vector<UINT> global_index(dim,0);
for(UINT i=0; i<dim; i++ ) {
global_index[i] = static_cast<UINT>( x[i] / gridsizes[i] );
max_index[i] = std::max( global_index[i], max_index[i] );
min_index[i] = std::min( global_index[i], min_index[i] );
}
} // end of loop over leaf elements
MPI_Barrier( gv.comm() );
//int mpi_rank = gv.comm().rank();
// Set up file access property list with parallel I/O access
hid_t plist_id = H5Pcreate( H5P_FILE_ACCESS );
//std::cout << "h5g_pWrite: H5Pcreate() done. plist_id = "
// << plist_id << std::endl;
assert( plist_id > -1 );
herr_t status = H5Pset_fapl_mpio( plist_id, gv.comm(), mpiInfo );
//std::cout << "h5g_pWrite: H5Pset_fapl_mpio: status = "
// << status << std::endl;
assert( status > -1 );
if( status < 0 )
std::cout << "Warning: H5Pset_fapl_mpio < 0" << std::endl;
// Create a new file collectively and release property list identifier.
//std::cout << "h5g_pWrite: "
// << " create file "
// << filename.c_str()
// << std::endl;
hid_t file_id = H5Fcreate( filename.c_str(),
H5F_ACC_TRUNC,
H5P_DEFAULT,
plist_id );
H5Pclose( plist_id );
//std::cout << "h5g_pWrite: "
// << filename.c_str()
// << " file created!" << std::endl;
assert( file_id > -1 );
//==========================================================
// "dims_global" defines the filespace of the HDF5 file:
// It is process-independent and contains all DOFs.
//==========================================================
hsize_t dims_global[ dim ];
for(UINT i=0; i<dim; i++) {
dims_global[dim-i-1] = nCells[i] * blocksizePerDimension;
if(femtype==FEMType::CG){
dims_global[dim-i-1] += 1;
}
}
// Create the dataspace for the dataset.
hid_t filespace = H5Screate_simple( dim, dims_global, NULL );
assert(filespace>-1);
//std::cout << "h5g_pWrite: fileSPACE created!" << std::endl;
// Create the dataset with default properties and close filespace.
hid_t dset_id = H5Dcreate1( file_id,
groupname.c_str(),
HDF5_DATA_TYPE, // 32-bit or 64-bit, "datatypes.hh"
filespace,
H5P_DEFAULT );
H5Sclose(filespace);
assert(dset_id>-1);
//std::cout<< "h5g_pWrite: dataset created!" << std::endl;
// set the count in the different dimensions (determine the size of the hyperslab)
hsize_t count[ dim ];
for(UINT i=0; i<dim; i++ ) {
if(femtype==FEMType::CG){
// be careful: needed for vertex-based data instead of cell-based data
count[dim-i-1] = max_index[i] - min_index[i] + 2; // Q1-FEM: in each dimension, #nodes = #elements+1
}
else{
count[dim-i-1] = max_index[i] - min_index[i] + 1;
count[dim-i-1] *= blocksizePerDimension;
}
}
//define the total size of the local data
hsize_t nAllLocalCells = count[0] * count[1];
if( dim==3 )
nAllLocalCells *= count[2];
//set the offset of the data!
hsize_t offset[ dim ];
for(UINT i=0; i<dim; i++ ) {
if(femtype==FEMType::CG)
offset[dim-i-1] = min_index[i];
else
offset[dim-i-1] = min_index[i] * blocksizePerDimension;
}
// Each process defines dataset in memory and writes it to the hyperslab in the file.
hid_t memspace_id = -1; // stays -1 if this process has nothing to write
if(nAllLocalCells!=0){ // -> otherwise HDF5 warning, because of writing nothing!
memspace_id = H5Screate_simple(dim, count, NULL);
assert(memspace_id>-1);
}
//std::cout<< "h5g_pWrite: memspace created!" << std::endl;
// Select hyperslab in the file.
filespace = H5Dget_space( dset_id );
H5Sselect_hyperslab( filespace,
H5S_SELECT_SET,
offset,
NULL, // <==> stride={1,1,1}
count,
NULL // <==> block={1,1,1}
);
//std::cout<< "h5g_pWrite: hyperslab selected!" << std::endl;
// Create property list for collective dataset write.
plist_id = H5Pcreate( H5P_DATASET_XFER );
// all MPI processors of this communicator need to call this (no matter if they write or not)
H5Pset_dxpl_mpio( plist_id, H5FD_MPIO_COLLECTIVE );
//H5Pset_dxpl_mpio( plist_id, H5FD_MPIO_INDEPENDENT );
//std::cout<< "h5g_pWrite: properties set!" << std::endl;
// finally write the data to the disk
//std::cout<< "h5g_pWrite: writing ... " << std::endl;
if( nAllLocalCells != 0 ){
status = H5Dwrite( dset_id,
H5T_NATIVE_DOUBLE,
memspace_id,
filespace,
plist_id,
&( datavector[0] )
);
assert(status>-1);
}
else{ // avoid hdf5 blocking
status = H5Dwrite( dset_id,
H5T_NATIVE_DOUBLE,
H5S_ALL,
filespace,
plist_id,
&( datavector[0] )
);
assert(status>-1);
}
std::cout << "h5g_pWrite: .... done (writing) "
<< filename
<< std::endl;
// close the used identifiers
H5Dclose(dset_id);
H5Sclose(filespace);
if( memspace_id > -1 ) // only created when this process had data to write
H5Sclose(memspace_id);
H5Pclose(plist_id);
H5Fclose(file_id);
return;