Page MenuHomec4science

reader.cpp
No OneTemporary

File Metadata

Created
Sat, May 11, 11:27

reader.cpp

//
// Created by Arnaud Pannatier on 06.05.18.
//
#include "reader.h"

#include <fstream>
#include <iostream>
#include <limits>
#include <vector>
Grid Reader::readGridFromFile (std::string filename,int nx, int ny) {
    /// Read an nx-by-ny grid of doubles from a binary file (row-major layout:
    /// element (x, y) is stored at flat index y + ny * x).
    /// @param filename path of the binary file to read
    /// @param nx number of rows
    /// @param ny number of columns
    /// @return the populated Grid; a default-constructed Grid(nx, ny) if the
    ///         file could not be opened
    Grid ret = Grid(nx, ny);

    std::ifstream file(filename, std::ios::in | std::ios::binary);
    if (file.is_open()) {
        // Measure the file size by consuming the stream once, then rewind.
        file.ignore(std::numeric_limits<std::streamsize>::max());
        std::streamsize size = file.gcount();
        file.clear();
        file.seekg(0, std::ios::beg);

        // Read all bytes into an RAII buffer. (The original used a raw
        // new char[size] released with scalar delete, which is undefined
        // behavior for arrays; std::vector frees itself correctly.)
        std::vector<char> memblock(static_cast<std::size_t>(size));
        file.read(memblock.data(), size);
        file.close();

        // Reinterpret the raw bytes as doubles and copy them into the grid.
        const double* double_values = reinterpret_cast<const double*>(memblock.data());
        for (int x = 0; x < nx; x++) {
            for (int y = 0; y < ny; y++) {
                ret(x, y) = double_values[y + ny * x];
            }
        }
    } else {
        std::cout<< "File is not open ! " << std::endl;
    }
    return ret;
}
Grid Reader::ParallelReadFromFile (std::string filename,int nx, int ny, MPI_Comm communicator) {
    /// Read this rank's contiguous nx-by-ny slice of a shared binary file of
    /// doubles using MPI ordered reads (rank order determines file order).
    /// @param filename path of the shared binary file
    /// @param nx number of local non-ghost rows for this rank
    /// @param ny number of columns
    /// @param communicator MPI communicator shared by all reading ranks
    /// @return Grid of size (nx + ghost rows) x ny; ghost rows are not filled here
    int prank, psize;
    MPI_Comm_rank(communicator, &prank);
    MPI_Comm_size(communicator, &psize);
    MPI_Status status;

    /// Local grid size including ghost rows: boundary ranks get one ghost
    /// line, interior ranks get two.
    int local_x = nx;
    int local_y = ny;
    if (psize > 1)
        local_x += (prank == 0 || prank == psize - 1) ? 1 : 2;

    /// Only the non-ghost rows receive data from the file.
    int x_start = (prank == 0 ? 0 : 1);
    int x_end = (prank == psize - 1 ? local_x : local_x - 1);

    Grid ret(local_x, local_y);

    /// Open the file collectively with MPI-IO.
    MPI_File file;
    MPI_File_open(communicator, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file);

    /// RAII buffer for this rank's share. (The original raw new[] buffer was
    /// released with scalar delete, which is undefined behavior.)
    std::vector<double> buffer(static_cast<std::size_t>(nx) * ny);
    MPI_File_read_ordered(file, buffer.data(), nx * ny, MPI_DOUBLE, &status);

    /// Unpack the flat row-major buffer into the grid's non-ghost rows.
    int k = 0;
    for (int x = x_start; x < x_end; x++) {
        for (int y = 0; y < ny; y++) {
            ret(x, y) = buffer[k];
            k++;
        }
    }

    MPI_File_close(&file);
    return ret;
}
Grid Reader::ParallelReadFromColumnMajorFile(std::string filename,int nx, int ny,int offset, MPI_Comm communicator) {
    /// Read this rank's nx-by-ny slice from a file stored in column-major
    /// order, using a strided MPI datatype so each rank picks out its rows.
    /// @param filename path of the shared binary file
    /// @param nx number of local non-ghost rows for this rank
    /// @param ny number of columns
    /// @param offset element offset (in doubles) of this rank's first value
    /// @param communicator MPI communicator shared by all reading ranks
    /// @return Grid of size (nx + ghost rows) x ny; ghost rows are not filled here
    int prank, psize;
    MPI_Comm_rank(communicator, &prank);
    MPI_Comm_size(communicator, &psize);
    MPI_Status status;

    /// Local grid size including ghost rows: boundary ranks get one ghost
    /// line, interior ranks get two.
    int local_x = nx;
    int local_y = ny;
    if (psize > 1)
        local_x += (prank == 0 || prank == psize - 1) ? 1 : 2;

    /// Only the non-ghost rows receive data from the file.
    int x_start = (prank == 0 ? 0 : 1);
    int x_end = (prank == psize - 1 ? local_x : local_x - 1);

    /// Strided datatype for column-major access. Example: reading
    /// [1 2 3]
    /// [4 5 6]
    /// [7 8 9]
    /// stored column-major with two processes, the second process uses
    /// count = ny = 3 blocks, blocklength = nx = 1, stride = ny = 3,
    /// and therefore reads [3, 6, 9].
    MPI_Datatype vec;
    MPI_Type_vector(ny, nx, ny, MPI_DOUBLE, &vec);
    MPI_Type_commit(&vec);

    /// Open the file and position this rank's view at its element offset.
    MPI_File file;
    MPI_File_open(communicator, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file);
    MPI_Offset disp = sizeof(double) * offset;
    MPI_File_set_view(file, disp, MPI_DOUBLE, vec, "native", MPI_INFO_NULL);

    /// RAII buffer for the strided read. (The original raw new[] buffer was
    /// released with scalar delete, which is undefined behavior.)
    std::vector<double> values(static_cast<std::size_t>(nx) * ny);
    MPI_File_read_all(file, values.data(), nx * ny, MPI_DOUBLE, &status);

    /// The buffer arrives column by column: y is the outer loop.
    Grid ret(local_x, local_y);
    int k = 0;
    for (int y = 0; y < ny; y++) {
        for (int x = x_start; x < x_end; x++) {
            ret(x, y) = values[k];
            k++;
        }
    }

    /// Release MPI resources: the file handle and the committed datatype
    /// (the original leaked `vec` by never calling MPI_Type_free).
    MPI_File_close(&file);
    MPI_Type_free(&vec);
    return ret;
}
void Reader::writeGridInFile (Grid& g,std::string filename, int nx, int ny ) {
    /// Serialize a grid to a binary file as raw doubles.
    /// @param g grid to write (accessed transposed, as g(y, x))
    /// @param filename destination file path
    /// @param nx, ny dimensions used for the output layout
    /// Flatten the grid into an RAII buffer. (The original raw new[] buffer
    /// was released with scalar delete, which is undefined behavior.)
    std::vector<double> double_values(static_cast<std::size_t>(nx) * ny);
    for (int x = 0; x < nx; x++) {
        for (int y = 0; y < ny; y++) {
            // NOTE(review): the stride here is nx, not ny — this matches the
            // row-major layout expected by readGridFromFile only when
            // nx == ny. Confirm against callers before using rectangular grids.
            double_values[x * nx + y] = g(y, x);
        }
    }

    std::ofstream file(filename, std::ios::out | std::ios::binary);
    // Write sizeof(double) bytes per element. The original wrote
    // sizeof(memblock) * nx * ny, i.e. the size of a char POINTER per element,
    // which is only accidentally correct on platforms with 8-byte pointers.
    file.write(reinterpret_cast<const char*>(double_values.data()),
               static_cast<std::streamsize>(sizeof(double) * nx * ny));
    std::cout << "Grid Saved ! " << std::endl;
}
void Reader::ParallelwriteGridInFile (double* g,std::string filename, int nx, int ny, MPI_Comm communicator) {
    /// Write this rank's nx*ny doubles to a shared binary file using MPI
    /// ordered writes, so the data lands in the file in rank order.
    /// @param g pointer to this rank's nx*ny contiguous values
    /// @param filename destination file path (created if absent)
    /// @param nx number of local rows
    /// @param ny number of columns
    /// @param communicator MPI communicator shared by all writing ranks
    /// (The original queried prank/psize here but never used them; the dead
    /// locals have been removed.)
    MPI_Status status;

    /// Open the file collectively with MPI-IO.
    MPI_File file;
    MPI_File_open(communicator, filename.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &file);

    /// Each rank writes its contiguous block in rank order.
    MPI_File_write_ordered(file, g, nx * ny, MPI_DOUBLE, &status);

    MPI_File_close(&file);
}
void Reader::ParallelwriteGridInColumnMajorFile(double* g,std::string filename, int nx, int ny,int offset, MPI_Comm communicator) {
/// Get info on the parallel information from communicator
int prank, psize;
MPI_Comm_rank(communicator, &prank);
MPI_Comm_size(communicator, &psize);
MPI_Status status;
/// Define a new type for reading in column major, assume that we want to read the following grid [1 2 3] in column major with two process
/// [4 5 6]
/// [7 8 9]
/// We read ny number of block , each block contains nx elements (corresponding to the row), and with ny elements between the start of each block
/// For the second process : ny = 3, nx = 1, ny = 3 -> we read [3,6,9]
MPI_Datatype vec;
MPI_Type_vector (ny,nx,ny,MPI_DOUBLE,&vec);
MPI_Type_commit (&vec);
/// Open file with MPI
MPI_File file;
MPI_File_open( communicator, filename.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &file );
/// Set the offset and the view for the file
MPI_Offset disp = sizeof(double)*offset;
MPI_File_set_view(file,disp, MPI_DOUBLE, vec,"native", MPI_INFO_NULL);
/// Write in Column major
MPI_File_write_all ( file, g, nx*ny, MPI_DOUBLE, &status );
/// Clean
MPI_File_close( &file );
}
std::vector<double> Reader::PrepareVectorForColumnMajor (std::vector<double> arr, int nx, int ny) {
    /// Reorder a flat row-major nx-by-ny array into column-major order so it
    /// can be written with the column-major writers.
    /// @param arr flat input; element (x, y) lives at index y + ny * x
    /// @param nx number of rows
    /// @param ny number of columns
    /// @return a new flat vector holding the same values column by column
    std::vector<double> transposed(static_cast<std::size_t>(ny) * nx);
    std::size_t out = 0;
    for (int col = 0; col < ny; ++col) {
        // Walk down one column: successive elements sit ny apart in the input.
        for (int row = 0; row < nx; ++row) {
            transposed[out++] = arr[col + ny * row];
        }
    }
    return transposed;
}

Event Timeline