#include "CnUncertainity.h"
#include "Subbasin.h"
#include "easylogging++.h"

#include <iostream>
#include <vector>

#include <mpi.h>

namespace math1d_cl
{

/// Constructor: caches every subbasin's original CN curve number so that
/// generateValues() can later randomize around those baseline values.
/// @param random  random number generator used for sampling
/// @param options Monte Carlo options (simulation count, deviation, limits)
/// @param matData model data holding the subbasins to read CN values from
CnUncertainity::CnUncertainity(std::shared_ptr<AbstractRandom> random,
                               UncertainityOptions options,
                               std::shared_ptr<MatData> matData)
    : AbstractParam(random, options, matData)
{
    m_subbasinValues.reserve(m_options.simulationCount);
    m_subbasinOriginalValues.reserve(matData->getSubbasins().size());

    // Get subbasin's CN curve number
    for(std::vector<std::shared_ptr<Subbasin>>::const_iterator it = matData->getSubbasins().begin();
        it != matData->getSubbasins().end(); ++it)
    {
        m_subbasinOriginalValues.push_back((*it)->getCn());
    }
}

/// Applies the pre-generated CN values of the current Monte Carlo iteration
/// to every subbasin in matData, then advances the iteration counter.
/// Exits the process with an error if more iterations are requested than
/// were generated.
void CnUncertainity::setParam(MatData &matData)
{
    // ">=" (not ">"): m_currentIterationNumber is used as an index below,
    // so index == size() would already be one past the end.
    if(m_currentIterationNumber >= m_subbasinValues.size())
    {
        // Message previously said "Manning uncertainity" (copy/paste from a
        // sibling class); this class handles CN uncertainty.
        std::cerr << "ERROR: CN uncertainity: Current iteration number bigger than generated values.";
        std::exit(-1);
    }
    else
    {
        for(size_t i = 0; i < matData.getSubbasins().size(); ++i)
        {
            matData.getSubbasins()[i]->setCn(m_subbasinValues[m_currentIterationNumber][i]);
        }
    }
    /* Do not forget to increment iteration number after each setParam call !*/
    m_currentIterationNumber++;
}

/// Generates randomized CN values for all Monte Carlo iterations.
/// The root rank (0) samples every value, keeps the first chunk for itself
/// and sends one chunk to each slave rank; slave ranks receive their chunk
/// and unpack it into m_subbasinValues.
void CnUncertainity::generateValues()
{
    int rank = -1, numProc = -1; // Holds rank of current process and total number of processes
    MPI_Comm_size(MPI_COMM_WORLD, &numProc);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // NOTE(review): iterations are split evenly between processes; if
    // simulationCount is not divisible by numProc the remainder iterations
    // are silently dropped — confirm callers guarantee divisibility.
    const size_t chunkSize = m_options.simulationCount / numProc;                  // Monte Carlo iterations per chunk
    const size_t valuesPerChunk = m_subbasinOriginalValues.size() * chunkSize;     // Values per chunk

    // One chunk of values for one process. std::vector replaces the previous
    // raw new[]/delete[] pair so the buffer is released on every exit path.
    std::vector<double> randomizedValuesChunk(valuesPerChunk);

    int slaveRank = 1; // Next slave process to receive a finished chunk

    if(rank == 0)
    {
        /* Generate randomized values */
        CLOG(INFO,"montecarlo") << "Randomizing CN curve numbers...";
        CLOG(INFO,"montecarlo") << "[MPI]Total chunk count: " << m_options.simulationCount/chunkSize;
        CLOG(INFO,"montecarlo") << "[MPI]Chunksize: " << chunkSize;

        for(size_t mc = 0; mc < (size_t)m_options.simulationCount; ++mc)
        {
            double min = 0.0, max = 0.0; // Random generator boundaries

            // Subbasins
            for(size_t i = 0; i < m_subbasinOriginalValues.size(); ++i)
            {
                // Flat index of this value inside the current chunk buffer
                int chunkNum = i + ((mc % chunkSize) * m_subbasinOriginalValues.size());

                if(m_subbasinOriginalValues[i] >= 0 && m_options.cnDeviation != 0) // Skip not defined values (-999.999)
                {
                    switch(m_options.cnRandType)
                    {
                        case RandomType::KERNEL_DENSITY :
                            std::cerr << "ERROR: CN cannot be modeled by kernel density.";
                            std::exit(-1);
                            break;
                        case RandomType::UNIFORM :
                            // Lower and upper boundary of selected deviation
                            // Mind limits given in options
                            min = keepLimits(m_subbasinOriginalValues[i] - (m_subbasinOriginalValues[i] * m_options.cnDeviation), m_options.cnLimits);
                            max = keepLimits(m_subbasinOriginalValues[i] + (m_subbasinOriginalValues[i] * m_options.cnDeviation), m_options.cnLimits);
                            // Sample random value within computed boundaries
                            randomizedValuesChunk[chunkNum] = m_random->getRandDouble(min, max);
                            break;
                        case RandomType::NORMAL :
                            {
                                double mean = 0.0;
                                // 2% relative deviation spread over 3 sigma
                                double stddev = (m_subbasinOriginalValues[i] * 0.02) / 3;
                                randomizedValuesChunk[chunkNum] = keepLimits(m_subbasinOriginalValues[i] + m_random->getRandDouble(mean, stddev), m_options.cnLimits);
                            }
                            break;
                        default:
                            std::cerr << "ERROR: Bad random function, check random function options.";
                            std::exit(-1);
                            break;
                    }
                }
                else
                {
                    // Undefined CN or zero deviation: keep the original value
                    randomizedValuesChunk[chunkNum] = m_subbasinOriginalValues[i];
                }
            }

            if(((mc+1) % chunkSize) == 0) // One chunk generated, send it to its process
            {
                if((mc+1) == chunkSize) // Keep first chunk for the root process itself
                {
                    for(size_t iter = 0; iter < chunkSize; ++iter)
                    {
                        std::vector<double> iterationBuffer;
                        for(size_t sub = 0; sub < m_subbasinOriginalValues.size(); ++sub)
                        {
                            iterationBuffer.push_back(randomizedValuesChunk[sub + (iter * m_subbasinOriginalValues.size())]);
                        }
                        m_subbasinValues.push_back(iterationBuffer);
                    }
                    CLOG(INFO,"montecarlo") << "[MPI]Root rank: Got " << m_subbasinValues.size() << " iterations in chunk";
                }
                else
                {
                    // Send chunk to its respective process
                    CLOG(INFO,"montecarlo") << "[MPI]Sent chunk no." << mc/chunkSize << " to slave rank: " << slaveRank;
                    MPI_Send(randomizedValuesChunk.data(), static_cast<int>(valuesPerChunk), MPI_DOUBLE, slaveRank, m_tag, MPI_COMM_WORLD);
                    // Increment slave rank to next process
                    slaveRank++;
                }
            }
        }
        CLOG(INFO,"montecarlo") << "DONE.";
    }

    /* Slave ranks: receive their chunk of values from the root */
    if(rank > 0)
    {
        MPI_Status mpiStat;
        MPI_Recv(randomizedValuesChunk.data(), static_cast<int>(valuesPerChunk), MPI_DOUBLE, 0, m_tag, MPI_COMM_WORLD, &mpiStat);
        // Fixed log: the previous version accidentally appended the unrelated
        // local "slaveRank" after the source rank.
        CLOG(INFO,"montecarlo") << "[MPI]Slave rank " << rank << ": Received chunk from " << mpiStat.MPI_SOURCE;

        // Copy received values to member vector, one inner vector per iteration
        for(size_t iter = 0; iter < chunkSize; ++iter)
        {
            std::vector<double> iterationBuffer;
            for(size_t sub = 0; sub < m_subbasinOriginalValues.size(); ++sub)
            {
                iterationBuffer.push_back(randomizedValuesChunk[sub + (iter * m_subbasinOriginalValues.size())]);
            }
            m_subbasinValues.push_back(iterationBuffer);
        }
    }
}

} // namespace math1d_cl