Compare commits

...

12 Commits

16 changed files with 4124 additions and 919 deletions

View File

@@ -29,16 +29,11 @@ using namespace std;
#error "not define ABEtype"
#endif
#if (ABEtype == 0)
#ifdef USE_GPU
#include "bssn_gpu_class.h"
#else
#include "bssn_class.h"
#endif
#elif (ABEtype == 1)
#include "bssnEScalar_class.h"
#if (ABEtype == 0)
#include "bssn_class.h"
#elif (ABEtype == 1)
#include "bssnEScalar_class.h"
#elif (ABEtype == 2)
#include "Z4c_class.h"

View File

@@ -2,29 +2,88 @@
#include <iostream>
#include <iomanip>
#include <fstream>
#include <cstdlib>
#include <cstdio>
#include <string>
#include <cmath>
#include <new>
#include <vector>
using namespace std;
#include <cstdlib>
#include <cstdio>
#include <string>
#include <cmath>
#include <new>
#include <map>
#include <vector>
using namespace std;
#include "misc.h"
#include "MPatch.h"
#include "Parallel.h"
#include "fmisc.h"
#ifdef INTERP_LB_PROFILE
#include "interp_lb_profile.h"
#endif
namespace
{
struct InterpBlockView
{
Block *bp;
double llb[dim];
double uub[dim];
#include "misc.h"
#include "MPatch.h"
#include "Parallel.h"
#include "fmisc.h"
#include "bssn_cuda_ops.h"
#ifdef INTERP_LB_PROFILE
#include "interp_lb_profile.h"
#endif
#if defined(__GNUC__) || defined(__clang__)
extern int bssn_cuda_interp_points_batch(const int *ex,
const double *X, const double *Y, const double *Z,
const double *const *fields,
const double *soa_flat,
int num_var,
const double *px, const double *py, const double *pz,
int num_points,
int ordn,
int symmetry,
double *out) __attribute__((weak));
#endif
namespace
{
struct InterpVarDesc
{
int sgfn;
double soa[dim];
};
struct InterpPlanKey
{
const Patch *patch;
const double *x;
const double *y;
const double *z;
int NN;
int Symmetry;
int myrank;
};
struct InterpPlanKeyLess
{
bool operator()(const InterpPlanKey &lhs, const InterpPlanKey &rhs) const
{
if (lhs.patch != rhs.patch) return lhs.patch < rhs.patch;
if (lhs.x != rhs.x) return lhs.x < rhs.x;
if (lhs.y != rhs.y) return lhs.y < rhs.y;
if (lhs.z != rhs.z) return lhs.z < rhs.z;
if (lhs.NN != rhs.NN) return lhs.NN < rhs.NN;
if (lhs.Symmetry != rhs.Symmetry) return lhs.Symmetry < rhs.Symmetry;
return lhs.myrank < rhs.myrank;
}
};
struct CachedInterpPlan
{
int nblocks;
vector<int> owner_rank;
vector<int> owner_block;
vector<vector<int> > block_points;
vector<vector<double> > block_px;
vector<vector<double> > block_py;
vector<vector<double> > block_pz;
CachedInterpPlan() : nblocks(0) {}
};
struct InterpBlockView
{
Block *bp;
double llb[dim];
double uub[dim];
};
struct BlockBinIndex
@@ -154,10 +213,10 @@ void build_block_bin_index(Patch *patch, const double *DH, BlockBinIndex &index)
index.valid = true;
}
int find_block_index_for_point(const BlockBinIndex &index, const double *pox, const double *DH)
{
if (!index.valid)
return -1;
int find_block_index_for_point(const BlockBinIndex &index, const double *pox, const double *DH)
{
if (!index.valid)
return -1;
const int bx = coord_to_bin(pox[0], index.lo[0], index.inv[0], index.bins[0]);
const int by = coord_to_bin(pox[1], index.lo[1], index.inv[1], index.bins[1]);
@@ -175,10 +234,259 @@ int find_block_index_for_point(const BlockBinIndex &index, const double *pox, co
for (size_t bi = 0; bi < index.views.size(); bi++)
if (point_in_block_view(index.views[bi], pox, DH))
return int(bi);
return -1;
}
} // namespace
return -1;
}
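
The bin index above maps each query point to a uniform spatial bin and only falls back to a linear scan over all block views when the bin lookup misses, so the common case is O(1) per point. A minimal sketch of the per-axis binning, assuming the index stores a lower bound lo, an inverse bin width inv, and a bin count bins per dimension (the repo's coord_to_bin may differ in details):

    // Sketch only: clamp one coordinate into a bin index along one axis.
    inline int coord_to_bin_sketch(double x, double lo, double inv, int bins)
    {
        int b = static_cast<int>((x - lo) * inv);  // inv is roughly bins / (hi - lo)
        if (b < 0) b = 0;                          // clamp points at or below the lower bound
        if (b >= bins) b = bins - 1;               // clamp points at or above the upper bound
        return b;
    }
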
void collect_interp_vars(MyList<var> *VarList, vector<InterpVarDesc> &vars)
{
vars.clear();
MyList<var> *varl = VarList;
while (varl)
{
InterpVarDesc desc;
desc.sgfn = varl->data->sgfn;
for (int d = 0; d < dim; ++d)
desc.soa[d] = varl->data->SoA[d];
vars.push_back(desc);
varl = varl->next;
}
}
bool should_try_cuda_interp(int ordn, int num_points, int num_var)
{
#if defined(__GNUC__) || defined(__clang__)
if (!bssn_cuda_interp_points_batch)
return false;
#else
return false;
#endif
if (ordn != 6)
return false;
if (num_points < 32)
return false;
return num_points * num_var >= 256;
}
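
should_try_cuda_interp only routes a batch to the GPU when the weakly linked bssn_cuda_interp_points_batch symbol actually resolved and the batch is large enough to amortize the launch: order-6 interpolation only, at least 32 points, and points times variables at least 256 (so 40 points of 10 variables qualifies, 20 points never does). A minimal sketch of the same weak-symbol fallback pattern, using a hypothetical accelerated routine fast_sum that is not part of this repo:

    #if defined(__GNUC__) || defined(__clang__)
    // With __attribute__((weak)) the program still links when no object file
    // provides fast_sum; the symbol then evaluates to a null pointer at run time.
    extern double fast_sum(const double *v, int n) __attribute__((weak));
    #endif

    double sum_with_optional_accel(const double *v, int n)
    {
    #if defined(__GNUC__) || defined(__clang__)
        if (fast_sum && n >= 256)   // symbol resolved and the batch is large enough
            return fast_sum(v, n);
    #endif
        double s = 0.0;             // portable CPU fallback
        for (int i = 0; i < n; ++i)
            s += v[i];
        return s;
    }
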
CachedInterpPlan &get_cached_interp_plan(Patch *patch,
int NN, double **XX,
int Symmetry, int myrank,
const double *DH,
const BlockBinIndex &block_index,
bool report_bounds_here,
bool allow_missing_points)
{
static map<InterpPlanKey, CachedInterpPlan, InterpPlanKeyLess> cache;
InterpPlanKey key;
key.patch = patch;
key.x = XX[0];
key.y = XX[1];
key.z = XX[2];
key.NN = NN;
key.Symmetry = Symmetry;
key.myrank = myrank;
map<InterpPlanKey, CachedInterpPlan, InterpPlanKeyLess>::iterator it = cache.find(key);
if (it != cache.end() && it->second.nblocks == static_cast<int>(block_index.views.size()))
return it->second;
CachedInterpPlan &plan = cache[key];
plan = CachedInterpPlan();
plan.nblocks = static_cast<int>(block_index.views.size());
plan.owner_rank.assign(NN, -1);
plan.owner_block.assign(NN, -1);
plan.block_points.resize(plan.nblocks);
plan.block_px.resize(plan.nblocks);
plan.block_py.resize(plan.nblocks);
plan.block_pz.resize(plan.nblocks);
for (int j = 0; j < NN; ++j)
{
double pox[dim];
for (int i = 0; i < dim; ++i)
{
pox[i] = XX[i][j];
if (report_bounds_here &&
(XX[i][j] < patch->bbox[i] + patch->lli[i] * DH[i] ||
XX[i][j] > patch->bbox[dim + i] - patch->uui[i] * DH[i]))
{
cout << "Patch::Interp_Points: point (";
for (int k = 0; k < dim; ++k)
{
cout << XX[k][j];
if (k < dim - 1)
cout << ",";
else
cout << ") is out of current Patch." << endl;
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
const int block_i = find_block_index_for_point(block_index, pox, DH);
if (block_i >= 0)
{
Block *BP = block_index.views[block_i].bp;
plan.owner_rank[j] = BP->rank;
plan.owner_block[j] = block_i;
if (BP->rank == myrank)
{
plan.block_points[block_i].push_back(j);
plan.block_px[block_i].push_back(XX[0][j]);
plan.block_py[block_i].push_back(XX[1][j]);
plan.block_pz[block_i].push_back(XX[2][j]);
}
}
}
if (!allow_missing_points && report_bounds_here)
{
for (int j = 0; j < NN; ++j)
{
if (plan.owner_rank[j] >= 0)
continue;
cout << "ERROR: Patch::Interp_Points fails to find point (";
for (int d = 0; d < dim; ++d)
{
cout << XX[d][j];
if (d < dim - 1)
cout << ",";
else
cout << ")";
}
cout << " on Patch (";
for (int d = 0; d < dim; ++d)
{
cout << patch->bbox[d] << "+" << patch->lli[d] * DH[d];
if (d < dim - 1)
cout << ",";
else
cout << ")--";
}
cout << "(";
for (int d = 0; d < dim; ++d)
{
cout << patch->bbox[dim + d] << "-" << patch->uui[d] * DH[d];
if (d < dim - 1)
cout << ",";
else
cout << ")" << endl;
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
return plan;
}
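
get_cached_interp_plan memoizes the point-ownership plan on the identity of the patch, the three coordinate arrays, the point count, the symmetry flag, and the rank, so repeated calls that pass the same buffers skip the per-point block search; the nblocks comparison forces a rebuild when the block decomposition changes. Because the key uses pointer identity, the caching assumes callers reuse the same XX arrays with unchanged contents. A reduced sketch of that memoization pattern (names here are illustrative, not the repo's):

    #include <map>

    struct PlanKeySketch { const void *patch; const double *x; int n; };
    struct PlanKeySketchLess {
        bool operator()(const PlanKeySketch &a, const PlanKeySketch &b) const
        {
            if (a.patch != b.patch) return a.patch < b.patch;
            if (a.x != b.x) return a.x < b.x;
            return a.n < b.n;
        }
    };
    struct PlanSketch { int nblocks; /* ownership tables would live here */ };

    PlanSketch &get_plan_sketch(const void *patch, const double *x, int n, int nblocks)
    {
        static std::map<PlanKeySketch, PlanSketch, PlanKeySketchLess> cache;
        PlanKeySketch key = {patch, x, n};
        PlanSketch &plan = cache[key];   // value-initialized (nblocks == 0) on first use
        if (plan.nblocks != nblocks)     // first use, or decomposition changed: rebuild
        {
            plan = PlanSketch();
            plan.nblocks = nblocks;
            // ... fill the ownership tables for the current blocks ...
        }
        return plan;
    }
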
bool run_cuda_interp_for_block(Block *BP,
const vector<InterpVarDesc> &vars,
const vector<int> &point_ids,
const vector<double> &px,
const vector<double> &py,
const vector<double> &pz,
double *Shellf,
int num_var,
int ordn,
int Symmetry)
{
if (!should_try_cuda_interp(ordn, static_cast<int>(point_ids.size()), num_var))
return false;
vector<const double *> field_ptrs(num_var);
vector<double> soa_flat(3 * num_var);
for (int v = 0; v < num_var; ++v)
{
field_ptrs[v] = BP->fgfs[vars[v].sgfn];
for (int d = 0; d < dim; ++d)
soa_flat[3 * v + d] = vars[v].soa[d];
}
const int npts = static_cast<int>(point_ids.size());
vector<double> out(static_cast<size_t>(npts) * static_cast<size_t>(num_var));
if (bssn_cuda_interp_points_batch(BP->shape,
BP->X[0], BP->X[1], BP->X[2],
field_ptrs.data(),
soa_flat.data(),
num_var,
px.data(), py.data(), pz.data(),
npts,
ordn,
Symmetry,
out.data()) != 0)
{
return false;
}
for (int p = 0; p < npts; ++p)
{
const int j = point_ids[p];
memcpy(Shellf + j * num_var, out.data() + p * num_var, sizeof(double) * num_var);
}
return true;
}
void run_cpu_interp_for_block(Block *BP,
const vector<InterpVarDesc> &vars,
const vector<int> &point_ids,
const vector<double> &px,
const vector<double> &py,
const vector<double> &pz,
double *Shellf,
int num_var,
int ordn,
int Symmetry)
{
for (size_t p = 0; p < point_ids.size(); ++p)
{
const int j = point_ids[p];
double x = px[p];
double y = py[p];
double z = pz[p];
int ordn_local = ordn;
int symmetry_local = Symmetry;
for (int v = 0; v < num_var; ++v)
{
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2],
BP->fgfs[vars[v].sgfn], Shellf[j * num_var + v],
x, y, z, ordn_local, const_cast<double *>(vars[v].soa), symmetry_local);
}
}
}
void interpolate_owned_points(MyList<var> *VarList,
double *Shellf, int Symmetry,
int ordn,
const BlockBinIndex &block_index,
const CachedInterpPlan &plan)
{
vector<InterpVarDesc> vars;
collect_interp_vars(VarList, vars);
const int num_var = static_cast<int>(vars.size());
for (size_t bi = 0; bi < plan.block_points.size(); ++bi)
{
if (plan.block_points[bi].empty())
continue;
Block *BP = block_index.views[bi].bp;
bool done = run_cuda_interp_for_block(BP, vars,
plan.block_points[bi],
plan.block_px[bi],
plan.block_py[bi],
plan.block_pz[bi],
Shellf, num_var, ordn, Symmetry);
if (!done)
run_cpu_interp_for_block(BP, vars,
plan.block_points[bi],
plan.block_px[bi],
plan.block_py[bi],
plan.block_pz[bi],
Shellf, num_var, ordn, Symmetry);
}
}
} // namespace
Patch::Patch(int DIM, int *shapei, double *bboxi, int levi, bool buflog, int Symmetry) : lev(levi)
{
@@ -523,60 +831,15 @@ void Patch::Interp_Points(MyList<var> *VarList,
memset(Shellf, 0, sizeof(double) * NN * num_var);
// owner_rank[j] records which MPI rank owns point j
// All ranks traverse the same block list so they all agree on ownership
int *owner_rank;
owner_rank = new int[NN];
for (int j = 0; j < NN; j++)
owner_rank[j] = -1;
double DH[dim];
for (int i = 0; i < dim; i++)
DH[i] = getdX(i);
BlockBinIndex block_index;
build_block_bin_index(this, DH, block_index);
for (int j = 0; j < NN; j++) // run along points
{
double pox[dim];
for (int i = 0; i < dim; i++)
{
pox[i] = XX[i][j];
if (myrank == 0 && (XX[i][j] < bbox[i] + lli[i] * DH[i] || XX[i][j] > bbox[dim + i] - uui[i] * DH[i]))
{
cout << "Patch::Interp_Points: point (";
for (int k = 0; k < dim; k++)
{
cout << XX[k][j];
if (k < dim - 1)
cout << ",";
else
cout << ") is out of current Patch." << endl;
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
const int block_i = find_block_index_for_point(block_index, pox, DH);
if (block_i >= 0)
{
Block *BP = block_index.views[block_i].bp;
owner_rank[j] = BP->rank;
if (myrank == BP->rank)
{
//---> interpolation
varl = VarList;
int k = 0;
while (varl) // run along variables
{
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
varl = varl->next;
k++;
}
}
}
}
double DH[dim];
for (int i = 0; i < dim; i++)
DH[i] = getdX(i);
BlockBinIndex block_index;
build_block_bin_index(this, DH, block_index);
CachedInterpPlan &plan = get_cached_interp_plan(this, NN, XX, Symmetry, myrank, DH, block_index, myrank == 0, false);
const int *owner_rank = plan.owner_rank.data();
interpolate_owned_points(VarList, Shellf, Symmetry, ordn, block_index, plan);
// Replace MPI_Allreduce with per-owner MPI_Bcast:
// Group consecutive points by owner rank and broadcast each group.
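
The grouping works because every rank computed the same owner_rank array, so all ranks post an identical sequence of broadcasts; consecutive points sharing an owner collapse into a single MPI_Bcast over a contiguous slice of Shellf. A sketch of that loop, using the variable names that appear in the hunk below (jstart, count, cur_owner):

    // Sketch of the per-owner broadcast grouping described above.
    int j = 0;
    while (j < NN)
    {
        const int cur_owner = owner_rank[j];
        const int jstart = j;
        while (j < NN && owner_rank[j] == cur_owner)
            j++;                                   // extend the run of equal owners
        if (cur_owner < 0)
            continue;                              // unfound points keep their zeros
        const int count = (j - jstart) * num_var;
        MPI_Bcast(Shellf + jstart * num_var, count, MPI_DOUBLE, cur_owner, MPI_COMM_WORLD);
    }
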
@@ -631,9 +894,8 @@ void Patch::Interp_Points(MyList<var> *VarList,
MPI_Bcast(Shellf + jstart * num_var, count, MPI_DOUBLE, cur_owner, MPI_COMM_WORLD);
}
}
delete[] owner_rank;
}
}
void Patch::Interp_Points(MyList<var> *VarList,
int NN, double **XX,
double *Shellf, int Symmetry,
@@ -661,102 +923,22 @@ void Patch::Interp_Points(MyList<var> *VarList,
memset(Shellf, 0, sizeof(double) * NN * num_var);
// owner_rank[j] records which MPI rank owns point j
int *owner_rank;
owner_rank = new int[NN];
for (int j = 0; j < NN; j++)
owner_rank[j] = -1;
double DH[dim];
for (int i = 0; i < dim; i++)
DH[i] = getdX(i);
BlockBinIndex block_index;
build_block_bin_index(this, DH, block_index);
// --- Interpolation phase (identical to original) ---
for (int j = 0; j < NN; j++)
{
double pox[dim];
for (int i = 0; i < dim; i++)
{
pox[i] = XX[i][j];
if (myrank == 0 && (XX[i][j] < bbox[i] + lli[i] * DH[i] || XX[i][j] > bbox[dim + i] - uui[i] * DH[i]))
{
cout << "Patch::Interp_Points: point (";
for (int k = 0; k < dim; k++)
{
cout << XX[k][j];
if (k < dim - 1)
cout << ",";
else
cout << ") is out of current Patch." << endl;
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
const int block_i = find_block_index_for_point(block_index, pox, DH);
if (block_i >= 0)
{
Block *BP = block_index.views[block_i].bp;
owner_rank[j] = BP->rank;
if (myrank == BP->rank)
{
varl = VarList;
int k = 0;
while (varl)
{
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
varl = varl->next;
k++;
}
}
}
}
double DH[dim];
for (int i = 0; i < dim; i++)
DH[i] = getdX(i);
BlockBinIndex block_index;
build_block_bin_index(this, DH, block_index);
CachedInterpPlan &plan = get_cached_interp_plan(this, NN, XX, Symmetry, myrank, DH, block_index, myrank == 0, false);
const int *owner_rank = plan.owner_rank.data();
interpolate_owned_points(VarList, Shellf, Symmetry, ordn, block_index, plan);
#ifdef INTERP_LB_PROFILE
double t_interp_end = MPI_Wtime();
double t_interp_local = t_interp_end - t_interp_start;
#endif
// --- Error check for unfound points ---
for (int j = 0; j < NN; j++)
{
if (owner_rank[j] < 0 && myrank == 0)
{
cout << "ERROR: Patch::Interp_Points fails to find point (";
for (int d = 0; d < dim; d++)
{
cout << XX[d][j];
if (d < dim - 1)
cout << ",";
else
cout << ")";
}
cout << " on Patch (";
for (int d = 0; d < dim; d++)
{
cout << bbox[d] << "+" << lli[d] * DH[d];
if (d < dim - 1)
cout << ",";
else
cout << ")--";
}
cout << "(";
for (int d = 0; d < dim; d++)
{
cout << bbox[dim + d] << "-" << uui[d] * DH[d];
if (d < dim - 1)
cout << ",";
else
cout << ")" << endl;
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
// --- Targeted point-to-point communication phase ---

// Compute consumer_rank[j] using the same deterministic formula as surface_integral
int *consumer_rank = new int[NN];
{
@@ -873,9 +1055,8 @@ void Patch::Interp_Points(MyList<var> *VarList,
delete[] send_offset;
delete[] recv_offset;
delete[] send_count;
delete[] recv_count;
delete[] consumer_rank;
delete[] owner_rank;
delete[] recv_count;
delete[] consumer_rank;
#ifdef INTERP_LB_PROFILE
{
@@ -923,64 +1104,20 @@ void Patch::Interp_Points(MyList<var> *VarList,
memset(Shellf, 0, sizeof(double) * NN * num_var);
// owner_rank[j] stores the global rank that owns point j
int *owner_rank;
owner_rank = new int[NN];
for (int j = 0; j < NN; j++)
owner_rank[j] = -1;
// Build global-to-local rank translation for Comm_here
MPI_Group world_group, local_group;
MPI_Comm_group(MPI_COMM_WORLD, &world_group);
MPI_Comm_group(Comm_here, &local_group);
// Build global-to-local rank translation for Comm_here
MPI_Group world_group, local_group;
MPI_Comm_group(MPI_COMM_WORLD, &world_group);
MPI_Comm_group(Comm_here, &local_group);
double DH[dim];
for (int i = 0; i < dim; i++)
DH[i] = getdX(i);
BlockBinIndex block_index;
build_block_bin_index(this, DH, block_index);
for (int j = 0; j < NN; j++) // run along points
{
double pox[dim];
for (int i = 0; i < dim; i++)
{
pox[i] = XX[i][j];
if (lmyrank == 0 && (XX[i][j] < bbox[i] + lli[i] * DH[i] || XX[i][j] > bbox[dim + i] - uui[i] * DH[i]))
{
cout << "Patch::Interp_Points: point (";
for (int k = 0; k < dim; k++)
{
cout << XX[k][j];
if (k < dim - 1)
cout << ",";
else
cout << ") is out of current Patch." << endl;
}
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
const int block_i = find_block_index_for_point(block_index, pox, DH);
if (block_i >= 0)
{
Block *BP = block_index.views[block_i].bp;
owner_rank[j] = BP->rank;
if (myrank == BP->rank)
{
//---> interpolation
varl = VarList;
int k = 0;
while (varl) // run along variables
{
f_global_interp(BP->shape, BP->X[0], BP->X[1], BP->X[2], BP->fgfs[varl->data->sgfn], Shellf[j * num_var + k],
pox[0], pox[1], pox[2], ordn, varl->data->SoA, Symmetry);
varl = varl->next;
k++;
}
}
}
}
double DH[dim];
for (int i = 0; i < dim; i++)
DH[i] = getdX(i);
BlockBinIndex block_index;
build_block_bin_index(this, DH, block_index);
CachedInterpPlan &plan = get_cached_interp_plan(this, NN, XX, Symmetry, myrank, DH, block_index, lmyrank == 0, true);
const int *owner_rank = plan.owner_rank.data();
interpolate_owned_points(VarList, Shellf, Symmetry, ordn, block_index, plan);
// Collect unique global owner ranks and translate to local ranks in Comm_here
// Then broadcast each owner's points via MPI_Bcast on Comm_here
@@ -1008,10 +1145,9 @@ void Patch::Interp_Points(MyList<var> *VarList,
}
}
MPI_Group_free(&world_group);
MPI_Group_free(&local_group);
delete[] owner_rank;
}
MPI_Group_free(&world_group);
MPI_Group_free(&local_group);
}
void Patch::checkBlock()
{
int myrank;

View File

@@ -1,9 +1,124 @@
#include "Parallel.h"
#include "fmisc.h"
#include "prolongrestrict.h"
#include "misc.h"
#include "parameters.h"
#include "Parallel.h"
#include "fmisc.h"
#include "prolongrestrict.h"
#include "bssn_cuda_ops.h"
#include "misc.h"
#include "parameters.h"
#include <cstring>
#if defined(__GNUC__) || defined(__clang__)
extern int bssn_cuda_prolong3_pack(int wei,
const double *llbc, const double *uubc, const int *extc, const double *func,
const double *llbf, const double *uubf, const int *extf, double *funf,
const double *llbp, const double *uubp,
const double *SoA, int symmetry) __attribute__((weak));
#endif
namespace {
const char *g_parallel_transfer_context = "Parallel::transfer";
int parallel_next_transfer_tag()
{
const char *ctx = g_parallel_transfer_context ? g_parallel_transfer_context : "Parallel::transfer";
unsigned int hash = 5381u;
while (*ctx)
{
hash = ((hash << 5) + hash) + static_cast<unsigned char>(*ctx);
ctx++;
}
return 32 + static_cast<int>(hash % 32000u);
}
struct ParallelTransferContextGuard
{
const char *prev;
explicit ParallelTransferContextGuard(const char *context) : prev(g_parallel_transfer_context)
{
g_parallel_transfer_context = context;
}
~ParallelTransferContextGuard()
{
g_parallel_transfer_context = prev;
}
};
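
Each transfer now derives its MPI tag from the current context string (a djb2-style hash folded into the range [32, 32031]), and ParallelTransferContextGuard scopes that string RAII-style, restoring the previous context even on early return. A usage sketch mirroring the call sites later in this file:

    {
        ParallelTransferContextGuard transfer_ctx("Parallel::Restrict");
        // parallel_next_transfer_tag() inside transfer() now hashes "Parallel::Restrict",
        // so these messages cannot match an Irecv posted under a different context string.
        transfer(transfer_src, transfer_dst, VarList1, VarList2, Symmetry);
    }   // guard destructor restores the previous context
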
void parallel_report_mpi_error(const char *context, int errcode, int req_no)
{
char errstr[MPI_MAX_ERROR_STRING];
int len = 0;
MPI_Error_string(errcode, errstr, &len);
int myrank = -1;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
fprintf(stderr, "MPI failure on rank %d in %s (transfer_ctx=%s, req_no=%d): %.*s\n",
myrank, context, g_parallel_transfer_context, req_no, len, errstr);
fflush(stderr);
}
void parallel_report_mpi_status_errors(const char *context, int req_no,
int status_count, MPI_Status *stats)
{
if (!stats)
return;
int myrank = -1;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
const int count = (status_count >= 0 && status_count <= req_no) ? status_count : req_no;
for (int i = 0; i < count; i++)
{
if (stats[i].MPI_ERROR != MPI_SUCCESS)
{
char errstr[MPI_MAX_ERROR_STRING];
int len = 0;
MPI_Error_string(stats[i].MPI_ERROR, errstr, &len);
fprintf(stderr,
"MPI request failure on rank %d in %s (transfer_ctx=%s, status_idx=%d, source=%d, tag=%d): %.*s\n",
myrank, context, g_parallel_transfer_context, i,
stats[i].MPI_SOURCE, stats[i].MPI_TAG, len, errstr);
}
}
fflush(stderr);
}
int parallel_waitsome_checked(int req_no, MPI_Request *reqs, int *outcount,
int *completed, MPI_Status *stats,
const char *context)
{
MPI_Errhandler old_handler;
MPI_Comm_get_errhandler(MPI_COMM_WORLD, &old_handler);
MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
int rc = MPI_Waitsome(req_no, reqs, outcount, completed, stats);
MPI_Comm_set_errhandler(MPI_COMM_WORLD, old_handler);
MPI_Errhandler_free(&old_handler);
if (rc != MPI_SUCCESS)
{
parallel_report_mpi_status_errors(context, req_no, outcount ? *outcount : req_no, stats);
parallel_report_mpi_error(context, rc, req_no);
MPI_Abort(MPI_COMM_WORLD, 1);
}
return rc;
}
int parallel_waitall_checked(int req_no, MPI_Request *reqs, MPI_Status *stats,
const char *context)
{
MPI_Errhandler old_handler;
MPI_Comm_get_errhandler(MPI_COMM_WORLD, &old_handler);
MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
int rc = MPI_Waitall(req_no, reqs, stats);
MPI_Comm_set_errhandler(MPI_COMM_WORLD, old_handler);
MPI_Errhandler_free(&old_handler);
if (rc != MPI_SUCCESS)
{
parallel_report_mpi_status_errors(context, req_no, req_no, stats);
parallel_report_mpi_error(context, rc, req_no);
MPI_Abort(MPI_COMM_WORLD, 1);
}
return rc;
}
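
Both wrappers temporarily switch MPI_COMM_WORLD to MPI_ERRORS_RETURN so a failed wait comes back as an error code instead of aborting inside the MPI library; the rank, context string, source, and tag of each failed request are printed before the explicit MPI_Abort. They are drop-in replacements at the wait sites, as used later in this file:

    int outcount = 0;
    parallel_waitsome_checked(req_no, reqs, &outcount, completed, stats,
                              "Parallel::transfer");                       // replaces MPI_Waitsome
    if (req_no > 0)
        parallel_waitall_checked(req_no, reqs, stats, "Parallel::transfer"); // replaces MPI_Waitall
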
} // namespace
int Parallel::partition1(int &nx, int split_size, int min_width, int cpusize, int shape) // special for 1 dimension
{
@@ -3779,11 +3894,18 @@ int Parallel::data_packer(double *data, MyList<Parallel::gridseg> *src, MyList<P
src->data->Bg->bbox, src->data->Bg->bbox + dim, src->data->Bg->shape, src->data->Bg->fgfs[varls->data->sgfn],
dst->data->llb, dst->data->uub, varls->data->SoA, Symmetry);
break;
case 3:
f_prolong3(DIM, src->data->Bg->bbox, src->data->Bg->bbox + dim, src->data->Bg->shape, src->data->Bg->fgfs[varls->data->sgfn],
dst->data->llb, dst->data->uub, dst->data->shape, data + size_out,
dst->data->llb, dst->data->uub, varls->data->SoA, Symmetry);
}
case 3:
if (!bssn_cuda_prolong3_pack ||
bssn_cuda_prolong3_pack(DIM,
src->data->Bg->bbox, src->data->Bg->bbox + dim, src->data->Bg->shape, src->data->Bg->fgfs[varls->data->sgfn],
dst->data->llb, dst->data->uub, dst->data->shape, data + size_out,
dst->data->llb, dst->data->uub, varls->data->SoA, Symmetry))
{
f_prolong3(DIM, src->data->Bg->bbox, src->data->Bg->bbox + dim, src->data->Bg->shape, src->data->Bg->fgfs[varls->data->sgfn],
dst->data->llb, dst->data->uub, dst->data->shape, data + size_out,
dst->data->llb, dst->data->uub, varls->data->SoA, Symmetry);
}
}
if (dir == UNPACK) // from target data to corresponding grid
f_copy(DIM, dst->data->Bg->bbox, dst->data->Bg->bbox + dim, dst->data->Bg->shape, dst->data->Bg->fgfs[varld->data->sgfn],
dst->data->llb, dst->data->uub, dst->data->shape, data + size_out,
@@ -3896,10 +4018,11 @@ void Parallel::transfer(MyList<Parallel::gridseg> **src, MyList<Parallel::gridse
MPI_Request *reqs = new MPI_Request[2 * cpusize];
MPI_Status *stats = new MPI_Status[2 * cpusize];
int *req_node = new int[2 * cpusize];
int *req_is_recv = new int[2 * cpusize];
int *completed = new int[2 * cpusize];
int req_no = 0;
int pending_recv = 0;
int *req_is_recv = new int[2 * cpusize];
int *completed = new int[2 * cpusize];
int req_no = 0;
int pending_recv = 0;
const int mpi_tag = parallel_next_transfer_tag();
double **send_data = new double *[cpusize];
double **rec_data = new double *[cpusize];
@@ -3913,31 +4036,31 @@ void Parallel::transfer(MyList<Parallel::gridseg> **src, MyList<Parallel::gridse
}
// Post receives first so peers can progress rendezvous early.
for (node = 0; node < cpusize; node++)
{
if (node == myrank) continue;
recv_lengths[node] = data_packer(0, src[node], dst[node], node, UNPACK, VarList1, VarList2, Symmetry);
if (recv_lengths[node] > 0)
{
for (node = 0; node < cpusize; node++)
{
if (node == myrank) continue;
recv_lengths[node] = data_packer(0, src[node], dst[node], node, UNPACK, VarList1, VarList2, Symmetry);
if (recv_lengths[node] > 0)
{
rec_data[node] = new double[recv_lengths[node]];
if (!rec_data[node])
{
cout << "out of memory when new in short transfer, place 1" << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Irecv((void *)rec_data[node], recv_lengths[node], MPI_DOUBLE, node, 1, MPI_COMM_WORLD, reqs + req_no);
MPI_Irecv((void *)rec_data[node], recv_lengths[node], MPI_DOUBLE, node, mpi_tag, MPI_COMM_WORLD, reqs + req_no);
req_node[req_no] = node;
req_is_recv[req_no] = 1;
req_no++;
pending_recv++;
}
}
// Local transfer on this rank.
recv_lengths[myrank] = data_packer(0, src[myrank], dst[myrank], myrank, PACK, VarList1, VarList2, Symmetry);
if (recv_lengths[myrank] > 0)
{
}
// Local transfer on this rank.
recv_lengths[myrank] = data_packer(0, src[myrank], dst[myrank], myrank, PACK, VarList1, VarList2, Symmetry);
if (recv_lengths[myrank] > 0)
{
rec_data[myrank] = new double[recv_lengths[myrank]];
if (!rec_data[myrank])
{
@@ -3947,33 +4070,33 @@ void Parallel::transfer(MyList<Parallel::gridseg> **src, MyList<Parallel::gridse
data_packer(rec_data[myrank], src[myrank], dst[myrank], myrank, PACK, VarList1, VarList2, Symmetry);
}
// Pack and post sends.
for (node = 0; node < cpusize; node++)
{
if (node == myrank) continue;
send_lengths[node] = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
if (send_lengths[node] > 0)
{
send_data[node] = new double[send_lengths[node]];
// Pack and post sends.
for (node = 0; node < cpusize; node++)
{
if (node == myrank) continue;
send_lengths[node] = data_packer(0, src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
if (send_lengths[node] > 0)
{
send_data[node] = new double[send_lengths[node]];
if (!send_data[node])
{
cout << "out of memory when new in short transfer, place 3" << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
data_packer(send_data[node], src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
MPI_Isend((void *)send_data[node], send_lengths[node], MPI_DOUBLE, node, 1, MPI_COMM_WORLD, reqs + req_no);
MPI_Isend((void *)send_data[node], send_lengths[node], MPI_DOUBLE, node, mpi_tag, MPI_COMM_WORLD, reqs + req_no);
req_node[req_no] = node;
req_is_recv[req_no] = 0;
req_no++;
}
}
// Unpack as soon as receive completes to reduce pure wait time.
while (pending_recv > 0)
req_no++;
}
}
// Unpack as soon as receive completes to reduce pure wait time.
while (pending_recv > 0)
{
int outcount = 0;
MPI_Waitsome(req_no, reqs, &outcount, completed, stats);
parallel_waitsome_checked(req_no, reqs, &outcount, completed, stats,
"Parallel::transfer");
if (outcount == MPI_UNDEFINED) break;
for (int i = 0; i < outcount; i++)
@@ -3988,7 +4111,7 @@ void Parallel::transfer(MyList<Parallel::gridseg> **src, MyList<Parallel::gridse
}
}
if (req_no > 0) MPI_Waitall(req_no, reqs, stats);
if (req_no > 0) parallel_waitall_checked(req_no, reqs, stats, "Parallel::transfer");
if (rec_data[myrank])
data_packer(rec_data[myrank], src[myrank], dst[myrank], myrank, UNPACK, VarList1, VarList2, Symmetry);
@@ -4005,12 +4128,12 @@ void Parallel::transfer(MyList<Parallel::gridseg> **src, MyList<Parallel::gridse
delete[] stats;
delete[] req_node;
delete[] req_is_recv;
delete[] completed;
delete[] send_data;
delete[] rec_data;
delete[] send_lengths;
delete[] recv_lengths;
}
delete[] completed;
delete[] send_data;
delete[] rec_data;
delete[] send_lengths;
delete[] recv_lengths;
}
//
void Parallel::transfermix(MyList<Parallel::gridseg> **src, MyList<Parallel::gridseg> **dst,
MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /*target */,
@@ -4027,8 +4150,9 @@ void Parallel::transfermix(MyList<Parallel::gridseg> **src, MyList<Parallel::gri
int *req_node = new int[2 * cpusize];
int *req_is_recv = new int[2 * cpusize];
int *completed = new int[2 * cpusize];
int req_no = 0;
int pending_recv = 0;
int req_no = 0;
int pending_recv = 0;
const int mpi_tag = parallel_next_transfer_tag();
double **send_data = new double *[cpusize];
double **rec_data = new double *[cpusize];
@@ -4055,7 +4179,7 @@ void Parallel::transfermix(MyList<Parallel::gridseg> **src, MyList<Parallel::gri
cout << "out of memory when new in short transfer, place 1" << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
MPI_Irecv((void *)rec_data[node], recv_lengths[node], MPI_DOUBLE, node, 1, MPI_COMM_WORLD, reqs + req_no);
MPI_Irecv((void *)rec_data[node], recv_lengths[node], MPI_DOUBLE, node, mpi_tag, MPI_COMM_WORLD, reqs + req_no);
req_node[req_no] = node;
req_is_recv[req_no] = 1;
req_no++;
@@ -4091,7 +4215,7 @@ void Parallel::transfermix(MyList<Parallel::gridseg> **src, MyList<Parallel::gri
MPI_Abort(MPI_COMM_WORLD, 1);
}
data_packermix(send_data[node], src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
MPI_Isend((void *)send_data[node], send_lengths[node], MPI_DOUBLE, node, 1, MPI_COMM_WORLD, reqs + req_no);
MPI_Isend((void *)send_data[node], send_lengths[node], MPI_DOUBLE, node, mpi_tag, MPI_COMM_WORLD, reqs + req_no);
req_node[req_no] = node;
req_is_recv[req_no] = 0;
req_no++;
@@ -4102,7 +4226,8 @@ void Parallel::transfermix(MyList<Parallel::gridseg> **src, MyList<Parallel::gri
while (pending_recv > 0)
{
int outcount = 0;
MPI_Waitsome(req_no, reqs, &outcount, completed, stats);
parallel_waitsome_checked(req_no, reqs, &outcount, completed, stats,
"Parallel::transfermix");
if (outcount == MPI_UNDEFINED) break;
for (int i = 0; i < outcount; i++)
@@ -4117,7 +4242,7 @@ void Parallel::transfermix(MyList<Parallel::gridseg> **src, MyList<Parallel::gri
}
}
if (req_no > 0) MPI_Waitall(req_no, reqs, stats);
if (req_no > 0) parallel_waitall_checked(req_no, reqs, stats, "Parallel::transfermix");
if (rec_data[myrank])
data_packermix(rec_data[myrank], src[myrank], dst[myrank], myrank, UNPACK, VarList1, VarList2, Symmetry);
@@ -4134,16 +4259,21 @@ void Parallel::transfermix(MyList<Parallel::gridseg> **src, MyList<Parallel::gri
delete[] stats;
delete[] req_node;
delete[] req_is_recv;
delete[] completed;
delete[] send_data;
delete[] rec_data;
delete[] send_lengths;
delete[] recv_lengths;
}
void Parallel::Sync(Patch *Pat, MyList<var> *VarList, int Symmetry)
{
int cpusize;
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
delete[] completed;
delete[] send_data;
delete[] rec_data;
delete[] send_lengths;
delete[] recv_lengths;
}
void Parallel::Sync(Patch *Pat, MyList<var> *VarList, int Symmetry)
{
Sync(Pat, VarList, Symmetry, "Parallel::Sync(Patch)");
}
void Parallel::Sync(Patch *Pat, MyList<var> *VarList, int Symmetry, const char *context)
{
int cpusize;
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
MyList<Parallel::gridseg> *dst;
MyList<Parallel::gridseg> **src, **transfer_src, **transfer_dst;
@@ -4159,7 +4289,10 @@ void Parallel::Sync(Patch *Pat, MyList<var> *VarList, int Symmetry)
// but for transfer_dst[node] the data may locate on any node
}
transfer(transfer_src, transfer_dst, VarList, VarList, Symmetry);
{
ParallelTransferContextGuard transfer_ctx(context ? context : "Parallel::Sync(Patch)");
transfer(transfer_src, transfer_dst, VarList, VarList, Symmetry);
}
if (dst)
dst->destroyList();
@@ -4177,15 +4310,26 @@ void Parallel::Sync(Patch *Pat, MyList<var> *VarList, int Symmetry)
delete[] transfer_src;
delete[] transfer_dst;
}
void Parallel::Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
{
// Patch inner Synch
MyList<Patch> *Pp = PatL;
while (Pp)
{
Sync(Pp->data, VarList, Symmetry);
Pp = Pp->next;
}
void Parallel::Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
{
Sync(PatL, VarList, Symmetry, "Parallel::Sync(PatchList)");
}
void Parallel::Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, const char *context)
{
std::string patchlist_context = context ? std::string(context) + " [patchlist]" : "Parallel::Sync(PatchList)";
// Patch inner Synch
MyList<Patch> *Pp = PatL;
int patch_index = 0;
while (Pp)
{
std::string patch_context = context ? std::string(context) + " [patch " + std::to_string(patch_index) + "]"
: "Parallel::Sync(Patch)";
Sync(Pp->data, VarList, Symmetry, patch_context.c_str());
Pp = Pp->next;
patch_index++;
}
// Patch inter Synch
int cpusize;
@@ -4204,7 +4348,10 @@ void Parallel::Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
build_gstl(src[node], dst, &transfer_src[node], &transfer_dst[node]); // for transfer[node], data locate on cpu#node
}
transfer(transfer_src, transfer_dst, VarList, VarList, Symmetry);
{
ParallelTransferContextGuard transfer_ctx(patchlist_context.c_str());
transfer(transfer_src, transfer_dst, VarList, VarList, Symmetry);
}
if (dst)
dst->destroyList();
@@ -4224,10 +4371,15 @@ void Parallel::Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
}
// Merged Sync: collect all intra-patch and inter-patch grid segment lists,
// then issue a single transfer() call instead of N+1 separate ones.
void Parallel::Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
{
int cpusize;
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
void Parallel::Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry)
{
Sync_merged(PatL, VarList, Symmetry, "Parallel::Sync_merged");
}
void Parallel::Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, const char *context)
{
int cpusize;
MPI_Comm_size(MPI_COMM_WORLD, &cpusize);
MyList<Parallel::gridseg> **combined_src = new MyList<Parallel::gridseg> *[cpusize];
MyList<Parallel::gridseg> **combined_dst = new MyList<Parallel::gridseg> *[cpusize];
@@ -4301,8 +4453,11 @@ void Parallel::Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmet
if (dst_buffer)
dst_buffer->destroyList();
// Phase C: Single transfer
transfer(combined_src, combined_dst, VarList, VarList, Symmetry);
// Phase C: Single transfer
{
ParallelTransferContextGuard transfer_ctx(context ? context : "Parallel::Sync_merged");
transfer(combined_src, combined_dst, VarList, VarList, Symmetry);
}
// Phase D: Cleanup
for (int node = 0; node < cpusize; node++)
@@ -4380,8 +4535,9 @@ void Parallel::transfer_cached(MyList<Parallel::gridseg> **src, MyList<Parallel:
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
int cpusize = cache.cpusize;
int req_no = 0;
int pending_recv = 0;
int req_no = 0;
int pending_recv = 0;
const int mpi_tag = parallel_next_transfer_tag();
int node;
int *req_node = cache.tc_req_node;
int *req_is_recv = cache.tc_req_is_recv;
@@ -4402,7 +4558,7 @@ void Parallel::transfer_cached(MyList<Parallel::gridseg> **src, MyList<Parallel:
cache.recv_bufs[node] = new double[rlength];
cache.recv_buf_caps[node] = rlength;
}
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no);
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, mpi_tag, MPI_COMM_WORLD, cache.reqs + req_no);
req_node[req_no] = node;
req_is_recv[req_no] = 1;
req_no++;
@@ -4440,7 +4596,7 @@ void Parallel::transfer_cached(MyList<Parallel::gridseg> **src, MyList<Parallel:
cache.send_buf_caps[node] = slength;
}
data_packer(cache.send_bufs[node], src[myrank], dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no);
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, mpi_tag, MPI_COMM_WORLD, cache.reqs + req_no);
req_node[req_no] = node;
req_is_recv[req_no] = 0;
req_no++;
@@ -4451,7 +4607,8 @@ void Parallel::transfer_cached(MyList<Parallel::gridseg> **src, MyList<Parallel:
while (pending_recv > 0)
{
int outcount = 0;
MPI_Waitsome(req_no, cache.reqs, &outcount, completed, cache.stats);
parallel_waitsome_checked(req_no, cache.reqs, &outcount, completed, cache.stats,
"Parallel::transfer_cached");
if (outcount == MPI_UNDEFINED) break;
for (int i = 0; i < outcount; i++)
@@ -4466,11 +4623,11 @@ void Parallel::transfer_cached(MyList<Parallel::gridseg> **src, MyList<Parallel:
}
}
if (req_no > 0) MPI_Waitall(req_no, cache.reqs, cache.stats);
if (req_no > 0) parallel_waitall_checked(req_no, cache.reqs, cache.stats, "Parallel::transfer_cached");
if (self_len > 0)
data_packer(cache.recv_bufs[myrank], src[myrank], dst[myrank], myrank, UNPACK, VarList1, VarList2, Symmetry);
}
if (self_len > 0)
data_packer(cache.recv_bufs[myrank], src[myrank], dst[myrank], myrank, UNPACK, VarList1, VarList2, Symmetry);
}
void Parallel::Sync_cached(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, SyncCache &cache)
{
if (!cache.valid)
@@ -4669,12 +4826,13 @@ void Parallel::Sync_start(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetr
}
// Now pack and post async MPI operations
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
int cpusize = cache.cpusize;
state.req_no = 0;
state.active = true;
state.pending_recv = 0;
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
int cpusize = cache.cpusize;
state.req_no = 0;
state.active = true;
state.mpi_tag = parallel_next_transfer_tag();
state.pending_recv = 0;
// Allocate tracking arrays
delete[] state.req_node; delete[] state.req_is_recv;
state.req_node = new int[cache.max_reqs];
@@ -4725,7 +4883,7 @@ void Parallel::Sync_start(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetr
data_packer(cache.send_bufs[node], src[myrank], dst[myrank], node, PACK, VarList, VarList, Symmetry);
state.req_node[state.req_no] = node;
state.req_is_recv[state.req_no] = 0;
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, 2, MPI_COMM_WORLD, cache.reqs + state.req_no++);
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, state.mpi_tag, MPI_COMM_WORLD, cache.reqs + state.req_no++);
}
int rlength;
if (!cache.lengths_valid) {
@@ -4745,7 +4903,7 @@ void Parallel::Sync_start(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetr
state.req_node[state.req_no] = node;
state.req_is_recv[state.req_no] = 1;
state.pending_recv++;
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, 2, MPI_COMM_WORLD, cache.reqs + state.req_no++);
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, state.mpi_tag, MPI_COMM_WORLD, cache.reqs + state.req_no++);
}
}
}
@@ -4775,7 +4933,8 @@ void Parallel::Sync_finish(SyncCache &cache, AsyncSyncState &state,
while (pending > 0)
{
int outcount = 0;
MPI_Waitsome(state.req_no, cache.reqs, &outcount, completed, cache.stats);
parallel_waitsome_checked(state.req_no, cache.reqs, &outcount, completed, cache.stats,
"Parallel::Sync_finish");
if (outcount == MPI_UNDEFINED) break;
for (int i = 0; i < outcount; i++)
{
@@ -4791,8 +4950,8 @@ void Parallel::Sync_finish(SyncCache &cache, AsyncSyncState &state,
delete[] completed;
}
// Wait for remaining sends
if (state.req_no > 0) MPI_Waitall(state.req_no, cache.reqs, cache.stats);
// Wait for remaining sends
if (state.req_no > 0) parallel_waitall_checked(state.req_no, cache.reqs, cache.stats, "Parallel::Sync_finish");
delete[] state.req_node; state.req_node = 0;
delete[] state.req_is_recv; state.req_is_recv = 0;
@@ -5235,7 +5394,10 @@ void Parallel::PeriodicBD(Patch *Pat, MyList<var> *VarList, int Symmetry)
build_PhysBD_gstl(Pat, src[node], dst, &transfer_src[node], &transfer_dst[node]); // for transfer[node], data locate on cpu#node
}
transfer(transfer_src, transfer_dst, VarList, VarList, Symmetry);
{
ParallelTransferContextGuard transfer_ctx("Parallel::PeriodicBD");
transfer(transfer_src, transfer_dst, VarList, VarList, Symmetry);
}
if (dst)
dst->destroyList();
@@ -5506,7 +5668,10 @@ void Parallel::Prolong(Patch *Patc, Patch *Patf,
build_gstl(src[node], dst, &transfer_src[node], &transfer_dst[node]); // for transfer[node], data locate on cpu#node
}
transfer(transfer_src, transfer_dst, VarList1, VarList2, Symmetry);
{
ParallelTransferContextGuard transfer_ctx("Parallel::Prolong");
transfer(transfer_src, transfer_dst, VarList1, VarList2, Symmetry);
}
if (dst)
dst->destroyList();
@@ -5565,7 +5730,10 @@ void Parallel::Restrict(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
build_gstl(src[node], dst, &transfer_src[node], &transfer_dst[node]); // for transfer[node], data locate on cpu#node
}
transfer(transfer_src, transfer_dst, VarList1, VarList2, Symmetry);
{
ParallelTransferContextGuard transfer_ctx("Parallel::Restrict");
transfer(transfer_src, transfer_dst, VarList1, VarList2, Symmetry);
}
if (dst)
dst->destroyList();
@@ -5609,7 +5777,10 @@ void Parallel::Restrict_after(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
build_gstl(src[node], dst, &transfer_src[node], &transfer_dst[node]); // for transfer[node], data locate on cpu#node
}
transfer(transfer_src, transfer_dst, VarList1, VarList2, Symmetry);
{
ParallelTransferContextGuard transfer_ctx("Parallel::Restrict_after");
transfer(transfer_src, transfer_dst, VarList1, VarList2, Symmetry);
}
if (dst)
dst->destroyList();
@@ -5654,7 +5825,10 @@ void Parallel::OutBdLow2Hi(Patch *Patc, Patch *Patf,
build_gstl(src[node], dst, &transfer_src[node], &transfer_dst[node]); // for transfer[node], data locate on cpu#node
}
transfer(transfer_src, transfer_dst, VarList1, VarList2, Symmetry);
{
ParallelTransferContextGuard transfer_ctx("Parallel::OutBdLow2Hi(Patch)");
transfer(transfer_src, transfer_dst, VarList1, VarList2, Symmetry);
}
if (dst)
dst->destroyList();
@@ -5710,7 +5884,10 @@ void Parallel::OutBdLow2Hi(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
build_gstl(src[node], dst, &transfer_src[node], &transfer_dst[node]); // for transfer[node], data locate on cpu#node
}
transfer(transfer_src, transfer_dst, VarList1, VarList2, Symmetry);
{
ParallelTransferContextGuard transfer_ctx("Parallel::OutBdLow2Hi(PatchList)");
transfer(transfer_src, transfer_dst, VarList1, VarList2, Symmetry);
}
if (dst)
dst->destroyList();
@@ -5981,13 +6158,14 @@ void Parallel::OutBdLow2Himix_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
int myrank;
MPI_Comm_size(MPI_COMM_WORLD, &cache.cpusize);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
int cpusize = cache.cpusize;
int req_no = 0;
int pending_recv = 0;
int *req_node = new int[cache.max_reqs];
int *req_is_recv = new int[cache.max_reqs];
int *completed = new int[cache.max_reqs];
int cpusize = cache.cpusize;
int req_no = 0;
int pending_recv = 0;
const int mpi_tag = parallel_next_transfer_tag();
int *req_node = new int[cache.max_reqs];
int *req_is_recv = new int[cache.max_reqs];
int *completed = new int[cache.max_reqs];
// Post receives first so peers can progress rendezvous early.
for (int node = 0; node < cpusize; node++)
@@ -6004,7 +6182,7 @@ void Parallel::OutBdLow2Himix_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
cache.recv_bufs[node] = new double[rlength];
cache.recv_buf_caps[node] = rlength;
}
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no);
MPI_Irecv((void *)cache.recv_bufs[node], rlength, MPI_DOUBLE, node, mpi_tag, MPI_COMM_WORLD, cache.reqs + req_no);
req_node[req_no] = node;
req_is_recv[req_no] = 1;
req_no++;
@@ -6042,7 +6220,7 @@ void Parallel::OutBdLow2Himix_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
cache.send_buf_caps[node] = slength;
}
data_packermix(cache.send_bufs[node], cache.combined_src[myrank], cache.combined_dst[myrank], node, PACK, VarList1, VarList2, Symmetry);
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, 1, MPI_COMM_WORLD, cache.reqs + req_no);
MPI_Isend((void *)cache.send_bufs[node], slength, MPI_DOUBLE, node, mpi_tag, MPI_COMM_WORLD, cache.reqs + req_no);
req_node[req_no] = node;
req_is_recv[req_no] = 0;
req_no++;
@@ -6053,7 +6231,8 @@ void Parallel::OutBdLow2Himix_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
while (pending_recv > 0)
{
int outcount = 0;
MPI_Waitsome(req_no, cache.reqs, &outcount, completed, cache.stats);
parallel_waitsome_checked(req_no, cache.reqs, &outcount, completed, cache.stats,
"Parallel::transfermix_cached");
if (outcount == MPI_UNDEFINED) break;
for (int i = 0; i < outcount; i++)
@@ -6068,10 +6247,10 @@ void Parallel::OutBdLow2Himix_cached(MyList<Patch> *PatcL, MyList<Patch> *PatfL,
}
}
if (req_no > 0) MPI_Waitall(req_no, cache.reqs, cache.stats);
if (req_no > 0) parallel_waitall_checked(req_no, cache.reqs, cache.stats, "Parallel::transfermix_cached");
if (self_len > 0)
data_packermix(cache.recv_bufs[myrank], cache.combined_src[myrank], cache.combined_dst[myrank], myrank, UNPACK, VarList1, VarList2, Symmetry);
if (self_len > 0)
data_packermix(cache.recv_bufs[myrank], cache.combined_src[myrank], cache.combined_dst[myrank], myrank, UNPACK, VarList1, VarList2, Symmetry);
delete[] req_node;
delete[] req_is_recv;
@@ -6787,7 +6966,10 @@ void Parallel::fill_level_data(MyList<Patch> *PatLd, MyList<Patch> *PatLs, MyLis
build_gstl(src[node], dst, &transfer_src[node], &transfer_dst[node]); // for transfer[node], data locate on cpu#node
}
transfer(transfer_src, transfer_dst, VarList, VarList, Symmetry);
{
ParallelTransferContextGuard transfer_ctx("Parallel::Interp copy");
transfer(transfer_src, transfer_dst, VarList, VarList, Symmetry);
}
for (int node = 0; node < cpusize; node++)
{
@@ -6826,7 +7008,10 @@ void Parallel::fill_level_data(MyList<Patch> *PatLd, MyList<Patch> *PatLs, MyLis
Sync(PatcL, FutureList, Symmetry);
}
//<~~~prolong then
transfer(transfer_src, transfer_dst, FutureList, FutureList, Symmetry);
{
ParallelTransferContextGuard transfer_ctx("Parallel::Interp prolong FutureList");
transfer(transfer_src, transfer_dst, FutureList, FutureList, Symmetry);
}
// for StateList
// time interpolation part
@@ -6842,7 +7027,10 @@ void Parallel::fill_level_data(MyList<Patch> *PatLd, MyList<Patch> *PatLs, MyLis
Sync(PatcL, tmList, Symmetry);
}
//<~~~prolong then
transfer(transfer_src, transfer_dst, tmList, StateList, Symmetry);
{
ParallelTransferContextGuard transfer_ctx("Parallel::Interp prolong StateList");
transfer(transfer_src, transfer_dst, tmList, StateList, Symmetry);
}
}
else
{
@@ -6853,7 +7041,10 @@ void Parallel::fill_level_data(MyList<Patch> *PatLd, MyList<Patch> *PatLs, MyLis
Sync(PatcL, VarList, Symmetry);
}
//<~~~prolong then
transfer(transfer_src, transfer_dst, VarList, VarList, Symmetry);
{
ParallelTransferContextGuard transfer_ctx("Parallel::Interp prolong VarList");
transfer(transfer_src, transfer_dst, VarList, VarList, Symmetry);
}
}
for (int node = 0; node < cpusize; node++)

View File

@@ -89,9 +89,12 @@ namespace Parallel
void transfermix(MyList<gridseg> **src, MyList<gridseg> **dst,
MyList<var> *VarList1 /* source */, MyList<var> *VarList2 /*target */,
int Symmetry);
void Sync(Patch *Pat, MyList<var> *VarList, int Symmetry);
void Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
void Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
void Sync(Patch *Pat, MyList<var> *VarList, int Symmetry);
void Sync(Patch *Pat, MyList<var> *VarList, int Symmetry, const char *context);
void Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
void Sync(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, const char *context);
void Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry);
void Sync_merged(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry, const char *context);
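
The new overloads thread an optional context string through Sync so the eventual transfer() can label its MPI tag and error diagnostics with the calling site; the three-argument forms keep their old behavior by forwarding a default label. A usage sketch taken from the bssn_class call sites further down in this compare:

    // Old call, still valid:
    Parallel::Sync(GH->PatL[lev], ConstraintList, Symmetry);
    // New call with a diagnostic context:
    Parallel::Sync(GH->PatL[lev], ConstraintList, Symmetry,
                   "bssn_class::Constraint_Out[level]");
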
struct SyncCache {
bool valid;
@@ -121,14 +124,15 @@ namespace Parallel
MyList<var> *VarList1, MyList<var> *VarList2,
int Symmetry, SyncCache &cache);
struct AsyncSyncState {
int req_no;
bool active;
int *req_node;
int *req_is_recv;
int pending_recv;
AsyncSyncState() : req_no(0), active(false), req_node(0), req_is_recv(0), pending_recv(0) {}
};
struct AsyncSyncState {
int req_no;
bool active;
int mpi_tag;
int *req_node;
int *req_is_recv;
int pending_recv;
AsyncSyncState() : req_no(0), active(false), mpi_tag(0), req_node(0), req_is_recv(0), pending_recv(0) {}
};
void Sync_start(MyList<Patch> *PatL, MyList<var> *VarList, int Symmetry,
SyncCache &cache, AsyncSyncState &state);

View File

@@ -14,7 +14,8 @@ using namespace std;
#include <string.h>
#endif
#include <time.h>
#include <time.h>
#include <unistd.h>
#include "macrodef.h"
#include "misc.h"
@@ -2036,9 +2037,10 @@ void bssn_class::Read_Ansorg()
void bssn_class::Evolve(int Steps)
{
clock_t prev_clock, curr_clock;
double LastDump = 0.0, LastCheck = 0.0, Last2dDump = 0.0;
LastAnas = 0;
clock_t prev_clock, curr_clock;
double LastDump = 0.0, LastCheck = 0.0, Last2dDump = 0.0;
LastAnas = 0;
LastConsOut = 0;
#if 0
//initial checkpoint for special usage
{
@@ -2296,18 +2298,21 @@ void bssn_class::Evolve(int Steps)
////////////////////////////////////////////////////////////
// When LastCheck >= CheckTime, perform runtime checks and output status data
if (LastCheck >= CheckTime)
{
LastCheck = 0;
CheckPoint->write_Black_Hole_position(BH_num_input, BH_num, Porg0, Porgbr, Mass);
if (LastCheck >= CheckTime)
{
LastCheck = 0;
CheckPoint->write_Black_Hole_position(BH_num_input, BH_num, Porg0, Porgbr, Mass);
CheckPoint->writecheck_cgh(PhysTime, GH);
#ifdef WithShell
CheckPoint->writecheck_sh(PhysTime, SH);
#endif
CheckPoint->write_bssn(LastDump, Last2dDump, LastAnas);
}
}
#endif
CheckPoint->write_bssn(LastDump, Last2dDump, LastAnas);
}
// Keep output/analysis phases aligned across ranks before the next coarse step.
MPI_Barrier(MPI_COMM_WORLD);
}
/*
#ifdef With_AHF
// final apparent horizon finding
@@ -3026,9 +3031,14 @@ void bssn_class::RecursiveStep(int lev, int num) // in all 2^(lev+1)-1 steps
#if (PSTR == 0)
#if 1
void bssn_class::Step(int lev, int YN)
{
setpbh(BH_num, Porg0, Mass, BH_num_input);
void bssn_class::Step(int lev, int YN)
{
#ifdef USE_GPU
Step_MainPath_GPU(lev, YN);
return;
#endif
setpbh(BH_num, Porg0, Mass, BH_num_input);
double dT_lev = dT * pow(0.5, Mymax(lev, trfls));
@@ -6248,7 +6258,7 @@ for(int ilev = GH->levels-1;ilev>=lev;ilev--)
for(int ilev=GH->levels-1;ilev>lev;ilev--)
RestrictProlong(ilev,1,false,DG_List,DG_List,DG_List);
#else
Parallel::Sync(GH->PatL[lev], DG_List, Symmetry);
Parallel::Sync(GH->PatL[lev], DG_List, Symmetry, "bssn_class::Compute_Psi4");
#endif
#ifdef WithShell
@@ -6903,10 +6913,10 @@ void bssn_class::AnalysisStuff(int lev, double dT_lev)
{
LastAnas += dT_lev;
if (LastAnas >= AnasTime)
{
#ifdef Point_Psi4
#error "not support parallel levels yet"
if (LastAnas >= AnasTime)
{
#ifdef Point_Psi4
#error "not support parallel levels yet"
// Gam_ijk and R_ij have been calculated in Interp_Constraint()
double SYM = 1, ANT = -1;
for (int levh = lev; levh < GH->levels; levh++)
@@ -7250,9 +7260,9 @@ void bssn_class::AnalysisStuff(int lev, double dT_lev)
//================================================================================================
void bssn_class::Constraint_Out()
{
LastConsOut += dT * pow(0.5, Mymax(0, trfls));
void bssn_class::Constraint_Out()
{
LastConsOut += dT * pow(0.5, Mymax(0, trfls));
if (LastConsOut >= AnasTime)
// Constraint violation
@@ -7317,7 +7327,7 @@ void bssn_class::Constraint_Out()
Pp = Pp->next;
}
}
Parallel::Sync(GH->PatL[lev], ConstraintList, Symmetry);
Parallel::Sync(GH->PatL[lev], ConstraintList, Symmetry, "bssn_class::Constraint_Out[level]");
}
#ifdef WithShell
if (0) // if the constraint quantities can be reused from the step rhs calculation
@@ -7539,7 +7549,7 @@ void bssn_class::AH_Prepare_derivatives()
}
Pp = Pp->next;
}
Parallel::Sync(GH->PatL[lev], AHDList, Symmetry);
Parallel::Sync(GH->PatL[lev], AHDList, Symmetry, "bssn_class::AH_Prepare_derivatives");
}
}
@@ -7820,7 +7830,7 @@ void bssn_class::Interp_Constraint(bool infg)
Pp = Pp->next;
}
}
Parallel::Sync(GH->PatL[lev], ConstraintList, Symmetry);
Parallel::Sync(GH->PatL[lev], ConstraintList, Symmetry, "bssn_class::Interp_Constraint[level]");
}
#ifdef WithShell
if (0) // if the constraint quantities can be reused from the step rhs calculation
@@ -8078,7 +8088,7 @@ void bssn_class::Compute_Constraint()
Pp = Pp->next;
}
}
Parallel::Sync(GH->PatL[lev], ConstraintList, Symmetry);
Parallel::Sync(GH->PatL[lev], ConstraintList, Symmetry, "bssn_class::Compute_Constraint[level]");
}
// prolong restrict constraint quantities
for (lev = GH->levels - 1; lev > 0; lev--)
@@ -8391,12 +8401,18 @@ void bssn_class::Enforce_algcon(int lev, int fg)
//================================================================================================
bool bssn_class::check_Stdin_Abort()
{
fd_set readfds;
struct timeval timeout;
bool bssn_class::check_Stdin_Abort()
{
// Non-interactive launches (mpirun via Python/subprocess, batch jobs, redirected stdin)
// should not probe stdin. Some MPI runtimes treat stdin as a managed channel and can
// fail when rank 0 polls/consumes it.
if (!isatty(STDIN_FILENO)) {
return false;
}
fd_set readfds;
struct timeval timeout;
FD_ZERO(&readfds);
FD_SET(STDIN_FILENO, &readfds);
@@ -8405,14 +8421,17 @@ bool bssn_class::check_Stdin_Abort()
timeout.tv_sec = 0;
timeout.tv_usec = 0;
int activity = select(STDIN_FILENO + 1, &readfds, nullptr, nullptr, &timeout);
if (activity > 0 && FD_ISSET(STDIN_FILENO, &readfds)) {
string input_abort;
if (cin >> input_abort) {
if (input_abort == "stop") {
return true;
}
int activity = select(STDIN_FILENO + 1, &readfds, nullptr, nullptr, &timeout);
if (activity <= 0) {
return false;
}
if (FD_ISSET(STDIN_FILENO, &readfds)) {
string input_abort;
if (cin >> input_abort) {
if (input_abort == "stop") {
return true;
}
}
}

View File

@@ -171,16 +171,19 @@ public:
bool check_Stdin_Abort();
virtual void Setup_Initial_Data_Cao();
virtual void Setup_Initial_Data_Lousto();
virtual void Initialize();
virtual void Read_Ansorg();
virtual void Read_Pablo() {};
virtual void Compute_Psi4(int lev);
virtual void Step(int lev, int YN);
virtual void Interp_Constraint(bool infg);
virtual void Constraint_Out();
virtual void Compute_Constraint();
virtual void Setup_Initial_Data_Cao();
virtual void Setup_Initial_Data_Lousto();
virtual void Initialize();
virtual void Read_Ansorg();
virtual void Read_Pablo() {};
virtual void Compute_Psi4(int lev);
virtual void Step(int lev, int YN);
#ifdef USE_GPU
void Step_MainPath_GPU(int lev, int YN);
#endif
virtual void Interp_Constraint(bool infg);
virtual void Constraint_Out();
virtual void Compute_Constraint();
#ifdef With_AHF
protected:

File diff suppressed because it is too large

View File

@@ -0,0 +1,45 @@
#ifndef BSSN_CUDA_OPS_H
#define BSSN_CUDA_OPS_H
int bssn_cuda_enforce_ga(int *ex,
double *dxx, double *gxy, double *gxz,
double *dyy, double *gyz, double *dzz,
double *Axx, double *Axy, double *Axz,
double *Ayy, double *Ayz, double *Azz);
int bssn_cuda_rk4_boundary_var(int *ex, double dT,
const double *X, const double *Y, const double *Z,
double xmin, double ymin, double zmin,
double xmax, double ymax, double zmax,
const double *state0,
const double *boundary_src,
double *stage_data,
double *rhs_accum,
double propspeed,
const double SoA[3],
int symmetry,
int lev,
int rk_stage,
bool download_to_host = true);
int bssn_cuda_lowerbound(int *ex, double *chi, double tinny, bool download_to_host = true);
int bssn_cuda_download_buffer(int *ex, double *host_ptr);
int bssn_cuda_prolong3_pack(int wei,
const double *llbc, const double *uubc, const int *extc, const double *func,
const double *llbf, const double *uubf, const int *extf, double *funf,
const double *llbp, const double *uubp,
const double *SoA, int symmetry);
int bssn_cuda_interp_points_batch(const int *ex,
const double *X, const double *Y, const double *Z,
const double *const *fields,
const double *soa_flat,
int num_var,
const double *px, const double *py, const double *pz,
int num_points,
int ordn,
int symmetry,
double *out);
#endif
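
These declarations are the host-side interface to the CUDA translation unit; every entry point returns an int, and the callers in this compare treat a nonzero return either as a signal to fall back to the CPU routine (data_packer) or as a hard error (Step_MainPath_GPU). A hedged usage sketch; the wrapper function and the CPU fallback f_lowerbound_cpu are placeholders, not routines from this repo:

    #include "bssn_cuda_ops.h"

    // Placeholder CPU fallback; not part of this repo.
    void f_lowerbound_cpu(int *ex, double *chi, double tinny);

    // Sketch: clamp chi from below, preferring the device path and skipping the
    // device-to-host copy because the caller downloads the variable later.
    void lowerbound_chi(int ex[3], double *chi_field, double chitiny)
    {
        if (bssn_cuda_lowerbound(ex, chi_field, chitiny, /*download_to_host=*/false) != 0)
            f_lowerbound_cpu(ex, chi_field, chitiny);   // GPU path unavailable or failed
    }
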

View File

@@ -0,0 +1,479 @@
#include "macrodef.h"
#ifdef USE_GPU
#include <cmath>
#include <vector>
#include "bssn_class.h"
#include "bssn_cuda_ops.h"
#include "bssn_gpu.h"
#include "bssn_macro.h"
#include "rungekutta4_rout.h"
void bssn_class::Step_MainPath_GPU(int lev, int YN)
{
#ifdef WithShell
#error "Step_MainPath_GPU currently supports Patch grids only."
#endif
if (bssn_gpu_bind_process_device(myrank))
{
cerr << "GPU device bind failure on MPI rank " << myrank << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
bssn_gpu_clear_cached_device_buffers();
setpbh(BH_num, Porg0, Mass, BH_num_input);
const double dT_lev = dT * pow(0.5, Mymax(lev, trfls));
#if (MAPBH == 1)
if (BH_num > 0 && lev == GH->levels - 1)
{
compute_Porg_rhs(Porg0, Porg_rhs, Sfx0, Sfy0, Sfz0, lev);
for (int ithBH = 0; ithBH < BH_num; ithBH++)
{
for (int ith = 0; ith < 3; ith++)
Porg1[ithBH][ith] = Porg0[ithBH][ith] + Porg_rhs[ithBH][ith] * dT_lev;
if (Symmetry > 0)
Porg1[ithBH][2] = fabs(Porg1[ithBH][2]);
if (Symmetry == 2)
{
Porg1[ithBH][0] = fabs(Porg1[ithBH][0]);
Porg1[ithBH][1] = fabs(Porg1[ithBH][1]);
}
}
}
if (lev == a_lev)
AnalysisStuff(lev, dT_lev);
#endif
#ifdef With_AHF
AH_Step_Find(lev, dT_lev);
#endif
const bool BB = fgt(PhysTime, StartTime, dT_lev / 2);
(void)BB;
double ndeps = (lev < GH->movls) ? numepsb : numepss;
double TRK4 = PhysTime;
int iter_count = 0;
int pre = 0, cor = 1;
int ERROR = 0;
auto run_stage_on_block =
[&](Block *cg, Patch *patch, MyList<var> *state0_list,
MyList<var> *boundary_src_list, MyList<var> *stage_data_list,
MyList<var> *rhs_list, int rk_stage) {
MyList<var> *varl0 = state0_list;
MyList<var> *varlb = boundary_src_list;
MyList<var> *varls = stage_data_list;
MyList<var> *varlr = rhs_list;
while (varl0)
{
if (bssn_cuda_rk4_boundary_var(cg->shape, dT_lev,
cg->X[0], cg->X[1], cg->X[2],
patch->bbox[0], patch->bbox[1], patch->bbox[2],
patch->bbox[3], patch->bbox[4], patch->bbox[5],
cg->fgfs[varl0->data->sgfn],
cg->fgfs[varlb->data->sgfn],
cg->fgfs[varls->data->sgfn],
cg->fgfs[varlr->data->sgfn],
varl0->data->propspeed,
varl0->data->SoA,
Symmetry, lev, rk_stage, false))
{
cerr << "GPU rk4/boundary failure: lev=" << lev
<< " rk_stage=" << rk_stage
<< " var=" << varl0->data->name
<< " bbox=(" << cg->bbox[0] << ":" << cg->bbox[3] << ","
<< cg->bbox[1] << ":" << cg->bbox[4] << ","
<< cg->bbox[2] << ":" << cg->bbox[5] << ")" << endl;
ERROR = 1;
break;
}
varl0 = varl0->next;
varlb = varlb->next;
varls = varls->next;
varlr = varlr->next;
}
};
auto stage_download_var_list =
[&](Block *cg, MyList<var> *var_list) {
while (var_list)
{
if (bssn_cuda_download_buffer(cg->shape, cg->fgfs[var_list->data->sgfn]))
{
cerr << "GPU stage download failure: lev=" << lev
<< " var=" << var_list->data->name
<< " bbox=(" << cg->bbox[0] << ":" << cg->bbox[3] << ","
<< cg->bbox[1] << ":" << cg->bbox[4] << ","
<< cg->bbox[2] << ":" << cg->bbox[5] << ")" << endl;
ERROR = 1;
break;
}
var_list = var_list->next;
}
};
auto stage_upload_var_list =
[&](Block *cg, MyList<var> *var_list) {
const int n = cg->shape[0] * cg->shape[1] * cg->shape[2];
while (var_list)
{
if (bssn_gpu_stage_upload_buffer(cg->fgfs[var_list->data->sgfn], n))
{
cerr << "GPU state upload failure: lev=" << lev
<< " var=" << var_list->data->name
<< " bbox=(" << cg->bbox[0] << ":" << cg->bbox[3] << ","
<< cg->bbox[1] << ":" << cg->bbox[4] << ","
<< cg->bbox[2] << ":" << cg->bbox[5] << ")" << endl;
ERROR = 1;
break;
}
var_list = var_list->next;
}
};
auto ensure_stage_device_var_list =
[&](Block *cg, MyList<var> *var_list) {
const int n = cg->shape[0] * cg->shape[1] * cg->shape[2];
while (var_list)
{
double *host_ptr = cg->fgfs[var_list->data->sgfn];
if (!bssn_gpu_find_device_buffer(host_ptr) &&
bssn_gpu_stage_upload_buffer(host_ptr, n))
{
cerr << "GPU state ensure failure: lev=" << lev
<< " var=" << var_list->data->name
<< " bbox=(" << cg->bbox[0] << ":" << cg->bbox[3] << ","
<< cg->bbox[1] << ":" << cg->bbox[4] << ","
<< cg->bbox[2] << ":" << cg->bbox[5] << ")" << endl;
ERROR = 1;
break;
}
var_list = var_list->next;
}
};
auto refresh_synced_device_regions =
[&](Block *cg, MyList<var> *var_list, Parallel::SyncCache &cache) {
std::vector<Parallel::gridseg *> local_segments;
for (int node = 0; node < cache.cpusize; ++node)
{
MyList<Parallel::gridseg> *seg = cache.combined_dst[node];
while (seg)
{
if (seg->data && seg->data->Bg == cg)
local_segments.push_back(seg->data);
seg = seg->next;
}
}
if (local_segments.empty())
return;
const int n = cg->shape[0] * cg->shape[1] * cg->shape[2];
while (var_list)
{
double *host_ptr = cg->fgfs[var_list->data->sgfn];
if (!bssn_gpu_find_device_buffer(host_ptr))
{
if (bssn_gpu_stage_upload_buffer(host_ptr, n))
{
cerr << "GPU sync refresh upload failure: lev=" << lev
<< " var=" << var_list->data->name
<< " bbox=(" << cg->bbox[0] << ":" << cg->bbox[3] << ","
<< cg->bbox[1] << ":" << cg->bbox[4] << ","
<< cg->bbox[2] << ":" << cg->bbox[5] << ")" << endl;
ERROR = 1;
break;
}
}
else
{
for (size_t i = 0; i < local_segments.size(); ++i)
{
Parallel::gridseg *seg = local_segments[i];
if (bssn_gpu_stage_upload_region(host_ptr,
cg->shape,
cg->bbox,
cg->bbox + dim,
seg->shape,
seg->llb))
{
cerr << "GPU sync region refresh failure: lev=" << lev
<< " var=" << var_list->data->name
<< " bbox=(" << cg->bbox[0] << ":" << cg->bbox[3] << ","
<< cg->bbox[1] << ":" << cg->bbox[4] << ","
<< cg->bbox[2] << ":" << cg->bbox[5] << ")" << endl;
ERROR = 1;
break;
}
}
if (ERROR)
break;
}
var_list = var_list->next;
}
};
auto refresh_stage_device_after_sync =
[&](MyList<var> *var_list, Parallel::SyncCache &cache) {
MyList<Patch> *patch_it = GH->PatL[lev];
while (patch_it)
{
MyList<Block> *block_it = patch_it->data->blb;
while (block_it)
{
Block *cg = block_it->data;
if (myrank == cg->rank)
refresh_synced_device_regions(cg, var_list, cache);
if (block_it == patch_it->data->ble)
break;
block_it = block_it->next;
}
if (ERROR)
break;
patch_it = patch_it->next;
}
};
MyList<Patch> *Pp = GH->PatL[lev];
while (Pp)
{
MyList<Block> *BP = Pp->data->blb;
while (BP)
{
Block *cg = BP->data;
if (myrank == cg->rank)
{
stage_upload_var_list(cg, StateList);
if (gpu_rhs(CALLED_BY_STEP, myrank, RHS_PARA_CALLED_FIRST_TIME))
ERROR = 1;
run_stage_on_block(cg, Pp->data, StateList, StateList, SynchList_pre, RHSList, iter_count);
if (bssn_cuda_lowerbound(cg->shape, cg->fgfs[phi->sgfn], chitiny, false))
{
cerr << "GPU lowerbound failure: lev=" << lev
<< " rk_stage=" << iter_count
<< " var=" << phi->name
<< " bbox=(" << cg->bbox[0] << ":" << cg->bbox[3] << ","
<< cg->bbox[1] << ":" << cg->bbox[4] << ","
<< cg->bbox[2] << ":" << cg->bbox[5] << ")" << endl;
ERROR = 1;
}
if (!ERROR)
stage_download_var_list(cg, SynchList_pre);
}
if (BP == Pp->data->ble)
break;
BP = BP->next;
}
Pp = Pp->next;
}
MPI_Request err_req_pre;
{
int erh = ERROR;
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_pre);
}
Parallel::AsyncSyncState async_pre;
Parallel::Sync_start(GH->PatL[lev], SynchList_pre, Symmetry, sync_cache_pre[lev], async_pre);
Parallel::Sync_finish(sync_cache_pre[lev], async_pre, SynchList_pre, Symmetry);
if (!ERROR)
refresh_stage_device_after_sync(SynchList_pre, sync_cache_pre[lev]);
MPI_Wait(&err_req_pre, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], StateList, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in state variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#if (MAPBH == 0)
if (BH_num > 0 && lev == GH->levels - 1)
{
compute_Porg_rhs(Porg0, Porg_rhs, Sfx0, Sfy0, Sfz0, lev);
for (int ithBH = 0; ithBH < BH_num; ithBH++)
{
f_rungekutta4_scalar(dT_lev, Porg0[ithBH][0], Porg[ithBH][0], Porg_rhs[ithBH][0], iter_count);
f_rungekutta4_scalar(dT_lev, Porg0[ithBH][1], Porg[ithBH][1], Porg_rhs[ithBH][1], iter_count);
f_rungekutta4_scalar(dT_lev, Porg0[ithBH][2], Porg[ithBH][2], Porg_rhs[ithBH][2], iter_count);
if (Symmetry > 0)
Porg[ithBH][2] = fabs(Porg[ithBH][2]);
if (Symmetry == 2)
{
Porg[ithBH][0] = fabs(Porg[ithBH][0]);
Porg[ithBH][1] = fabs(Porg[ithBH][1]);
}
}
}
if (lev == a_lev)
AnalysisStuff(lev, dT_lev);
#endif
for (iter_count = 1; iter_count < 4; iter_count++)
{
if (iter_count == 1 || iter_count == 3)
TRK4 += dT_lev / 2;
Pp = GH->PatL[lev];
while (Pp)
{
MyList<Block> *BP = Pp->data->blb;
while (BP)
{
Block *cg = BP->data;
if (myrank == cg->rank)
{
ensure_stage_device_var_list(cg, SynchList_pre);
if (gpu_rhs(CALLED_BY_STEP, myrank, RHS_PARA_CALLED_THEN))
ERROR = 1;
run_stage_on_block(cg, Pp->data, StateList, SynchList_pre, SynchList_cor, RHSList, iter_count);
if (bssn_cuda_lowerbound(cg->shape, cg->fgfs[phi1->sgfn], chitiny, false))
{
cerr << "GPU lowerbound failure: lev=" << lev
<< " rk_stage=" << iter_count
<< " var=" << phi1->name
<< " bbox=(" << cg->bbox[0] << ":" << cg->bbox[3] << ","
<< cg->bbox[1] << ":" << cg->bbox[4] << ","
<< cg->bbox[2] << ":" << cg->bbox[5] << ")" << endl;
ERROR = 1;
}
if (!ERROR)
stage_download_var_list(cg, SynchList_cor);
}
if (BP == Pp->data->ble)
break;
BP = BP->next;
}
Pp = Pp->next;
}
MPI_Request err_req_cor;
{
int erh = ERROR;
MPI_Iallreduce(&erh, &ERROR, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD, &err_req_cor);
}
Parallel::AsyncSyncState async_cor;
Parallel::Sync_start(GH->PatL[lev], SynchList_cor, Symmetry, sync_cache_cor[lev], async_cor);
Parallel::Sync_finish(sync_cache_cor[lev], async_cor, SynchList_cor, Symmetry);
if (!ERROR && iter_count < 3)
refresh_stage_device_after_sync(SynchList_cor, sync_cache_cor[lev]);
MPI_Wait(&err_req_cor, MPI_STATUS_IGNORE);
if (ERROR)
{
Parallel::Dump_Data(GH->PatL[lev], SynchList_pre, 0, PhysTime, dT_lev);
if (myrank == 0)
{
if (ErrorMonitor->outfile)
ErrorMonitor->outfile << "find NaN in RK4 substep#" << iter_count
<< " variables at t = " << PhysTime
<< ", lev = " << lev << endl;
MPI_Abort(MPI_COMM_WORLD, 1);
}
}
#if (MAPBH == 0)
if (BH_num > 0 && lev == GH->levels - 1)
{
compute_Porg_rhs(Porg, Porg_rhs, Sfx, Sfy, Sfz, lev);
for (int ithBH = 0; ithBH < BH_num; ithBH++)
{
f_rungekutta4_scalar(dT_lev, Porg0[ithBH][0], Porg1[ithBH][0], Porg_rhs[ithBH][0], iter_count);
f_rungekutta4_scalar(dT_lev, Porg0[ithBH][1], Porg1[ithBH][1], Porg_rhs[ithBH][1], iter_count);
f_rungekutta4_scalar(dT_lev, Porg0[ithBH][2], Porg1[ithBH][2], Porg_rhs[ithBH][2], iter_count);
if (Symmetry > 0)
Porg1[ithBH][2] = fabs(Porg1[ithBH][2]);
if (Symmetry == 2)
{
Porg1[ithBH][0] = fabs(Porg1[ithBH][0]);
Porg1[ithBH][1] = fabs(Porg1[ithBH][1]);
}
}
}
#endif
if (iter_count < 3)
{
Pp = GH->PatL[lev];
while (Pp)
{
MyList<Block> *BP = Pp->data->blb;
while (BP)
{
BP->data->swapList(SynchList_pre, SynchList_cor, myrank);
if (BP == Pp->data->ble)
break;
BP = BP->next;
}
Pp = Pp->next;
}
#if (MAPBH == 0)
if (BH_num > 0 && lev == GH->levels - 1)
{
for (int ithBH = 0; ithBH < BH_num; ithBH++)
{
Porg[ithBH][0] = Porg1[ithBH][0];
Porg[ithBH][1] = Porg1[ithBH][1];
Porg[ithBH][2] = Porg1[ithBH][2];
}
}
#endif
}
}
#if (RPS == 0)
RestrictProlong(lev, YN, BB);
#endif
bssn_gpu_clear_cached_device_buffers();
Pp = GH->PatL[lev];
while (Pp)
{
MyList<Block> *BP = Pp->data->blb;
while (BP)
{
Block *cg = BP->data;
cg->swapList(StateList, SynchList_cor, myrank);
cg->swapList(OldStateList, SynchList_cor, myrank);
if (BP == Pp->data->ble)
break;
BP = BP->next;
}
Pp = Pp->next;
}
if (BH_num > 0 && lev == GH->levels - 1)
{
for (int ithBH = 0; ithBH < BH_num; ithBH++)
{
Porg0[ithBH][0] = Porg1[ithBH][0];
Porg0[ithBH][1] = Porg1[ithBH][1];
Porg0[ithBH][2] = Porg1[ithBH][2];
}
}
}
#endif
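The new Step_MainPath_GPU above keeps the classical four-stage RK4 layout of the CPU path: one predictor stage at iter_count 0 followed by three corrector stages, with the stage time TRK4 advanced by dT_lev/2 before stages 1 and 3 and the pre/cor synchronization lists swapped between stages. The sketch below is only a reminder of that stage/time pattern for a scalar ODE; it is not the project's f_rungekutta4_scalar routine.

#include <cstdio>

// Schematic classical RK4 with the same stage times as the loop above:
// t, t + dT/2, t + dT/2, t + dT.  Illustration only.
double rk4_step(double (*rhs)(double t, double u), double t, double u0, double dT)
{
    const double k1 = rhs(t,            u0);
    const double k2 = rhs(t + dT / 2.0, u0 + dT / 2.0 * k1);
    const double k3 = rhs(t + dT / 2.0, u0 + dT / 2.0 * k2);
    const double k4 = rhs(t + dT,       u0 + dT * k3);
    return u0 + dT / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4);
}

int main()
{
    // du/dt = -u from u(0) = 1: one step of size 0.1 should give ~exp(-0.1).
    const double u = rk4_step([](double, double u) { return -u; }, 0.0, 1.0, 0.1);
    std::printf("u(0.1) = %.8f (exact 0.90483742)\n", u);
    return 0;
}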

File diff suppressed because it is too large

View File

@@ -4,10 +4,8 @@
#include "bssn_macro.h"
#include "macrodef.fh"
#define DEVICE_ID 0
// #define DEVICE_ID_BY_MPI_RANK
#define GRID_DIM 256
#define BLOCK_DIM 128
#define GRID_DIM 256
#define BLOCK_DIM 128
#define _FH2_(i, j, k) fh[(i) + (j) * _1D_SIZE[2] + (k) * _2D_SIZE[2]]
#define _FH3_(i, j, k) fh[(i) + (j) * _1D_SIZE[3] + (k) * _2D_SIZE[3]]
@@ -65,9 +63,22 @@ int gpu_rhs(int calledby, int mpi_rank, int *ex, double &T,
double *Gmx_Res, double *Gmy_Res, double *Gmz_Res,
int &Symmetry, int &Lev, double &eps, int &co);
int gpu_rhs_ss(RHS_SS_PARA);
/** Init GPU side data in GPUMeta. */
// void init_fluid_meta_gpu(GPUMeta *gpu_meta);
int gpu_rhs_ss(RHS_SS_PARA);
int bssn_gpu_bind_process_device(int mpi_rank);
void bssn_gpu_clear_cached_device_buffers();
const double *bssn_gpu_find_device_buffer(const double *host_ptr);
void bssn_gpu_register_device_buffer(const double *host_ptr, const double *device_ptr);
int bssn_gpu_stage_upload_buffer(const double *host_ptr, int count);
int bssn_gpu_stage_zero_buffer(const double *host_ptr, int count);
int bssn_gpu_stage_upload_region(const double *host_ptr,
const int *full_shape,
const double *full_llb,
const double *full_uub,
const int *region_shape,
const double *region_llb);
/** Init GPU side data in GPUMeta. */
// void init_fluid_meta_gpu(GPUMeta *gpu_meta);
#endif
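The header hunk above adds the host-pointer-keyed staging helpers that Step_MainPath_GPU relies on: bind the MPI rank to a device once, upload whole fields on first use, refresh only exchanged sub-regions after a sync, and drop the cached device copies at the end of the step. The sketch below strings those declared calls together for a single made-up field; the shapes, bounds, and surrounding control flow (per-block loops, error all-reduce, MPI sync) are placeholders.

// Sketch only: assumes the header carrying the declarations above is included.
int stage_field_sketch(int mpi_rank)
{
    const int shape[3] = {16, 16, 16};                  // made-up block shape
    const int count = shape[0] * shape[1] * shape[2];
    static double field[16 * 16 * 16];                  // stands in for one grid function

    if (bssn_gpu_bind_process_device(mpi_rank))         // pin this rank to a GPU
        return 1;

    if (!bssn_gpu_find_device_buffer(field) &&          // upload only if not cached yet
        bssn_gpu_stage_upload_buffer(field, count))
        return 1;

    // After a ghost-zone exchange, refresh only the exchanged region,
    // mirroring what refresh_synced_device_regions does per grid segment.
    const double full_llb[3]     = {0.0, 0.0, 0.0};     // made-up bounds
    const double full_uub[3]     = {1.0, 1.0, 1.0};
    const int    region_shape[3] = {16, 16, 2};
    const double region_llb[3]   = {0.0, 0.0, 0.0};
    if (bssn_gpu_stage_upload_region(field, shape, full_llb, full_uub,
                                     region_shape, region_llb))
        return 1;

    bssn_gpu_clear_cached_device_buffers();             // drop device copies when done
    return 0;
}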

View File

@@ -30,8 +30,8 @@ CXXAPPFLAGS = -O3 -xHost -fp-model fast=2 -fma -ipo \
f90appflags = -O3 -xHost -fp-model fast=2 -fma -ipo \
-align array64byte -fpp -I${MKLROOT}/include $(POLINT6_FLAG)
endif
.SUFFIXES: .o .f90 .C .for .cu
.SUFFIXES: .o .f90 .C .for .cu
.f90.o:
$(f90) $(f90appflags) -c $< -o $@
@@ -105,13 +105,12 @@ C++FILES = ABE.o Ansorg.o Block.o misc.o monitor.o Parallel.o MPatch.o var.o\
Parallel_bam.o scalar_class.o transpbh.o NullShellPatch2.o\
NullShellPatch2_Evo.o writefile_f.o interp_lb_profile.o
C++FILES_GPU = ABE.o Ansorg.o Block.o misc.o monitor.o Parallel.o MPatch.o var.o\
cgh.o surface_integral.o ShellPatch.o\
bssnEScalar_class.o perf.o Z4c_class.o NullShellPatch.o\
bssnEM_class.o cpbc_util.o z4c_rhs_point.o checkpoint.o\
Parallel_bam.o scalar_class.o transpbh.o NullShellPatch2.o\
NullShellPatch2_Evo.o \
bssn_gpu_class.o bssn_step_gpu.o bssn_macro.o writefile_f.o
C++FILES_GPU = ABE.o Ansorg.o Block.o misc.o monitor.o Parallel.o MPatch.o var.o\
cgh.o bssn_class.o surface_integral.o ShellPatch.o\
bssnEScalar_class.o perf.o Z4c_class.o NullShellPatch.o\
bssnEM_class.o cpbc_util.o z4c_rhs_point.o checkpoint.o\
Parallel_bam.o scalar_class.o transpbh.o NullShellPatch2.o\
NullShellPatch2_Evo.o bssn_cuda_step.o writefile_f.o
F90FILES_BASE = enforce_algebra.o fmisc.o initial_puncture.o prolongrestrict.o\
prolongrestrict_cell.o prolongrestrict_vertex.o\
@@ -143,7 +142,7 @@ initial_guess.o Newton.o Jacobian.o ilucg.o IntPnts0.o IntPnts.o
TwoPunctureFILES = TwoPunctureABE.o TwoPunctures.o
CUDAFILES = bssn_gpu.o bssn_gpu_rhs_ss.o
CUDAFILES = bssn_gpu.o bssn_cuda_ops.o
# file dependences
$(C++FILES) $(C++FILES_GPU) $(F90FILES) $(CFILES) $(AHFDOBJS) $(CUDAFILES): macrodef.fh

View File

@@ -9,6 +9,7 @@ filein = -I/usr/include/ -I${MKLROOT}/include
## Using sequential MKL (OpenMP disabled for better single-threaded performance)
## Added -lifcore for Intel Fortran runtime and -limf for Intel math library
LDLIBS = -L${MKLROOT}/lib -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -lifcore -limf -lpthread -lm -ldl -liomp5
CUDA_LDLIBS = -L/usr/local/cuda-12.9/targets/x86_64-linux/lib -lcudart
## Memory allocator switch
## 1 (default) : link Intel oneTBB allocator (libtbbmalloc)
@@ -24,6 +25,8 @@ ifeq ($(USE_TBBMALLOC),1)
LDLIBS := $(TBBMALLOC_LIBS) $(LDLIBS)
endif
LDLIBS := $(CUDA_LDLIBS) $(LDLIBS)
## PGO build mode switch (ABE only; TwoPunctureABE always uses opt flags)
## opt : (default) maximum performance with PGO profile-guided optimization
## instrument : PGO Phase 1 instrumentation to collect fresh profile data

View File

@@ -180,19 +180,64 @@ surface_integral::surface_integral(int iSymmetry) : Symmetry(iSymmetry)
//|============================================================================
//| Destructor
//|============================================================================
surface_integral::~surface_integral()
{
delete[] nx_g;
delete[] ny_g;
delete[] nz_g;
delete[] arcostheta;
#ifdef GaussInt
delete[] wtcostheta;
#endif
}
//|----------------------------------------------------------------
// spin weighted spinw component of psi4, general routine
// l takes from spinw to maxl; m takes from -l to l
surface_integral::~surface_integral()
{
release_cached_buffers();
delete[] nx_g;
delete[] ny_g;
delete[] nz_g;
delete[] arcostheta;
#ifdef GaussInt
delete[] wtcostheta;
#endif
}
void surface_integral::get_surface_points(double rex, double **pox)
{
SpherePointCache &cache = sphere_point_cache[rex];
if (!cache.pox[0])
{
for (int i = 0; i < 3; ++i)
cache.pox[i] = new double[n_tot];
for (int n = 0; n < n_tot; ++n)
{
cache.pox[0][n] = rex * nx_g[n];
cache.pox[1][n] = rex * ny_g[n];
cache.pox[2][n] = rex * nz_g[n];
}
}
pox[0] = cache.pox[0];
pox[1] = cache.pox[1];
pox[2] = cache.pox[2];
}
double *surface_integral::get_shellf_buffer(int num_var)
{
double *&buffer = shellf_cache[num_var];
if (!buffer)
buffer = new double[n_tot * num_var];
return buffer;
}
void surface_integral::release_cached_buffers()
{
for (map<double, SpherePointCache>::iterator it = sphere_point_cache.begin(); it != sphere_point_cache.end(); ++it)
{
delete[] it->second.pox[0];
delete[] it->second.pox[1];
delete[] it->second.pox[2];
it->second.pox[0] = it->second.pox[1] = it->second.pox[2] = 0;
}
sphere_point_cache.clear();
for (map<int, double *>::iterator it = shellf_cache.begin(); it != shellf_cache.end(); ++it)
delete[] it->second;
shellf_cache.clear();
}
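get_surface_points and get_shellf_buffer above memoize the per-radius sphere points and the interpolation scratch buffer so repeated wave extractions stop reallocating them every call; release_cached_buffers frees both maps from the destructor. A generic sketch of the same lazy-cache idiom follows (illustrative names only); keying a std::map on a double works as long as callers reuse bit-identical radii, the usual case when extraction radii come from fixed parameters.

#include <map>
#include <vector>

// Sketch of the lazy caching pattern used above: std::map::operator[]
// default-constructs the entry on first lookup, so the fill runs once per key.
std::vector<double> &scaled_points(double rex, const std::vector<double> &unit_normals)
{
    static std::map<double, std::vector<double> > cache; // keyed by extraction radius
    std::vector<double> &entry = cache[rex];
    if (entry.empty())
    {
        entry.reserve(unit_normals.size());
        for (size_t i = 0; i < unit_normals.size(); ++i)
            entry.push_back(rex * unit_normals[i]);       // scale unit normals once
    }
    return entry;
}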
//|----------------------------------------------------------------
// spin weighted spinw component of psi4, general routine
// l takes from spinw to maxl; m takes from -l to l
//|----------------------------------------------------------------
void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *Ipsi4,
int spinw, int maxl, int NN, double *RP, double *IP,
@@ -209,16 +254,9 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
MyList<var> *DG_List = new MyList<var>(Rpsi4);
DG_List->insert(Ipsi4);
int n;
double *pox[3];
for (int i = 0; i < 3; i++)
pox[i] = new double[n_tot];
for (n = 0; n < n_tot; n++)
{
pox[0][n] = rex * nx_g[n];
pox[1][n] = rex * ny_g[n];
pox[2][n] = rex * nz_g[n];
}
int n;
double *pox[3];
get_surface_points(rex, pox);
int mp, Lp, Nmin, Nmax;
mp = n_tot / cpusize;
@@ -234,8 +272,7 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
Nmax = Nmin + mp - 1;
}
double *shellf;
shellf = new double[n_tot * InList];
double *shellf = get_shellf_buffer(InList);
GH->PatL[lev]->data->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry, Nmin, Nmax);
@@ -375,14 +412,10 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
//|------= Free memory.
delete[] pox[0];
delete[] pox[1];
delete[] pox[2];
delete[] shellf;
delete[] RP_out;
delete[] IP_out;
DG_List->clearList();
}
delete[] RP_out;
delete[] IP_out;
DG_List->clearList();
}
void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *Ipsi4,
int spinw, int maxl, int NN, double *RP, double *IP,
monitor *Monitor, MPI_Comm Comm_here) // NN is the length of RP and IP
@@ -402,19 +435,11 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
MyList<var> *DG_List = new MyList<var>(Rpsi4);
DG_List->insert(Ipsi4);
int n;
double *pox[3];
for (int i = 0; i < 3; i++)
pox[i] = new double[n_tot];
for (n = 0; n < n_tot; n++)
{
pox[0][n] = rex * nx_g[n];
pox[1][n] = rex * ny_g[n];
pox[2][n] = rex * nz_g[n];
}
double *shellf;
shellf = new double[n_tot * InList];
int n;
double *pox[3];
get_surface_points(rex, pox);
double *shellf = get_shellf_buffer(InList);
// misc::tillherecheck(GH->Commlev[lev],GH->start_rank[lev],"before Interp_Points");
@@ -577,14 +602,10 @@ void surface_integral::surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *
//|------= Free memory.
delete[] pox[0];
delete[] pox[1];
delete[] pox[2];
delete[] shellf;
delete[] RP_out;
delete[] IP_out;
DG_List->clearList();
}
delete[] RP_out;
delete[] IP_out;
DG_List->clearList();
}
//|----------------------------------------------------------------
// for shell patch
//|----------------------------------------------------------------
@@ -597,19 +618,11 @@ void surface_integral::surf_Wave(double rex, int lev, ShellPatch *GH, var *Rpsi4
MyList<var> *DG_List = new MyList<var>(Rpsi4);
DG_List->insert(Ipsi4);
int n;
double *pox[3];
for (int i = 0; i < 3; i++)
pox[i] = new double[n_tot];
for (n = 0; n < n_tot; n++)
{
pox[0][n] = rex * nx_g[n];
pox[1][n] = rex * ny_g[n];
pox[2][n] = rex * nz_g[n];
}
int n;
double *pox[3];
get_surface_points(rex, pox);
double *shellf;
shellf = new double[n_tot * InList];
double *shellf = get_shellf_buffer(InList);
GH->Interp_Points(DG_List, n_tot, pox, shellf, Symmetry);
@@ -2570,12 +2583,8 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
Rout[5] = sy;
Rout[6] = sz;
delete[] pox[0];
delete[] pox[1];
delete[] pox[2];
delete[] shellf;
DG_List->clearList();
}
DG_List->clearList();
}
void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var *trK,
var *gxx, var *gxy, var *gxz, var *gyy, var *gyz, var *gzz,
var *Axx, var *Axy, var *Axz, var *Ayy, var *Ayz, var *Azz,
@@ -2637,19 +2646,11 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
DG_List->insert(Ayz);
DG_List->insert(Azz);
int n;
double *pox[3];
for (int i = 0; i < 3; i++)
pox[i] = new double[n_tot];
for (n = 0; n < n_tot; n++)
{
pox[0][n] = rex * nx_g[n];
pox[1][n] = rex * ny_g[n];
pox[2][n] = rex * nz_g[n];
}
double *shellf;
shellf = new double[n_tot * InList];
int n;
double *pox[3];
get_surface_points(rex, pox);
double *shellf = get_shellf_buffer(InList);
// we have assumed there is only one box on this level,
// so we do not need loop boxes
@@ -2839,12 +2840,8 @@ void surface_integral::surf_MassPAng(double rex, int lev, cgh *GH, var *chi, var
Rout[5] = sy;
Rout[6] = sz;
delete[] pox[0];
delete[] pox[1];
delete[] pox[2];
delete[] shellf;
DG_List->clearList();
}
DG_List->clearList();
}
//|----------------------------------------------------------------
// for shell patch
//|----------------------------------------------------------------

View File

@@ -20,25 +20,41 @@ using namespace std;
#include "cgh.h"
#include "ShellPatch.h"
#include "NullShellPatch.h"
#include "NullShellPatch2.h"
#include "var.h"
#include "monitor.h"
#include "NullShellPatch2.h"
#include "var.h"
#include "monitor.h"
#include <map>
class surface_integral
{
private:
int Symmetry, factor;
int N_theta, N_phi; // Number of points in Theta & Phi directions
double dphi, dcostheta;
double *arcostheta, *wtcostheta;
int n_tot; // size of arrays
double *nx_g, *ny_g, *nz_g; // global list of unit normals
int myrank, cpusize;
public:
surface_integral(int iSymmetry);
private:
struct SpherePointCache
{
double *pox[3];
SpherePointCache()
{
pox[0] = pox[1] = pox[2] = 0;
}
};
int Symmetry, factor;
int N_theta, N_phi; // Number of points in Theta & Phi directions
double dphi, dcostheta;
double *arcostheta, *wtcostheta;
int n_tot; // size of arrays
double *nx_g, *ny_g, *nz_g; // global list of unit normals
int myrank, cpusize;
map<double, SpherePointCache> sphere_point_cache;
map<int, double *> shellf_cache;
void get_surface_points(double rex, double **pox);
double *get_shellf_buffer(int num_var);
void release_cached_buffers();
public:
surface_integral(int iSymmetry);
~surface_integral();
void surf_Wave(double rex, int lev, cgh *GH, var *Rpsi4, var *Ipsi4,

View File

@@ -9,6 +9,7 @@
import AMSS_NCKU_Input as input_data
import os
import subprocess
import time
@@ -57,6 +58,48 @@ BUILD_JOBS = 64
##################################################################
##################################################################
def prepare_gpu_runtime_env():
"""
Create a user-private CUDA MPS environment for GPU runs.
On shared machines another user's daemon may already occupy the default
/tmp/nvidia-mps pipe directory, which causes plain cudaSetDevice/cudaMalloc
calls to fail with cudaErrorMpsConnectionFailed. Binding AMSS-NCKU to a
private pipe directory avoids this cross-user interference.
"""
env = os.environ.copy()
pipe_dir = env.get("CUDA_MPS_PIPE_DIRECTORY", f"/tmp/amss-ncku-mps-{os.getuid()}")
log_dir = env.get("CUDA_MPS_LOG_DIRECTORY", f"/tmp/amss-ncku-mps-log-{os.getuid()}")
os.makedirs(pipe_dir, exist_ok=True)
os.makedirs(log_dir, exist_ok=True)
env["CUDA_MPS_PIPE_DIRECTORY"] = pipe_dir
env["CUDA_MPS_LOG_DIRECTORY"] = log_dir
control_socket = os.path.join(pipe_dir, "control")
if not os.path.exists(control_socket):
start = subprocess.run(
["nvidia-cuda-mps-control", "-d"],
env=env,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
if start.returncode != 0:
print(f" Warning: failed to start private CUDA MPS daemon in {pipe_dir}")
else:
print(f" Using private CUDA MPS pipe directory: {pipe_dir}")
else:
print(f" Using existing private CUDA MPS pipe directory: {pipe_dir}")
return env
##################################################################
##################################################################
@@ -146,16 +189,29 @@ def run_ABE():
## Define the command to run; cast other values to strings as needed
run_env = None
if (input_data.GPU_Calculation == "no"):
mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABE"
#mpi_command = " mpirun -np " + str(input_data.MPI_processes) + " ./ABE"
mpi_command_outfile = "ABE_out.log"
elif (input_data.GPU_Calculation == "yes"):
mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABEGPU"
run_env = prepare_gpu_runtime_env()
if int(input_data.MPI_processes) == 1:
mpi_command = "./ABEGPU"
else:
mpi_command = NUMACTL_CPU_BIND + " mpirun -np " + str(input_data.MPI_processes) + " ./ABEGPU"
mpi_command_outfile = "ABEGPU_out.log"
## Execute the MPI command and stream output
mpi_process = subprocess.Popen(mpi_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
mpi_process = subprocess.Popen(
mpi_command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
env=run_env,
)
## Write ABE run output to file while printing to stdout
with open(mpi_command_outfile, 'w') as file0: