OPALX (Object Oriented Parallel Accelerator Library for Exascale) MINIorX
OPALX
PartBunch.cpp
Go to the documentation of this file.
2#include <boost/numeric/ublas/io.hpp>
3#include "Utilities/Util.h"
4
5#undef doDEBUG
6
7template <typename T, unsigned Dim>
9 double mi,
10 size_t totalP,
11 /*int nt,*/
12 double lbt,
13 std::string integration_method,
14 std::shared_ptr<Distribution> &OPALdistribution,
15 std::shared_ptr<FieldSolverCmd> &OPALFieldSolver)
16 : ippl::PicManager<T, Dim, ParticleContainer<T, Dim>, FieldContainer<T, Dim>, LoadBalancer<T, Dim>>(),
17 time_m(0.0),
18 totalP_m(totalP),
19 //nt_m(nt),
20 lbt_m(lbt),
21 dt_m(0),
22 it_m(0),
23 integration_method_m(integration_method),
24 solver_m(""),
25 isFirstRepartition_m(true),
26 qi_m(qi),
27 mi_m(mi),
28 rmsDensity_m(0.0),
29 RefPartR_m(0.0),
30 RefPartP_m(0.0),
31 localTrackStep_m(0),
32 globalTrackStep_m(0),
33 OPALdist_m(OPALdistribution),
34 OPALFieldSolver_m(OPALFieldSolver) {
35
36 static IpplTimings::TimerRef gatherInfoPartBunch = IpplTimings::getTimer("gatherInfoPartBunch");
37 IpplTimings::startTimer(gatherInfoPartBunch);
38
39 *gmsg << "PartBunch Constructor" << endl;
40
41 // get the needed information from OPAL FieldSolver command
42
44 OPALFieldSolver_m->getNX(), OPALFieldSolver_m->getNY(), OPALFieldSolver_m->getNZ());
45
46 const Vector_t<bool, 3> domainDecomposition = OPALFieldSolver_m->getDomDec();
47
48 for (unsigned i = 0; i < Dim; i++) {
49 this->domain_m[i] = ippl::Index(nr_m[i]);
50 this->decomp_m[i] = domainDecomposition[i];
51 }
52
53 this->setBCHandler(std::make_shared<BCHandler_t>(
54 OPALFieldSolver_m->constructBCHandler()
55 ));
56
58 bool isAllPeriodic = this->getBCHandler()->isAll(BCHandler_t::PERIODIC);
59 *gmsg << "* FieldContainer set to isAllPeriodic = " << isAllPeriodic << endl;
60
61 // set stuff for pre_run i.e. warmup
62 // this will be reset when the correct computational
63 // domain is set
64
65 Vector_t<double, Dim> length (6.0);
66 this->hr_m = length / this->nr_m;
67 this->origin_m = -3.0;
68 this->dt_m = 0.5 / this->nr_m[2];
69
71 rmax_m = origin_m + length;
72
73 this->setFieldContainer(std::make_shared<FieldContainer_t>(
74 hr_m, rmin_m, rmax_m, decomp_m, domain_m, origin_m, isAllPeriodic
75 ));
76
77 this->setParticleContainer(std::make_shared<ParticleContainer_t>(
78 this->fcontainer_m->getMesh(), this->fcontainer_m->getFL()
79 ));
80
81 IpplTimings::stopTimer(gatherInfoPartBunch);
82
83 // ---------------- binning setup ----------------
84 using bin_index_type = typename ParticleContainer_t::bin_index_type;
85 bin_index_type maxBins = Options::maxBins;
86 this->setBins(std::make_shared<AdaptBins_t>(
87 this->getParticleContainer(),
88 BinningSelector_t(2), // TODO: hardcode z axis with coordinate selector at axis index 2
89 static_cast<bin_index_type>(maxBins),
91 ));
92 this->getBins()->debug();
93
94 this->setTempEField(std::make_shared<VField_t<T, Dim>>(
95 this->fcontainer_m->getE()
96 )); // user copy constructor
97 this->getTempEField()->initialize(this->fcontainer_m->getMesh(),
98 this->fcontainer_m->getFL());
99 // -----------------------------------------------
100
101 static IpplTimings::TimerRef setSolverT = IpplTimings::getTimer("setSolver");
102 IpplTimings::startTimer(setSolverT);
103 setSolver(OPALFieldSolver_m->getType());
104 IpplTimings::stopTimer(setSolverT);
105
106
107 static IpplTimings::TimerRef prerun = IpplTimings::getTimer("prerun");
109 pre_run();
111
112
113 globalPartPerNode_m = std::make_unique<size_t[]>(ippl::Comm->size());
114
115 *gmsg << "* PartBunch constructor done." << endl;
116}
117
118template <typename T, unsigned Dim>
121 typename Base::particle_position_type* Ep = &this->pcontainer_m->E;
122 typename Base::particle_position_type* R = &this->pcontainer_m->R;
123 VField_t<T, Dim>* Ef = &this->fcontainer_m->getE();
124 gather(*Ep, *Ef, *R);
125}
126
127template <typename T, unsigned Dim>
130 std::shared_ptr<FieldContainer_t> fc = this->fcontainer_m;
131
132 size_type totalP = this->getTotalNum();
133
134 if (this->loadbalancer_m->balance(totalP)) {
135 auto* mesh = &fc->getRho().get_mesh();
136 auto* FL = &fc->getFL();
137 this->loadbalancer_m->repartition(FL, mesh, isFirstRepartition_m);
138 }
139}
140
141template <typename T, unsigned Dim>
143 std::fill_n(globalPartPerNode_m.get(), ippl::Comm->size(), 0); // Fill the array with zeros
144 globalPartPerNode_m[ippl::Comm->rank()] = getLocalNum();
145 ippl::Comm->allreduce(globalPartPerNode_m.get(),
146 ippl::Comm->size(),
147 std::plus<size_t>());
148}
149
150template <typename T, unsigned Dim>
151void PartBunch<T, Dim>::setSolver(std::string solver) {
152 if (this->solver_m != "")
153 *gmsg << "* Warning solver already initiated but overwrite ..." << endl;
154
155 this->solver_m = solver;
156
157 this->fcontainer_m->initializeFields(this->solver_m);
158
159 this->setFieldSolver(std::make_shared<FieldSolver_t>(this->solver_m, &this->fcontainer_m->getRho(), &this->fcontainer_m->getE(),
160 &this->fcontainer_m->getPhi()));
161
162 this->fsolver_m->initSolver();
163
165 this->setLoadBalancer(std::make_shared<LoadBalancer_t>(this->lbt_m, this->fcontainer_m, this->pcontainer_m, this->fsolver_m));
166
167 *gmsg << "* Solver and Load Balancer set" << endl;
168}
169
170template <typename T, unsigned Dim>
172 Inform msg("EParticleStats");
173
174 auto pE_view = this->pcontainer_m->E.getView();
175 auto fphi_view = this->fcontainer_m->getPhi().getView();
176
177
178
179 double avgphi = 0.0;
180 double avgE = 0.0;
181 double minEComponent = std::numeric_limits<T>::max();
182 double maxEComponent = std::numeric_limits<T>::min();
183 double minE = std::numeric_limits<T>::max();
184 double maxE = std::numeric_limits<T>::min();
185 double cc = getCouplingConstant();
186
187 int myRank = ippl::Comm->rank();
188
190 "check e-field", this->getLocalNum(),
191 KOKKOS_LAMBDA(const int i, double& loc_avgE, double& loc_minEComponent,
192 double& loc_maxEComponent, double& loc_minE, double& loc_maxE) {
193 double EX = pE_view[i][0]*cc;
194 double EY = pE_view[i][1]*cc;
195 double EZ = pE_view[i][2]*cc;
196
197 double ENorm = Kokkos::sqrt(EX*EX + EY*EY + EZ*EZ);
198
199 loc_avgE += ENorm;
200
201 loc_minEComponent = EX < loc_minEComponent ? EX : loc_minEComponent;
202 loc_minEComponent = EY < loc_minEComponent ? EY : loc_minEComponent;
203 loc_minEComponent = EZ < loc_minEComponent ? EZ : loc_minEComponent;
204
205 loc_maxEComponent = EX > loc_maxEComponent ? EX : loc_maxEComponent;
206 loc_maxEComponent = EY > loc_maxEComponent ? EY : loc_maxEComponent;
207 loc_maxEComponent = EZ > loc_maxEComponent ? EZ : loc_maxEComponent;
208
209 loc_minE = ENorm < loc_minE ? ENorm : loc_minE;
210 loc_maxE = ENorm > loc_maxE ? ENorm : loc_maxE;
211 },
212 Kokkos::Sum<T>(avgE), Kokkos::Min<T>(minEComponent),
213 Kokkos::Max<T>(maxEComponent), Kokkos::Min<T>(minE),
214 Kokkos::Max<T>(maxE));
215
216 if (this->getLocalNum() == 0) {
217 minEComponent = maxEComponent = minE = maxE = avgE = 0.0;
218 }
219
220 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &avgE, &avgE, 1, MPI_DOUBLE, MPI_SUM, 0, ippl::Comm->getCommunicator());
221 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &minEComponent, &minEComponent, 1, MPI_DOUBLE, MPI_MIN, 0, ippl::Comm->getCommunicator());
222 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &maxEComponent, &maxEComponent, 1, MPI_DOUBLE, MPI_MAX, 0, ippl::Comm->getCommunicator());
223 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &minE, &minE, 1, MPI_DOUBLE, MPI_MIN, 0, ippl::Comm->getCommunicator());
224 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &maxE, &maxE, 1, MPI_DOUBLE, MPI_MAX, 0, ippl::Comm->getCommunicator());
225
226 size_t Np = this->getTotalNum();
227 avgE /= (Np == 0) ? 1 : Np; // avoid division by zero for empty simulations (see also DistributionMoments::computeMeans implementation)
228
229 msg << "avgENorm = " << avgE << endl;
230
231 using mdrange_type = Kokkos::MDRangePolicy<Kokkos::Rank<3>>;
232
234 "check phi", mdrange_type({0,0,0}, {fphi_view.extent(0),fphi_view.extent(1),fphi_view.extent(2)}),
235 KOKKOS_LAMBDA(const int i, const int j, const int k, double& loc_avgphi) {
236 double phi = fphi_view(i,j,k);
237 loc_avgphi += phi;
238 },
239 Kokkos::Sum<T>(avgphi));
240
241 MPI_Reduce(myRank == 0 ? MPI_IN_PLACE : &avgphi, &avgphi, 1, MPI_DOUBLE, MPI_SUM, 0, ippl::Comm->getCommunicator());
242 avgphi /= this->getTotalNum();
243 msg << "avgphi = " << avgphi << endl;
244
245}
246
247template <typename T, unsigned Dim>
249 std::shared_ptr<ParticleContainer_t> pc = this->pcontainer_m;
250
251 auto Rview = pc->R.getView();
252 auto Pview = pc->P.getView();
253
257
258
259 double loc_centroid[2 * Dim] = {};
260 double loc_moment[2 * Dim][2 * Dim] = {};
261
262 double centroid[2 * Dim] = {};
263 double moment[2 * Dim][2 * Dim] = {};
264
265 for (unsigned i = 0; i < 2 * Dim; i++) {
266 loc_centroid[i] = 0.0;
267 for (unsigned j = 0; j <= i; j++) {
268 loc_moment[i][j] = 0.0;
269 loc_moment[j][i] = 0.0;
270 }
271 }
272
273 for (unsigned i = 0; i < 2 * Dim; ++i) {
275 "calc moments of particle distr.", ippl::getRangePolicy(Rview),
276 KOKKOS_LAMBDA(
277 const int k, double& cent, double& mom0, double& mom1, double& mom2,
278 double& mom3, double& mom4, double& mom5) {
279 double part[2 * Dim];
280 part[0] = Rview(k)[0];
281 part[1] = Pview(k)[0];
282 part[2] = Rview(k)[1];
283 part[3] = Pview(k)[1];
284 part[4] = Rview(k)[2];
285 part[5] = Pview(k)[2];
286
287 cent += part[i];
288 mom0 += part[i] * part[0];
289 mom1 += part[i] * part[1];
290 mom2 += part[i] * part[2];
291 mom3 += part[i] * part[3];
292 mom4 += part[i] * part[4];
293 mom5 += part[i] * part[5];
294 },
295 Kokkos::Sum<T>(loc_centroid[i]), Kokkos::Sum<T>(loc_moment[i][0]),
296 Kokkos::Sum<T>(loc_moment[i][1]), Kokkos::Sum<T>(loc_moment[i][2]),
297 Kokkos::Sum<T>(loc_moment[i][3]), Kokkos::Sum<T>(loc_moment[i][4]),
298 Kokkos::Sum<T>(loc_moment[i][5]));
300 }
301
302 MPI_Allreduce(loc_moment, moment, 2 * Dim * 2 * Dim, MPI_DOUBLE, MPI_SUM, ippl::Comm->getCommunicator());
303 MPI_Allreduce(loc_centroid, centroid, 2 * Dim, MPI_DOUBLE, MPI_SUM, ippl::Comm->getCommunicator());
304 ippl::Comm->barrier();
305
306 double rmax_loc[Dim];
307 double rmin_loc[Dim];
308 double rmax[Dim];
309 double rmin[Dim];
310
311 for (unsigned d = 0; d < Dim; ++d) {
313 "rel max", this->getLocalNum(),
314 KOKKOS_LAMBDA(const int i, double& mm) {
315 double tmp_vel = Rview(i)[d];
316 mm = tmp_vel > mm ? tmp_vel : mm;
317 },
318 Kokkos::Max<T>(rmax_loc[d]));
319
321 "rel min", this->getLocalNum(),
322 KOKKOS_LAMBDA(const int i, double& mm) {
323 double tmp_vel = Rview(i)[d];
324 mm = tmp_vel < mm ? tmp_vel : mm;
325 },
326 Kokkos::Min<T>(rmin_loc[d]));
327 }
329 MPI_Allreduce(rmax_loc, rmax, Dim, MPI_DOUBLE, MPI_MAX, ippl::Comm->getCommunicator());
330 MPI_Allreduce(rmin_loc, rmin, Dim, MPI_DOUBLE, MPI_MIN, ippl::Comm->getCommunicator());
331 ippl::Comm->barrier();
332
333 // \todo can we do this nicer?
334 for (unsigned int i=0; i<Dim; i++) {
335 rmax_m(i) = rmax[i];
336 rmin_m(i) = rmin[i];
337 }
338}
339
340template <typename T, unsigned Dim>
342 this->fcontainer_m->getRho() = 0.0;
343 this->fsolver_m->runSolver();
344}
345
346template <typename T, unsigned Dim>
348 // if (this->getLocalNum() != 0) { // to suppress Nans
349 Inform::FmtFlags_t ff = os.flags();
350
351 os << std::scientific;
352 os << level1 << "\n";
353 os << "* ************** B U N C H "
354 "********************************************************* \n";
355 os << "* PARTICLES = " << this->getTotalNum() << "\n";
356 os << "* CHARGE = " << this->qi_m*this->getTotalNum() << " (Cb) \n";
357 os << "* INTEGRATOR = " << integration_method_m << "\n";
358 os << "* MIN R (origin) = " << Util::getLengthString( this->pcontainer_m->getMinR(), 5) << "\n";
359 os << "* MAX R (max ext) = " << Util::getLengthString( this->pcontainer_m->getMaxR(), 5) << "\n";
360 os << "* RMS R = " << Util::getLengthString( this->pcontainer_m->getRmsR(), 5) << "\n";
361 os << "* RMS P = " << this->pcontainer_m->getRmsP() << " [beta gamma]\n";
362 os << "* Mean R: " << this->pcontainer_m->getMeanR() << " [m]\n";
363 os << "* Mean P: " << this->pcontainer_m->getMeanP() << " [beta gamma]\n";
364 os << "* MESH SPACING = " << Util::getLengthString( this->fcontainer_m->getMesh().getMeshSpacing(), 5) << "\n";
365 os << "* COMPDOM INCR = " << this->OPALFieldSolver_m->getBoxIncr() << " (%) \n";
366 os << "* FIELD LAYOUT = " << this->fcontainer_m->getFL() << "\n";
367 os << "* Centroid : \n* ";
368 for (unsigned int i=0; i<2*Dim; i++) {
369 os << this->pcontainer_m->getCentroid()[i] << " ";
370 }
371 os << endl << "* Cov Matrix : \n* ";
372 for (unsigned int i=0; i<2*Dim; i++) {
373 for (unsigned int j=0; j<2*Dim; j++) {
374 os << this->pcontainer_m->getCovMatrix()(i,j) << " ";
375 }
376 os << "\n* ";
377 }
378 os << "* "
379 "********************************************************************************"
380 "** "
381 << endl;
382 os.flags(ff);
383 return os;
384}
385
386template <typename T, unsigned Dim>
// NOTE(review): the function signature (original line 387) was lost in
// extraction; the name of this bunch-update variant cannot be recovered here.
// Body left untouched. It also references `l` and `hr`, whose declarations
// appear to have been lost (original lines 401/405) — confirm against original.
388 /* \brief
389 1. calculates and set hr
390 2. do repartitioning
391 */
392 Inform m ("bunchUpdate ");
393
394 auto *mesh = &this->fcontainer_m->getMesh();
395 auto *FL = &this->fcontainer_m->getFL();
396
397 std::shared_ptr<ParticleContainer_t> pc = this->getParticleContainer();
398
399 pc->computeMinMaxR();
400
402
    // NOTE(review): subtracting/adding numeric_limits<T>::lowest() (a huge
    // negative number) as a margin looks wrong — presumably a small epsilon
    // was intended (cf. the sibling bunchUpdate() which uses no margin); confirm.
403 ippl::Vector<double, 3> o = pc->getMinR() - std::numeric_limits<T>::lowest();
404 ippl::Vector<double, 3> e = pc->getMaxR() + std::numeric_limits<T>::lowest();
406
    // Grow the box by the configured percentage and derive the mesh spacing.
407 hr_m = (1.0+this->OPALFieldSolver_m->getBoxIncr()/100.)*(l / this->nr_m);
408 mesh->setMeshSpacing(hr);
409 mesh->setOrigin(o-0.5*hr_m*this->OPALFieldSolver_m->getBoxIncr()/100.);
410
    // Propagate the new mesh to the particle layout and migrate particles.
411 pc->getLayout().updateLayout(*FL, *mesh);
412 pc->update();
413
414 this->getFieldContainer()->setRMin(o);
415 this->getFieldContainer()->setRMax(e);
416 this->getFieldContainer()->setHr(hr);
417
    // Repartition from scratch on the new domain.
418 this->isFirstRepartition_m = true;
419 this->loadbalancer_m->initializeORB(FL, mesh);
420 this->loadbalancer_m->repartition(FL, mesh, this->isFirstRepartition_m);
421 this->updateMoments();
422}
423
424template <typename T, unsigned Dim>
426
427 /* \brief
428 1. calculates and set hr
429 2. do repartitioning
430 */
431
432 auto *mesh = &this->fcontainer_m->getMesh();
433 auto *FL = &this->fcontainer_m->getFL();
434
435 std::shared_ptr<ParticleContainer_t> pc = this->getParticleContainer();
436
437 pc->computeMinMaxR();
438
439 ippl::Vector<double, 3> o = pc->getMinR();
440 ippl::Vector<double, 3> e = pc->getMaxR();
442
443 hr_m = (1.0+this->OPALFieldSolver_m->getBoxIncr()/100.)*(l / this->nr_m);
444 mesh->setMeshSpacing(hr_m);
445 mesh->setOrigin(o-0.5*hr_m*this->OPALFieldSolver_m->getBoxIncr()/100.);
446
447 this->getFieldContainer()->setRMin(o);
448 this->getFieldContainer()->setRMax(e);
449 this->getFieldContainer()->setHr(hr_m);
450
451 pc->getLayout().updateLayout(*FL, *mesh);
452 pc->update();
453
454 this->isFirstRepartition_m = true;
455 //this->loadbalancer_m->initializeORB(FL, mesh);
456 //this->loadbalancer_m->repartition(FL, mesh, this->isFirstRepartition_m);
457
458 this->updateMoments();
459}
460
461template <typename T, unsigned Dim>
463 static IpplTimings::TimerRef completeBinningT = IpplTimings::getTimer("bTotalBinningT");
464
465 // Start binning and sorting by bins
466 std::shared_ptr<AdaptBins_t> bins = this->getBins();
467 //VField_t<double, 3>& Etmp = *(this->getTempEField());
468
469 IpplTimings::startTimer(completeBinningT);
470 bins->doFullRebin(bins->getMaxBinCount()); // rebin with 128 bins // bins->getMaxBinCount()
471 bins->print(); // For debugging...
472 bins->sortContainerByBin(); // Sort BEFORE, since it generates less atomics overhead with more bins!
473
474
475 bins->genAdaptiveHistogram(); // merge bins with width/N_part ratio of 1.0
476 IpplTimings::stopTimer(completeBinningT);
477
478 bins->print(); // For debugging...
479
480
481 static IpplTimings::TimerRef SolveTimer = IpplTimings::getTimer("SolveTimer");
482 IpplTimings::startTimer(SolveTimer);
483
484 /*
485 \todo check if Lorentz transform is needed
486
487 double gammaz = this->pcontainer_m->getMeanGammaZ();
488 gammaz *= gammaz;
489 gammaz = std::sqrt(gammaz + 1.0);
490
491 Vector_t<double, 3> hr_scaled = hr_m;
492 // hr_scaled[2] *= gammaz;
493
494 hr_m = hr_scaled;
495
496 */
497
498 /*
499 particles have moved need to adjust grid
500 \todo might not work -- can use container update for testing!
501 */
502 //std::shared_ptr<ParticleContainer_t> pc = this->getParticleContainer();
503 //pc->update();
504 this->bunchUpdate();
505
506 /*
507
508 scatterCIC start
509
510 */
511
513
514
515 ippl::ParticleAttrib<T>* Q = &this->pcontainer_m->Q;
516 typename Base::particle_position_type* R = &this->pcontainer_m->R;
517
518 this->fcontainer_m->getRho() = 0.0;
519 Field_t<Dim>* rho = &this->fcontainer_m->getRho();
520
522 // Charge "unit" here is "charge per macroparticle" [C]!
523 *Q = (*Q) * this->pcontainer_m->dt; // Scale by time step
524 scatter(*Q, *rho, *R);
525 *Q = (*Q) / this->pcontainer_m->dt; // Rescale back to charge per macroparticle
526
527 /*
528 Now rho is in units of [C * s] -- need to divide by dt to get back to [C].
529 Note By using the global timestep getdT(), we account for the possibility of
530 "fractional timesteps", meaning the particle was virtually "created in the
531 middle" of a full timestep. As of now, this might not be necessary.
532 */
533 (*rho) = (*rho) / getdT();
534
535#ifdef doDEBUG
536 const double qtot = this->qi_m * this->getTotalNum();
537 size_type TotalParticles = 0;
538 size_type localParticles = this->pcontainer_m->getLocalNum();
539
540 double relError = std::fabs((qtot - (*rho).sum()) / qtot);
541
542 ippl::Comm->reduce(localParticles, TotalParticles, 1, std::plus<size_type>());
543
544 if ((ippl::Comm->rank() == 0) && (relError > 1.0E-13)) {
545 Inform m("computeSelfFields w CICScatter ", INFORM_ALL_NODES);
546 m << "Time step: " << it_m
547 << " total particles in the sim. " << totalP_m
548 << " missing : " << totalP_m-TotalParticles
549 << " rel. error in charge conservation: " << relError << endl;
550 }
551#endif
552
553 // At this point, the units of rho need to be corrected: rho = rho / cellVolume
554 double cellVolumeInv = 1 / std::reduce(hr_m.begin(), hr_m.end(), 1., std::multiplies<double>());
555 (*rho) = (*rho) * cellVolumeInv;
556
557 // Alpine uses net 0 charge density for periodic BCs, so we need to subtract background charge here (?TODO: check)
558 double totalQ = getCharge();
559 if (this->fsolver_m->getStype() != "OPEN") {
560 double size = 1;
561 for (size_t d = 0; d < 3; d++) {
562 size *= rmax_m[d] - rmin_m[d];
563 }
564 (*rho) = (*rho) - (totalQ / size);
565 }
566
567 /*
568 This concludes the scatter step ( \todo perhaps we want to put this in its own function, like scatterCIC)
569
570 Next is the solver step. The solver computes the E-field from rho directly. However,
571 at the moment, the potential has units of [C/m^3].
572
573 From OPAL:
574 The scalar potential is given back with rho_m in units [C/m] = [F*V/m] and must be divided by
575 4*pi*\epsilon_0 [F/m] resulting in [V].
576 */
577 (*rho) = (*rho) / getCouplingConstant(); // now rho_m has units of [V]
578
579 this->fsolver_m->runSolver();
580 // Now, with E=-grad(phi), E has units of [V/m] (note, phi is a scalar potential)
581
582 gather(this->pcontainer_m->E, this->fcontainer_m->getE(), this->pcontainer_m->R);
583
584#ifdef doDEBUG
585 Inform m("computeSelfFields w CICScatter ", INFORM_ALL_NODES);
586 double cellVolume = std::reduce(hr_m.begin(), hr_m.end(), 1., std::multiplies<double>());
587 m << "cellVolume= " << cellVolume << endl;
588 m << "Sum over E-field after gather = " << this->fcontainer_m->getE().sum() << endl;
589#endif
590
591 /*
592 Vector_t<double, 3> efScale = Vector_t<double,3>(gammaz*cc/hr_scaled[0], gammaz*cc/hr_scaled[1], cc / gammaz / hr_scaled[2]);
593 m << "efScale = " << efScale << endl;
594 spaceChargeEFieldCheck(efScale);
595 */
596
597 //IpplTimings::stopTimer(SolveTimer);
598}
599
600template <typename T, unsigned Dim>
605 Inform m("scatterCICPerBin");
606 m << "Scattering binIndex = " << binIndex << " to grid." << endl;
607
608 this->fcontainer_m->getRho() = 0.0;
609
610 ippl::ParticleAttrib<T>* q = &this->pcontainer_m->Q;
611 typename Base::particle_position_type* R = &this->pcontainer_m->R;
612 Field_t<Dim>* rho = &this->fcontainer_m->getRho();
613
614 double Q;
615 Vector_t<double, 3> rmin = rmin_m;
616 Vector_t<double, 3> rmax = rmax_m;
617 Vector_t<double, 3> hr = hr_m;
618
619 if (binIndex == -1) {
620 // Use original scatterCIC logic for all particles
621 Q = this->qi_m * this->getTotalNum();
622 scatter(*q, *rho, *R);
623 } else {
624 // Use per-bin scattering logic
625 Q = this->qi_m * this->bins_m->getNPartInBin(binIndex, true);
626 scatter(*q, *rho, *R, this->bins_m->getBinIterationPolicy(binIndex), this->bins_m->getHashArray());
627 }
628
629 m << "gammz= " << this->pcontainer_m->getMeanP()[2] << endl;
630
631#ifdef doDEBUG
632 double relError = std::fabs((Q - (*rho).sum()) / Q);
633 size_type TotalParticles = 0;
634 size_type localParticles = this->pcontainer_m->getLocalNum();
635 size_type totalP_check = (binIndex == -1) ? totalP_m : this->pcontainer_m->getTotalNum();
636
637 m << "computeSelfFields sum rho = " << (*rho).sum() << ", relError = " << relError << endl;
638
639 ippl::Comm->reduce(localParticles, TotalParticles, 1, std::plus<size_type>());
640
641 if (ippl::Comm->rank() == 0) {
642 if (TotalParticles != totalP_check || relError > 1e-10) {
643 m << "Time step: " << it_m << endl;
644 m << "Total particles in the sim. " << totalP_check << " "
645 << "after update: " << TotalParticles << endl;
646 m << "Rel. error in charge conservation: " << relError << endl;
647 ippl::Comm->abort();
648 }
649 }
650#endif
651
652 double cellVolume = std::reduce(hr.begin(), hr.end(), 1., std::multiplies<double>());
653
654 m << "cellVolume= " << cellVolume << endl;
655
656 (*rho) = (*rho) / cellVolume;
657
658 // rho = rho_e - rho_i (only if periodic BCs)
659 if (this->fsolver_m->getStype() != "OPEN") {
660 double size = 1;
661 for (unsigned d = 0; d < 3; d++) {
662 size *= rmax[d] - rmin[d];
663 }
664 *rho = *rho - (Q / size);
665 }
666}
667
668template <typename T, unsigned Dim>
670 Inform ms("PartBunch::performBunchSanityChecks");
672
673 // Check if bc handler was initialized properly
674 if (!this->getBCHandler()) {
675 throw OpalException("PartBunch::performBunchSanityChecks",
676 "BC Handler not initialized properly.");
677 }
678 ms << "BC Handler initialized properly." << endl;
679}
680
681
682// Explicit instantiations
683template class PartBunch<double, 3>;
Inform * gmsg
Definition: changes.cpp:7
ippl::detail::size_type size_type
Inform & endl(Inform &inf)
Definition: Inform.cpp:42
Inform & level1(Inform &inf)
Definition: Inform.cpp:48
#define INFORM_ALL_NODES
Definition: Inform.h:38
constexpr double e
The value of.
Definition: Physics.h:39
double binningBeta
Definition: Options.cpp:109
double binningAlpha
Definition: Options.cpp:108
int maxBins
Definition: Options.cpp:107
double desiredWidth
Definition: Options.cpp:110
std::string getLengthString(double spos, unsigned int precision=3)
Definition: Util.h:93
Implementations for FFT constructor/destructor and transforms.
Definition: Archive.h:20
void fence()
Definition: Ippl.cpp:103
KOKKOS_INLINE_FUNCTION Vector< T, Dim > max(const Vector< T, Dim > &a, const Vector< T, Dim > &b)
Definition: Vector.hpp:226
RangePolicy< View::rank, typenameView::execution_space, PolicyArgs... >::policy_type getRangePolicy(const View &view, int shift=0)
void gather(Attrib1 &attrib, Field &f, const Attrib2 &pp, const bool addToAttribute=false)
Non-class interface for gathering field data into a particle attribute.
KOKKOS_INLINE_FUNCTION Vector< T, Dim > min(const Vector< T, Dim > &a, const Vector< T, Dim > &b)
Definition: Vector.hpp:217
void parallel_reduce(const std::string &name, const ExecPolicy &policy, const FunctorType &functor, ReducerArgument &&... reducer)
std::unique_ptr< mpi::Communicator > Comm
Definition: Ippl.h:22
void scatter(const Attrib1 &attrib, Field &f, const Attrib2 &pp)
Non-class interface for scattering particle attribute data onto a field.
void gatherLoadBalanceStatistics()
void performBunchSanityChecks() const
Definition: PartBunch.cpp:669
Vector_t< double, Dim > rmin_m
std::array< bool, Dim > decomp_m
Definition: PartBunch.h:102
void computeSelfFields()
void pre_run() override
A method that should be used for setting up the simulation.
Definition: PartBunch.cpp:341
void calcBeamParameters()
std::shared_ptr< VField_t< T, Dim > > getTempEField()
Definition: PartBunch.h:232
void scatterCICPerBin(binIndex_t binIndex)
Definition: PartBunch.cpp:601
void do_binaryRepart()
Vector_t< double, Dim > rmax_m
std::shared_ptr< FieldSolverCmd > OPALFieldSolver_m
Definition: PartBunch.h:180
std::shared_ptr< BCHandler_t > getBCHandler() const
Definition: PartBunch.h:240
void setBCHandler(std::shared_ptr< BCHandler_t > bcHandler)
Definition: PartBunch.h:239
Vector_t< T, Dim > nr_m
typename ParticleContainer_t::bin_index_type binIndex_t
Definition: PartBunch.h:54
std::shared_ptr< ParticleContainer_t > getParticleContainer()
Definition: PartBunch.h:221
PartBunch()=delete
double dt_m
6x6 matrix of the moments of the beam
void setSolver(std::string solver)
ippl::NDIndex< Dim > domain_m
Definition: PartBunch.h:101
void bunchUpdate()
Definition: PartBunch.cpp:425
Vector_t< double, Dim > hr_m
mesh size [m]
void spaceChargeEFieldCheck(Vector_t< double, 3 > efScale)
Definition: PartBunch.cpp:171
void setBins(std::shared_ptr< AdaptBins_t > bins)
Definition: PartBunch.h:237
typename ParticleBinning::CoordinateSelector< ParticleContainer_t > BinningSelector_t
Definition: PartBunch.h:52
Inform & print(Inform &os)
Vector_t< double, Dim > origin_m
Definition: PartBunch.h:89
void gatherCIC()
std::shared_ptr< AdaptBins_t > getBins()
Definition: PartBunch.h:235
std::unique_ptr< size_t[]> globalPartPerNode_m
void setTempEField(std::shared_ptr< VField_t< T, Dim > > Etmp)
Definition: PartBunch.h:233
short int bin_index_type
Defines which type to use as a particle bin.
The base class for all OPAL exceptions.
Definition: OpalException.h:28
void setParticleContainer(std::shared_ptr< ParticleContainer< T, 3 > > pcontainer)
Definition: PicManager.h:55
void setFieldContainer(std::shared_ptr< FieldContainer< T, 3 > > fcontainer)
Definition: PicManager.h:59
detail::size_type size_type
Definition: ParticleBase.h:111
typename PLayout::particle_position_type particle_position_type
Definition: ParticleBase.h:93
particle_position_type R
view of particle positions
Definition: ParticleBase.h:115
KOKKOS_INLINE_FUNCTION constexpr iterator begin()
Definition: Vector.hpp:160
KOKKOS_INLINE_FUNCTION constexpr iterator end()
Definition: Vector.hpp:165
Definition: Inform.h:40
std::ios_base::fmtflags FmtFlags_t
Definition: Inform.h:99
FmtFlags_t flags() const
Definition: Inform.h:105
Timing::TimerRef TimerRef
Definition: IpplTimings.h:144
static TimerRef getTimer(const char *nm)
Definition: IpplTimings.h:150
static void stopTimer(TimerRef t)
Definition: IpplTimings.h:156
static void startTimer(TimerRef t)
Definition: IpplTimings.h:153
constexpr unsigned Dim