Actual source code: plexland.c
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsclandau.h>
3: #include <petscts.h>
4: #include <petscdmforest.h>
5: #include <petscdmcomposite.h>
7: /* Landau collision operator */
9: /* relativistic terms */
10: #if defined(PETSC_USE_REAL_SINGLE)
11: #define SPEED_OF_LIGHT 2.99792458e8F
12: #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
13: #else
14: #define SPEED_OF_LIGHT 2.99792458e8
15: #define C_0(v0) (SPEED_OF_LIGHT/v0) /* needed for relativistic tensor on all architectures */
16: #endif
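/* For scale (illustrative numbers only): C_0(v0) is the speed of light measured in units of the
   normalizing velocity v0.  With, say, v0 ~ 1.9e7 m/s (roughly a 1 keV electron thermal speed),
   C_0 ~ 16, so the relativistic tensor corrections enter at order 1/C_0^2 ~ 4e-3. */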
18: #define PETSC_THREAD_SYNC
19: #include "land_tensors.h"
21: /* vector padding not supported */
22: #define LANDAU_VL 1
24: static PetscErrorCode LandauGPUMapsDestroy(void *ptr)
25: {
26: P4estVertexMaps *maps = (P4estVertexMaps*)ptr;
27: PetscErrorCode ierr;
29: // free device data
30: if (maps[0].deviceType != LANDAU_CPU) {
31: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
32: if (maps[0].deviceType == LANDAU_KOKKOS) {
33: LandauKokkosDestroyMatMaps(maps, maps[0].numgrids); // implies Kokkos does the destroy
34: } // else could be CUDA
35: #elif defined(PETSC_HAVE_CUDA)
36: if (maps[0].deviceType == LANDAU_CUDA) {
37: LandauCUDADestroyMatMaps(maps, maps[0].numgrids);
38: } else SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->deviceType %D ?????",maps->deviceType);
39: #endif
40: }
41: // free host data
42: for (PetscInt grid=0 ; grid < maps[0].numgrids ; grid++) {
43: PetscFree(maps[grid].c_maps);
44: PetscFree(maps[grid].gIdx);
45: }
46: PetscFree(maps);
48: return(0);
49: }
50: static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
51: {
52: PetscReal v2 = 0;
54: /* compute v^2 / 2 */
55: for (int i = 0; i < dim; ++i) v2 += x[i]*x[i];
56: /* return the (dimensionless) energy, v^2/2 */
57: u[0] = v2/2;
58: return(0);
59: }
61: /* needs double */
62: static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
63: {
64: PetscReal *c2_0_arr = ((PetscReal*)actx);
65: double u2 = 0, c02 = (double)*c2_0_arr, xx;
68: /* compute u^2 */
69: for (int i = 0; i < dim; ++i) u2 += x[i]*x[i];
70: /* gamma - 1 = g_eps, for conditioning and we only take derivatives */
71: xx = u2/c02;
72: #if defined(PETSC_USE_DEBUG)
73: u[0] = PetscSqrtReal(1. + xx);
74: #else
75: u[0] = xx/(PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned; the trailing -1 is a constant shift and only derivatives are used
76: #endif
77: return(0);
78: }
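/* Conditioning note for the branch above (a sketch, not part of the solver): with xx = u^2/c^2,
     xx/(sqrt(1+xx) + 1) == sqrt(1+xx) - 1 == gamma - 1
   exactly, but the left form avoids the cancellation of sqrt(1+xx) - 1 when xx << 1.  For example,
   in double precision with xx = 1e-20, sqrt(1.+xx) - 1. evaluates to 0, while xx/(sqrt(1.+xx)+1.)
   returns 5e-21, the correct leading-order value xx/2.  Subtracting the extra constant 1 does not
   matter because only derivatives of this field are used. */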
80: /*
81: LandauFormJacobian_Internal - Evaluates Jacobian matrix.
83: Input Parameters:
84: . a_X - input vector
85: . a_ctx - optional user-defined context
86: . dim - dimension
88: Output Parameter:
89: . JacP - Jacobian matrix, filled here (not created)
90: */
91: static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
92: {
93: LandauCtx *ctx = (LandauCtx*)a_ctx;
94: PetscErrorCode ierr;
95: PetscInt numCells[LANDAU_MAX_GRIDS],Nq,Nb,Nf[LANDAU_MAX_GRIDS],d,f,fieldA,qj,N,nip_glb;
96: PetscQuadrature quad;
97: const PetscReal *quadWeights;
98: PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
99: PetscReal Eq_m[LANDAU_MAX_SPECIES], m_0=ctx->m_0; /* normalize mass -- not needed! */
100: PetscScalar *cellClosure=NULL;
101: const PetscScalar *xdata=NULL;
102: PetscDS prob;
103: //PetscLogDouble flops;
104: PetscContainer container;
105: P4estVertexMaps *maps;
106: PetscSection section[LANDAU_MAX_GRIDS],globsection[LANDAU_MAX_GRIDS];
107: Mat subJ[LANDAU_MAX_GRIDS];
113: /* check for matrix container for GPU assembly */
114: PetscLogEventBegin(ctx->events[10],0,0,0,0);
115: DMGetDS(ctx->plex[0], &prob); // same DS for all grids
116: PetscDSGetTabulation(prob, &Tf); // Bf, &Df same for all grids
117: PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container);
118: if (container) {
119: if (!ctx->gpu_assembly) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"GPU matrix container but no GPU assembly");
120: PetscContainerGetPointer(container, (void **) &maps);
121: if (!maps) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"empty GPU matrix container");
122: for (PetscInt grid=0;grid<ctx->num_grids;grid++) subJ[grid] = NULL;
123: } else {
124: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
125: DMCreateMatrix(ctx->plex[grid], &subJ[grid]);
126: }
127: maps = NULL;
128: }
129: /* DS, Tab and quad are the same on all grids */
130: if (ctx->plex[0] == NULL) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
131: PetscFEGetQuadrature(ctx->fe[0], &quad);
132: PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights); Nb = Nq;
133: if (Nq >LANDAU_MAX_NQ) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %D > LANDAU_MAX_NQ (%D)",Nq,LANDAU_MAX_NQ);
134: if (LANDAU_DIM != dim) SETERRQ2(ctx->comm, PETSC_ERR_PLIB, "dim %D != LANDAU_DIM %d",dim,LANDAU_DIM);
135: /* setup each grid */
136: nip_glb = 0;
137: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
138: PetscInt cStart, cEnd;
139: if (ctx->plex[grid] == NULL) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"Plex not created");
140: DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
141: numCells[grid] = cEnd - cStart; // grids can have different topology
142: nip_glb += Nq*numCells[grid];
143: DMGetLocalSection(ctx->plex[grid], &section[grid]);
144: DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);
145: PetscSectionGetNumFields(section[grid], &Nf[grid]);
146: }
147: VecGetSize(a_X,&N);
148: PetscLogEventEnd(ctx->events[10],0,0,0,0);
149: if (!ctx->initialized) { /* create static point data, Jacobian called first */
150: PetscReal *invJ,*ww,*xx,*yy,*zz=NULL,*invJ_a;
151: PetscInt outer_ipidx, outer_ej,grid;
152: PetscFE fe;
154: PetscLogEventBegin(ctx->events[7],0,0,0,0);
155: PetscInfo(ctx->plex[0], "Initialize static data\n");
156: /* collect f data, first time is for Jacobian, but make mass now */
157: if (ctx->verbose > 0) {
158: PetscPrintf(ctx->comm,"%D) %s: %D IPs, %D cells[0], Nb=%D, Nq=%D, dim=%D, Tab: Nb=%D Nf=%D Np=%D cdim=%D N=%D\n",
159: 0,"FormLandau",nip_glb,numCells[0], Nb, Nq, dim, Tf[0]->Nb, ctx->num_species, Tf[0]->Np, Tf[0]->cdim, N);
160: }
161: PetscMalloc4(nip_glb,&ww,nip_glb,&xx,nip_glb,&yy,nip_glb*dim*dim,&invJ_a);
162: if (dim==3) {
163: PetscMalloc1(nip_glb,&zz);
164: }
165: if (ctx->use_energy_tensor_trick) {
166: PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe);
167: PetscObjectSetName((PetscObject) fe, "energy");
168: }
169: /* init each grid */
170: for (grid=0, outer_ipidx=0, outer_ej=0 ; grid < ctx->num_grids ; grid++) {
171: Vec v2_2 = NULL; // projected function: v^2/2 for non-relativistic, gamma... for relativistic
172: PetscSection e_section;
173: DM dmEnergy;
174: PetscInt cStart, cEnd, ej;
176: DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
177: // prep energy trick, get v^2 / 2 vector
178: if (ctx->use_energy_tensor_trick) {
179: PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f};
180: Vec glob_v2;
181: PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))};
183: DMClone(ctx->plex[grid], &dmEnergy);
184: PetscObjectSetName((PetscObject) dmEnergy, "energy");
185: DMSetField(dmEnergy, 0, NULL, (PetscObject)fe);
186: DMCreateDS(dmEnergy);
187: DMGetSection(dmEnergy, &e_section);
188: DMGetGlobalVector(dmEnergy,&glob_v2);
189: PetscObjectSetName((PetscObject) glob_v2, "trick");
190: c2_0[0] = &data[0];
191: DMProjectFunction(dmEnergy, 0., energyf, (void**)c2_0, INSERT_ALL_VALUES, glob_v2);
192: DMGetLocalVector(dmEnergy, &v2_2);
193: VecZeroEntries(v2_2); /* zero BCs so don't set */
194: DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2);
195: DMGlobalToLocalEnd (dmEnergy, glob_v2, INSERT_VALUES, v2_2);
196: DMViewFromOptions(dmEnergy,NULL, "-energy_dm_view");
197: VecViewFromOptions(glob_v2,NULL, "-energy_vec_view");
198: DMRestoreGlobalVector(dmEnergy, &glob_v2);
199: }
200: /* append part of the IP data for each grid */
201: for (ej = 0 ; ej < numCells[grid]; ++ej, ++outer_ej) {
202: PetscScalar *coefs = NULL;
203: PetscReal vj[LANDAU_MAX_NQ*LANDAU_DIM],detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ*LANDAU_DIM*LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0);
204: invJ = invJ_a + outer_ej * Nq*dim*dim;
205: DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej+cStart, quad, vj, Jdummy, invJ, detJj);
206: if (ctx->use_energy_tensor_trick) {
207: DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs);
208: }
209: /* create static point data */
210: for (qj = 0; qj < Nq; qj++, outer_ipidx++) {
211: const PetscInt gidx = outer_ipidx;
212: ww [gidx] = detJj[qj] * quadWeights[qj];
213: if (dim==2) ww [gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */
214: // get xx, yy, zz
215: if (ctx->use_energy_tensor_trick) {
216: double refSpaceDer[3],eGradPhi[3];
217: const PetscReal * const DD = Tf[0]->T[1];
218: const PetscReal *Dq = &DD[qj*Nb*dim];
219: for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0;
220: for (int b = 0; b < Nb; ++b) {
221: for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b*dim+d]*PetscRealPart(coefs[b]);
222: }
223: xx[gidx] = 1e10;
224: if (ctx->use_relativistic_corrections) {
225: double dg2_c2 = 0;
226: //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02;
227: for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]);
228: dg2_c2 *= (double)c02;
229: if (dg2_c2 >= .999) {
230: xx[gidx] = vj[qj * dim + 0]; /* coordinate */
231: yy[gidx] = vj[qj * dim + 1];
232: if (dim==3) zz[gidx] = vj[qj * dim + 2];
233: PetscPrintf(ctx->comm,"Error: %12.5e %D.%D) dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n",PetscSqrtReal(xx[gidx]*xx[gidx] + yy[gidx]*yy[gidx] + zz[gidx]*zz[gidx]), ej, qj, dg2_c2, xx[gidx],yy[gidx],zz[gidx]);
234: } else {
235: PetscReal fact = c02/PetscSqrtReal(1. - dg2_c2);
236: for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact;
237: // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0
238: }
239: }
240: if (xx[gidx] == 1e10) {
241: for (int d = 0; d < dim; ++d) {
242: for (int e = 0 ; e < dim; ++e) {
243: eGradPhi[d] += invJ[qj * dim * dim + e*dim+d]*refSpaceDer[e];
244: }
245: }
246: xx[gidx] = eGradPhi[0];
247: yy[gidx] = eGradPhi[1];
248: if (dim==3) zz[gidx] = eGradPhi[2];
249: }
250: } else {
251: xx[gidx] = vj[qj * dim + 0]; /* coordinate */
252: yy[gidx] = vj[qj * dim + 1];
253: if (dim==3) zz[gidx] = vj[qj * dim + 2];
254: }
255: } /* q */
256: if (ctx->use_energy_tensor_trick) {
257: DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej+cStart, NULL, &coefs);
258: }
259: } /* ej */
260: if (ctx->use_energy_tensor_trick) {
261: DMRestoreLocalVector(dmEnergy, &v2_2);
262: DMDestroy(&dmEnergy);
263: }
264: } /* grid */
265: if (ctx->use_energy_tensor_trick) {
266: PetscFEDestroy(&fe);
267: }
269: /* cache static data */
270: if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
271: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS)
272: PetscReal invMass[LANDAU_MAX_SPECIES],nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
273: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
274: for (PetscInt ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++) {
275: invMass[ii] = m_0/ctx->masses[ii];
276: nu_alpha[ii] = PetscSqr(ctx->charges[ii]/m_0)*m_0/ctx->masses[ii];
277: nu_beta[ii] = PetscSqr(ctx->charges[ii]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
278: }
279: }
280: if (ctx->deviceType == LANDAU_CUDA) {
281: #if defined(PETSC_HAVE_CUDA)
282: LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d);
283: #else
284: SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
285: #endif
286: } else if (ctx->deviceType == LANDAU_KOKKOS) {
287: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
288: LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass,invJ_a,xx,yy,zz,ww,&ctx->SData_d);
289: #else
290: SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
291: #endif
292: }
293: #endif
294: /* free */
295: PetscFree4(ww,xx,yy,invJ_a);
296: if (dim==3) {
297: PetscFree(zz);
298: }
299: } else { /* CPU version, just copy in, only use part */
300: ctx->SData_d.w = (void*)ww;
301: ctx->SData_d.x = (void*)xx;
302: ctx->SData_d.y = (void*)yy;
303: ctx->SData_d.z = (void*)zz;
304: ctx->SData_d.invJ = (void*)invJ_a;
305: }
306: ctx->initialized = PETSC_TRUE;
307: PetscLogEventEnd(ctx->events[7],0,0,0,0);
308: } // initialize
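  /* At this point ctx->SData_d caches the static integration-point data for every grid:
     quadrature weights ww (with the cylindrical r factor in 2D), coordinates xx/yy(/zz) --
     or energy-gradient coordinates when the energy-tensor trick is on -- and the inverse
     Jacobians invJ, nip_glb points in total; the data live on the device for CUDA/Kokkos
     runs and on the host otherwise. */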
310: if (shift==0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
311: DM pack;
312: VecGetDM(a_X, &pack);
313: if (!pack) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "pack has no DM");
314: PetscLogEventBegin(ctx->events[1],0,0,0,0);
315: MatZeroEntries(JacP);
316: for (fieldA=0;fieldA<ctx->num_species;fieldA++) {
317: Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
318: if (dim==2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */
319: }
320: if (!ctx->gpu_assembly || !container) {
321: Vec locXarray[LANDAU_MAX_GRIDS],globXarray[LANDAU_MAX_GRIDS];
322: PetscScalar *cellClosure_it;
323: PetscInt cellClosure_sz=0;
325: /* count cellClosure size */
326: for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) cellClosure_sz += Nb*Nf[grid]*numCells[grid];
327: PetscMalloc1(cellClosure_sz,&cellClosure);
328: cellClosure_it = cellClosure;
329: /* for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) { */
330: /* DMClearLocalVectors(ctx->plex[grid]); */
331: /* } */
332: /* DMClearLocalVectors(pack); */
333: DMCompositeGetLocalAccessArray(pack, a_X, ctx->num_grids, NULL, locXarray);
334: DMCompositeGetAccessArray(pack, a_X, ctx->num_grids, NULL, globXarray);
335: for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
336: Vec locX = locXarray[grid], globX = globXarray[grid], locX2;
337: PetscInt cStart, cEnd, ei;
338: VecDuplicate(locX,&locX2);
339: DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2);
340: DMGlobalToLocalEnd (ctx->plex[grid], globX, INSERT_VALUES, locX2);
341: DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
342: for (ei = cStart ; ei < cEnd; ++ei) {
343: PetscScalar *coef = NULL;
344: DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef);
345: PetscMemcpy(cellClosure_it,coef,Nb*Nf[grid]*sizeof(*cellClosure_it)); /* change if LandauIPReal != PetscScalar */
346: DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef);
347: cellClosure_it += Nb*Nf[grid];
348: }
349: VecDestroy(&locX2);
350: }
351: if (cellClosure_it-cellClosure != cellClosure_sz) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %D != cellClosure_sz = %D",cellClosure_it-cellClosure,cellClosure_sz);
352: DMCompositeRestoreLocalAccessArray(pack, a_X, ctx->num_grids, NULL, locXarray);
353: DMCompositeRestoreAccessArray(pack, a_X, ctx->num_grids, NULL, globXarray);
354: xdata = NULL;
355: } else {
356: PetscMemType mtype;
357: VecGetArrayReadAndMemType(a_X,&xdata,&mtype);
358: if (mtype!=PETSC_MEMTYPE_HOST && ctx->deviceType == LANDAU_CPU) {
359: SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"CPU run with device data: use -mat_type aij");
360: }
361: cellClosure = NULL;
362: }
363: PetscLogEventEnd(ctx->events[1],0,0,0,0);
364: } else xdata = cellClosure = NULL;
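  /* Two data paths from here on: cellClosure holds per-cell closure coefficients (standard/CPU
     assembly), while xdata is the raw, possibly device-resident, array of the global state used
     when GPU assembly maps are available; exactly one of the two is non-NULL when shift==0. */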
365: /* do it */
366: if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
367: if (ctx->deviceType == LANDAU_CUDA) {
368: #if defined(PETSC_HAVE_CUDA)
369: LandauCUDAJacobian(ctx->plex,Nq,ctx->num_grids,numCells,Eq_m,cellClosure,N,xdata,&ctx->SData_d,ctx->subThreadBlockSize,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ, JacP);
370: #else
371: SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
372: #endif
373: } else if (ctx->deviceType == LANDAU_KOKKOS) {
374: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
375: LandauKokkosJacobian(ctx->plex,Nq,ctx->num_grids,numCells,Eq_m,cellClosure,N,xdata,&ctx->SData_d,ctx->subThreadBlockSize,shift,ctx->events,ctx->mat_offset, ctx->species_offset, subJ,JacP);
376: #else
377: SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
378: #endif
379: }
380: } else { /* CPU version */
381: PetscInt IPf_sz = 0;
382: PetscScalar coef_buff[LANDAU_MAX_SPECIES*LANDAU_MAX_NQ], *cellClosure_it;
383: PetscReal *ff, *dudx, *dudy, *dudz, *invJ, *invJ_a = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
384: const PetscReal *const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
385: PetscReal Eq_m[LANDAU_MAX_SPECIES], invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
386: if (shift==0.0) { /* compute dynamic data f and df and init data for Jacobian */
387: PetscInt IPf_idx = 0;
388: PetscLogEventBegin(ctx->events[8],0,0,0,0);
389: /* count IPf size */
390: for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) IPf_sz += Nq*Nf[grid]*numCells[grid]; // same as closure size
391: for (fieldA=0;fieldA<ctx->num_species;fieldA++) {
392: invMass[fieldA] = m_0/ctx->masses[fieldA];
393: Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
394: if (dim==2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */
395: nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA]/m_0)*m_0/ctx->masses[fieldA];
396: nu_beta[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
397: }
398: PetscMalloc4(IPf_sz, &ff, IPf_sz, &dudx, IPf_sz, &dudy, dim==3 ? IPf_sz : 0, &dudz);
399: invJ = invJ_a;
400: cellClosure_it = cellClosure;
401: for (PetscInt grid = 0 ; grid < ctx->num_grids ; grid++) { // IPf_idx += nip_loc*Nf
402: PetscInt moffset = ctx->mat_offset[grid], nip_loc = numCells[grid]*Nq, Nfloc = ctx->species_offset[grid+1] - ctx->species_offset[grid];
403: for (PetscInt ei = 0, jpidx_g = 0; ei < numCells[grid]; ++ei, invJ += Nq*dim*dim, cellClosure_it += Nb*Nfloc) {
404: PetscScalar *coef;
405: PetscInt b,f,q;
406: PetscReal u_x[LANDAU_MAX_SPECIES][LANDAU_DIM];
407: if (cellClosure) {
408: coef = cellClosure_it; // this is const
409: } else {
410: coef = coef_buff;
411: for (f = 0; f < Nfloc; ++f) {
412: LandauIdx *const Idxs = &maps[grid].gIdx[ei][f][0];
413: for (b = 0; b < Nb; ++b) {
414: PetscInt idx = Idxs[b];
415: if (idx >= 0) {
416: coef[f*Nb+b] = xdata[idx+moffset];
417: } else {
418: idx = -idx - 1;
419: coef[f*Nb+b] = 0;
420: for (q = 0; q < maps[grid].num_face; q++) {
421: PetscInt id = maps[grid].c_maps[idx][q].gid;
422: PetscScalar scale = maps[grid].c_maps[idx][q].scale;
423: coef[f*Nb+b] += scale*xdata[id+moffset];
424: }
425: }
426: }
427: }
428: }
429: /* get f and df */
430: for (PetscInt qi = 0; qi < Nq; qi++, jpidx_g++) {
431: const PetscReal *Bq = &BB[qi*Nb];
432: const PetscReal *Dq = &DD[qi*Nb*dim];
433: /* get f & df */
434: for (f = 0; f < Nfloc; ++f) {
435: const PetscInt idx = IPf_idx + f*nip_loc + jpidx_g;
436: PetscInt b, e;
437: PetscReal refSpaceDer[LANDAU_DIM];
438: ff[idx] = 0.0;
439: for (d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
440: for (b = 0; b < Nb; ++b) {
441: const PetscInt cidx = b;
442: ff[idx] += Bq[cidx]*PetscRealPart(coef[f*Nb+cidx]);
443: for (d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx*dim+d]*PetscRealPart(coef[f*Nb+cidx]);
444: }
445: for (d = 0; d < dim; ++d) {
446: for (e = 0, u_x[f][d] = 0.0; e < dim; ++e) {
447: u_x[f][d] += invJ[qi * dim * dim + e*dim+d]*refSpaceDer[e];
448: }
449: }
450: }
451: for (f=0;f<Nfloc;f++) {
452: const PetscInt idx = IPf_idx + f*nip_loc + jpidx_g;
453: dudx[idx] = u_x[f][0];
454: dudy[idx] = u_x[f][1];
455: #if LANDAU_DIM==3
456: dudz[idx] = u_x[f][2];
457: #endif
458: }
459: } // q
460: } // ei elem
461: IPf_idx += nip_loc*Nfloc;
462: } // grid
463: if (cellClosure && ((cellClosure_it-cellClosure) != IPf_sz)) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %D != nip_loc*Nf = %D",cellClosure_it-cellClosure,IPf_sz);
464: if (IPf_idx != IPf_sz) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %D %D",IPf_idx,IPf_sz);
465: PetscLogEventEnd(ctx->events[8],0,0,0,0);
466: } // Jacobian setup
468: /* do it */
469: invJ = invJ_a;
470: for (PetscInt grid = 0, jpidx = 0 ; grid < ctx->num_grids ; grid++) {
471: const PetscReal * const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
472: PetscInt cStart, Nfloc_j = Nf[grid], moffset = ctx->mat_offset[grid], totDim = Nfloc_j*Nq, elemMatSize = totDim*totDim;
473: PetscScalar *elemMat;
475: DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL); // to be safe, for initial DMPlexMatSetClosure
476: PetscMalloc1(elemMatSize, &elemMat);
477: for (PetscInt ei = 0; ei < numCells[grid]; ++ei, invJ += Nq*dim*dim) {
478: PetscMemzero(elemMat, elemMatSize*sizeof(*elemMat));
479: PetscLogEventBegin(ctx->events[4],0,0,0,0);
480: for (qj = 0; qj < Nq; ++qj, jpidx++) {
481: PetscReal g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
482: PetscInt d,d2,dp,d3,IPf_idx;
484: if (shift==0.0) {
485: const PetscReal * const invJj = &invJ[qj*dim*dim];
486: PetscReal gg2[LANDAU_MAX_SPECIES][LANDAU_DIM],gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
487: const PetscReal vj[3] = {xx[jpidx], yy[jpidx], zz ? zz[jpidx] : 0}, wj = ww[jpidx];
488: // create g2 & g3
489: for (d=0;d<dim;d++) { // clear accumulation data D & K
490: gg2_temp[d] = 0;
491: for (d2=0;d2<dim;d2++) gg3_temp[d][d2] = 0;
492: }
493: /* inner beta reduction */
494: IPf_idx = 0;
495: for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids ; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc*Nfloc_r
496: PetscInt nip_loc_r = numCells[grid_r]*Nq, Nfloc_r = Nf[grid_r];
497: for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
498: for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
499: const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
500: PetscReal temp1[3] = {0, 0, 0}, temp2 = 0;
501: #if LANDAU_DIM==2
502: PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
503: LandauTensor2D(vj, x, y, Ud, Uk, mask);
504: #else
505: PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0]-x) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1]-y) < 100*PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2]-z) < 100*PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
506: if (ctx->use_relativistic_corrections) {
507: LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
508: } else {
509: LandauTensor3D(vj, x, y, z, U, mask);
510: }
511: #endif
512: for (f = 0; f < Nfloc_r ; ++f) {
513: const PetscInt idx = IPf_idx + f*nip_loc_r + loc_fdf_idx;
514: temp1[0] += dudx[idx]*nu_beta[f+f_off]*invMass[f+f_off];
515: temp1[1] += dudy[idx]*nu_beta[f+f_off]*invMass[f+f_off];
516: #if LANDAU_DIM==3
517: temp1[2] += dudz[idx]*nu_beta[f+f_off]*invMass[f+f_off];
518: #endif
519: temp2 += ff[idx]*nu_beta[f+f_off];
520: }
521: temp1[0] *= wi;
522: temp1[1] *= wi;
523: #if LANDAU_DIM==3
524: temp1[2] *= wi;
525: #endif
526: temp2 *= wi;
527: #if LANDAU_DIM==2
528: for (d2 = 0; d2 < 2; d2++) {
529: for (d3 = 0; d3 < 2; ++d3) {
530: /* K = U * grad(f): g2=e: i,A */
531: gg2_temp[d2] += Uk[d2][d3]*temp1[d3];
532: /* D = -U * (I \kron (fx)): g3=f: i,j,A */
533: gg3_temp[d2][d3] += Ud[d2][d3]*temp2;
534: }
535: }
536: #else
537: for (d2 = 0; d2 < 3; ++d2) {
538: for (d3 = 0; d3 < 3; ++d3) {
539: /* K = U * grad(f): g2 = e: i,A */
540: gg2_temp[d2] += U[d2][d3]*temp1[d3];
541: /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
542: gg3_temp[d2][d3] += U[d2][d3]*temp2;
543: }
544: }
545: #endif
546: } // qi
547: } // ei_r
548: IPf_idx += nip_loc_r*Nfloc_r;
549: } /* grid_r - IPs */
550: if (IPf_idx != IPf_sz) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "IPf_idx != IPf_sz %D %D",IPf_idx,IPf_sz);
551: // add alpha and put in gg2/3
552: for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < Nfloc_j; ++fieldA) {
553: for (d2 = 0; d2 < dim; d2++) {
554: gg2[fieldA][d2] = gg2_temp[d2]*nu_alpha[fieldA+f_off];
555: for (d3 = 0; d3 < dim; d3++) {
556: gg3[fieldA][d2][d3] = -gg3_temp[d2][d3]*nu_alpha[fieldA+f_off]*invMass[fieldA+f_off];
557: }
558: }
559: }
560: /* add electric field term once per IP */
561: for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid] ; fieldA < Nfloc_j; ++fieldA) {
562: gg2[fieldA][dim-1] += Eq_m[fieldA+f_off];
563: }
564: /* Jacobian transform - g2, g3 */
565: for (PetscInt fieldA = 0; fieldA < Nfloc_j; ++fieldA) {
566: for (d = 0; d < dim; ++d) {
567: g2[fieldA][d] = 0.0;
568: for (d2 = 0; d2 < dim; ++d2) {
569: g2[fieldA][d] += invJj[d*dim+d2]*gg2[fieldA][d2];
570: g3[fieldA][d][d2] = 0.0;
571: for (d3 = 0; d3 < dim; ++d3) {
572: for (dp = 0; dp < dim; ++dp) {
573: g3[fieldA][d][d2] += invJj[d*dim + d3]*gg3[fieldA][d3][dp]*invJj[d2*dim + dp];
574: }
575: }
576: g3[fieldA][d][d2] *= wj;
577: }
578: g2[fieldA][d] *= wj;
579: }
580: }
581: } else { // mass
582: PetscReal wj = ww[jpidx];
583: /* Jacobian transform - g0 */
584: for (fieldA = 0; fieldA < Nfloc_j ; ++fieldA) {
585: if (dim==2) {
586: g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
587: } else {
588: g0[fieldA] = wj * shift; // move this to below and remove g0
589: }
590: }
591: }
592: /* FE matrix construction */
593: {
594: PetscInt fieldA,d,f,d2,g;
595: const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim];
596: /* assemble - on the diagonal (I,I) */
597: for (fieldA = 0; fieldA < Nfloc_j ; fieldA++) {
598: for (f = 0; f < Nb ; f++) {
599: const PetscInt i = fieldA*Nb + f; /* Element matrix row */
600: for (g = 0; g < Nb; ++g) {
601: const PetscInt j = fieldA*Nb + g; /* Element matrix column */
602: const PetscInt fOff = i*totDim + j;
603: if (shift==0.0) {
604: for (d = 0; d < dim; ++d) {
605: elemMat[fOff] += DIq[f*dim+d]*g2[fieldA][d]*BJq[g];
606: //printf("\t:%d.%d.%d.%d.%d.%d) elemMat=%e += %e %e %e\n",ej,qj,fieldA,f,g,d,elemMat[fOff],DIq[f*dim+d],g2[fieldA][d],BJq[g]);
607: for (d2 = 0; d2 < dim; ++d2) {
608: elemMat[fOff] += DIq[f*dim + d]*g3[fieldA][d][d2]*DIq[g*dim + d2];
609: }
610: }
611: } else { // mass
612: elemMat[fOff] += BJq[f]*g0[fieldA]*BJq[g];
613: }
614: }
615: }
616: }
617: }
618: } /* qj loop */
619: PetscLogEventEnd(ctx->events[4],0,0,0,0);
620: /* assemble matrix */
621: PetscLogEventBegin(ctx->events[6],0,0,0,0);
622: if (!container) {
623: DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[grid], ei + cStart, elemMat, ADD_VALUES);
624: } else { // GPU like assembly for debugging
625: PetscInt fieldA,idx,q,f,g,d,nr,nc,rows0[LANDAU_MAX_Q_FACE],cols0[LANDAU_MAX_Q_FACE]={0},rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
626: PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE],row_scale[LANDAU_MAX_Q_FACE],col_scale[LANDAU_MAX_Q_FACE]={0};
627: /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
628: for (fieldA = 0; fieldA < Nfloc_j ; fieldA++) {
629: LandauIdx *const Idxs = &maps[grid].gIdx[ei][fieldA][0];
630: //printf("\t\t%d) field %d, moffset=%d\n",ei,fieldA,moffset);
631: for (f = 0; f < Nb ; f++) {
632: idx = Idxs[f];
633: if (idx >= 0) {
634: nr = 1;
635: rows0[0] = idx;
636: row_scale[0] = 1.;
637: } else {
638: idx = -idx - 1;
639: nr = maps[grid].num_face;
640: for (q = 0; q < maps[grid].num_face; q++) {
641: rows0[q] = maps[grid].c_maps[idx][q].gid;
642: row_scale[q] = maps[grid].c_maps[idx][q].scale;
643: }
644: }
645: for (g = 0; g < Nb; ++g) {
646: idx = Idxs[g];
647: if (idx >= 0) {
648: nc = 1;
649: cols0[0] = idx;
650: col_scale[0] = 1.;
651: } else {
652: idx = -idx - 1;
653: nc = maps[grid].num_face;
654: for (q = 0; q < maps[grid].num_face; q++) {
655: cols0[q] = maps[grid].c_maps[idx][q].gid;
656: col_scale[q] = maps[grid].c_maps[idx][q].scale;
657: }
658: }
659: const PetscInt i = fieldA*Nb + f; /* Element matrix row */
660: const PetscInt j = fieldA*Nb + g; /* Element matrix column */
661: const PetscScalar Aij = elemMat[i*totDim + j];
662: for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
663: for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
664: for (q = 0; q < nr; q++) {
665: for (d = 0; d < nc; d++) {
666: vals[q*nc + d] = row_scale[q]*col_scale[d]*Aij;
667: //printf("\t\t\t%d) field %d, q=(%d.%d) A(%d.%d) = %g\n",ei,fieldA,f,g,rows[q],cols[d],vals[q*nc + d]);
668: }
669: }
670: MatSetValues(JacP,nr,rows,nc,cols,vals,ADD_VALUES);
671: }
672: }
673: }
674: }
675: if (ei==-1) {
676: PetscErrorCode ierr2;
677: ierr2 = PetscPrintf(ctx->comm,"CPU Element matrix\n");CHKERRQ(ierr2);
678: for (d = 0; d < totDim; ++d) {
679: for (f = 0; f < totDim; ++f) {ierr2 = PetscPrintf(ctx->comm," %12.5e", PetscRealPart(elemMat[d*totDim + f]));CHKERRQ(ierr2);}
680: ierr2 = PetscPrintf(ctx->comm,"\n");CHKERRQ(ierr2);
681: }
682: exit(12);
683: }
684: PetscLogEventEnd(ctx->events[6],0,0,0,0);
685: } /* ei cells loop */
686: PetscFree(elemMat);
688: if (!container) { // move nest matrix to global JacP
689: PetscInt moffset = ctx->mat_offset[grid], nloc, nzl, colbuf[1024], row;
690: const PetscInt *cols;
691: const PetscScalar *vals;
692: Mat B = subJ[grid];
694: MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY);
695: MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY);
696: MatGetSize(B, &nloc, NULL);
697: if (nloc != ctx->mat_offset[grid+1] - moffset) SETERRQ2(PetscObjectComm((PetscObject) B), PETSC_ERR_PLIB, "nloc %D != ctx->mat_offset[grid+1] - moffset = %D",nloc,ctx->mat_offset[grid+1] - moffset);
698: for (int i=0 ; i<nloc ; i++) {
699: MatGetRow(B,i,&nzl,&cols,&vals);
700: if (nzl>1024) SETERRQ1(PetscObjectComm((PetscObject) B), PETSC_ERR_PLIB, "Row too big: %D",nzl);
701: for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + moffset;
702: row = i + moffset;
703: MatSetValues(JacP,1,&row,nzl,colbuf,vals,ADD_VALUES);
704: MatRestoreRow(B,i,&nzl,&cols,&vals);
705: }
706: MatDestroy(&subJ[grid]);
707: }
708: } /* grid */
709: if (shift==0.0) { // Jacobian pass: free the dynamic f/df point data
710: PetscFree4(ff, dudx, dudy, dudz);
711: }
712: } /* CPU version */
714: /* assemble matrix or vector */
715: MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY);
716: MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY);
717: #define MAP_BF_SIZE (64*LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_Q_FACE*LANDAU_MAX_SPECIES)
718: if (ctx->gpu_assembly && !container) {
719: PetscScalar elemMatrix[LANDAU_MAX_NQ*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_MAX_SPECIES], *elMat;
720: pointInterpolationP4est pointMaps[MAP_BF_SIZE][LANDAU_MAX_Q_FACE];
721: PetscInt q,eidx,fieldA;
722: PetscInfo1(ctx->plex[0], "Make GPU maps %D\n",1);
723: PetscLogEventBegin(ctx->events[2],0,0,0,0);
724: PetscMalloc(sizeof(*maps)*ctx->num_grids, &maps);
725: PetscContainerCreate(PETSC_COMM_SELF, &container);
726: PetscContainerSetPointer(container, (void *)maps);
727: PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy);
728: PetscObjectCompose((PetscObject) JacP, "assembly_maps", (PetscObject) container);
729: PetscContainerDestroy(&container);
730: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
731: PetscInt cStart, cEnd, ej, Nfloc = Nf[grid], totDim = Nfloc*Nq;
732: DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
733: // make maps
734: maps[grid].d_self = NULL;
735: maps[grid].num_elements = numCells[grid];
736: maps[grid].num_face = (PetscInt)(pow(Nq,1./((double)dim))+.001); // Q
737: maps[grid].num_face = (PetscInt)(pow(maps[grid].num_face,(double)(dim-1))+.001); // Q^2
738: maps[grid].num_reduced = 0;
739: maps[grid].deviceType = ctx->deviceType;
740: maps[grid].numgrids = ctx->num_grids;
741: // count reduced and get
742: PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx);
743: for (fieldA=0;fieldA<Nf[grid];fieldA++) {
744: for (ej = cStart, eidx = 0 ; ej < cEnd; ++ej, ++eidx) {
745: for (q = 0; q < Nb; ++q) {
746: PetscInt numindices,*indices;
747: PetscScalar *valuesOrig = elMat = elemMatrix;
748: PetscMemzero(elMat, totDim*totDim*sizeof(*elMat));
749: elMat[ (fieldA*Nb + q)*totDim + fieldA*Nb + q] = 1;
750: DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
751: for (f = 0 ; f < numindices ; ++f) { // look for a non-zero on the diagonal
752: if (PetscAbs(PetscRealPart(elMat[f*numindices + f])) > PETSC_MACHINE_EPSILON) {
753: // found it
754: if (PetscAbs(PetscRealPart(elMat[f*numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) {
755: maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f]; // normal vertex 1.0
756: } else { //found a constraint
757: int jj = 0;
758: PetscReal sum = 0;
759: const PetscInt ff = f;
760: maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // gid = -(idx+1): idx = -gid - 1
761: do { // constraints are continuous in Plex - exploit that here
762: int ii;
763: for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // DMPlex puts them all together
764: if (ff + ii < numindices) {
765: pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f*numindices + ff + ii]);
766: }
767: }
768: sum += pointMaps[maps[grid].num_reduced][jj].scale;
769: if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps -- all contiguous???
770: else pointMaps[maps[grid].num_reduced][jj].gid = indices[f];
771: } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end
772: while (jj++ < maps[grid].num_face) {
773: pointMaps[maps[grid].num_reduced][jj].scale = 0;
774: pointMaps[maps[grid].num_reduced][jj].gid = -1;
775: }
776: if (PetscAbs(sum-1.0) > 10*PETSC_MACHINE_EPSILON) { // debug
777: int d,f;
778: PetscReal tmp = 0;
779: PetscPrintf(PETSC_COMM_SELF,"\t\t%D.%D.%D) ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%D)\n",eidx,q,fieldA,sum,LANDAU_MAX_Q_FACE,maps[grid].num_face);
780: for (d = 0, tmp = 0; d < numindices; ++d) {
781: if (tmp!=0 && PetscAbs(tmp-1.0) > 10*PETSC_MACHINE_EPSILON) {PetscPrintf(PETSC_COMM_WORLD,"%3D) %3D: ",d,indices[d]);}
782: for (f = 0; f < numindices; ++f) {
783: tmp += PetscRealPart(elMat[d*numindices + f]);
784: }
785: if (tmp!=0) {PetscPrintf(ctx->comm," | %22.16e\n",tmp);}
786: }
787: }
788: maps[grid].num_reduced++;
789: if (maps[grid].num_reduced>=MAP_BF_SIZE) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps[grid].num_reduced %d > %d",maps[grid].num_reduced,MAP_BF_SIZE);
790: }
791: break;
792: }
793: }
794: // cleanup
795: DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
796: if (elMat != valuesOrig) {DMRestoreWorkArray(ctx->plex[grid], numindices*numindices, MPIU_SCALAR, &elMat);}
797: }
798: }
799: }
800: // allocate and copy the constraint point data into maps[grid].c_maps (companion to gIdx[eidx][field][q])
801: PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps);
802: for (ej = 0; ej < maps[grid].num_reduced; ++ej) {
803: for (q = 0; q < maps[grid].num_face; ++q) {
804: maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale;
805: maps[grid].c_maps[ej][q].gid = pointMaps[ej][q].gid;
806: }
807: }
808: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
809: if (ctx->deviceType == LANDAU_KOKKOS) {
810: LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid); // implies Kokkos does the creation
811: } // else could be CUDA
812: #endif
813: #if defined(PETSC_HAVE_CUDA)
814: if (ctx->deviceType == LANDAU_CUDA) {
815: LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid);
816: }
817: #endif
818: } /* grids */
819: PetscLogEventEnd(ctx->events[2],0,0,0,0);
820: } /* first pass with GPU assembly */
821: /* clean up */
822: if (cellClosure) {
823: PetscFree(cellClosure);
824: }
825: if (xdata) {
826: VecRestoreArrayReadAndMemType(a_X,&xdata);
827: }
829: return(0);
830: }
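/* Usage sketch (hypothetical caller, for orientation only): the routine above is driven with
   shift==0 to assemble the Landau collision Jacobian from the current state, and with a nonzero
   shift to assemble a mass matrix (2*pi-weighted in 2D) scaled by that shift, e.g.
     LandauFormJacobian_Internal(X, JacP, dim, 0.0,   (void*)ctx);   // collision operator Jacobian
     LandauFormJacobian_Internal(X, M,    dim, shift, (void*)ctx);   // shifted mass matrix
   reusing the static point data cached in ctx->SData_d between calls. */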
832: #if defined(LANDAU_ADD_BCS)
833: static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux,
834: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
835: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
836: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[])
837: {
838: uexact[0] = 0;
839: }
840: #endif
842: #define MATVEC2(__a,__x,__p) {int i,j; for (i=0; i<2; i++) {__p[i] = 0; for (j=0; j<2; j++) __p[i] += __a[i][j]*__x[j]; }}
843: static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y,
844: PetscReal *outX, PetscReal *outY)
845: {
846: PetscReal rr = PetscSqrtReal(x*x + y*y), outfact, efact;
847: if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) {
848: *outX = x; *outY = y;
849: } else {
850: const PetscReal xy[2] = {x,y}, sinphi=y/rr, cosphi=x/rr;
851: PetscReal cth,sth,xyprime[2],Rth[2][2],rotcos,newrr;
852: if (num_sections==2) {
853: rotcos = 0.70710678118654;
854: outfact = 1.5; efact = 2.5;
855: /* rotate normalized vector into [-pi/4,pi/4) */
856: if (sinphi >= 0.) { /* top cell, -pi/2 */
857: cth = 0.707106781186548; sth = -0.707106781186548;
858: } else { /* bottom cell -pi/8 */
859: cth = 0.707106781186548; sth = .707106781186548;
860: }
861: } else if (num_sections==3) {
862: rotcos = 0.86602540378443;
863: outfact = 1.5; efact = 2.5;
864: /* rotate normalized vector into [-pi/6,pi/6) */
865: if (sinphi >= 0.5) { /* top cell, -pi/3 */
866: cth = 0.5; sth = -0.866025403784439;
867: } else if (sinphi >= -.5) { /* mid cell 0 */
868: cth = 1.; sth = .0;
869: } else { /* bottom cell +pi/3 */
870: cth = 0.5; sth = 0.866025403784439;
871: }
872: } else if (num_sections==4) {
873: rotcos = 0.9238795325112;
874: outfact = 1.5; efact = 3;
875: /* rotate normalized vector into [-pi/8,pi/8) */
876: if (sinphi >= 0.707106781186548) { /* top cell, -3pi/8 */
877: cth = 0.38268343236509; sth = -0.923879532511287;
878: } else if (sinphi >= 0.) { /* mid top cell -pi/8 */
879: cth = 0.923879532511287; sth = -.38268343236509;
880: } else if (sinphi >= -0.707106781186548) { /* mid bottom cell + pi/8 */
881: cth = 0.923879532511287; sth = 0.38268343236509;
882: } else { /* bottom cell + 3pi/8 */
883: cth = 0.38268343236509; sth = .923879532511287;
884: }
885: } else {
886: cth = 0.; sth = 0.; rotcos = 0; efact = 0;
887: }
888: Rth[0][0] = cth; Rth[0][1] =-sth;
889: Rth[1][0] = sth; Rth[1][1] = cth;
890: MATVEC2(Rth,xy,xyprime);
891: if (num_sections==2) {
892: newrr = xyprime[0]/rotcos;
893: } else {
894: PetscReal newcosphi=xyprime[0]/rr, rin = r1, rout = rr - rin;
895: PetscReal routmax = r0*rotcos/newcosphi - rin, nroutmax = r0 - rin, routfrac = rout/routmax;
896: newrr = rin + routfrac*nroutmax;
897: }
898: *outX = cosphi*newrr; *outY = sinphi*newrr;
899: /* grade */
900: PetscReal fact,tt,rs,re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY));
901: if (rr > r2) { rs = r2; re = r0; fact = outfact;} /* outer zone */
902: else { rs = r1; re = r2; fact = efact;} /* electron zone */
903: tt = (rs + PetscPowReal((rr - rs)/(re - rs),fact) * (re-rs)) / rr;
904: *outX *= tt;
905: *outY *= tt;
906: }
907: }
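/* The grading step above maps a radius rr in [rs,re] to
     rr_new = rs + ((rr - rs)/(re - rs))^fact * (re - rs),
   so fact > 1 clusters points toward rs.  A small worked example (illustrative): rs=1, re=2,
   fact=2, rr=1.5 gives rr_new = 1.25, i.e. tt = rr_new/rr ~ 0.833, and the point is pulled inward. */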
909: static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
910: {
911: LandauCtx *ctx = (LandauCtx*)a_ctx;
912: PetscReal r = abc[0], z = abc[1];
913: if (ctx->inflate) {
914: PetscReal absR, absZ;
915: absR = PetscAbs(r);
916: absZ = PetscAbs(z);
917: CircleInflate(ctx->i_radius[0],ctx->e_radius,ctx->radius[0],ctx->num_sections,absR,absZ,&absR,&absZ); // wrong: how do I know what grid I am on?
918: r = (r > 0) ? absR : -absR;
919: z = (z > 0) ? absZ : -absZ;
920: }
921: xyz[0] = r;
922: xyz[1] = z;
923: if (dim==3) xyz[2] = abc[2];
925: return(0);
926: }
928: /* create DMComposite of meshes for each species group */
929: static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM *pack)
930: {
932: size_t len;
933: char fname[128] = ""; /* we can add a file if we want, for each grid */
936: /* create DM */
937: PetscStrlen(fname, &len);
938: if (len) { // not used, need to loop over grids
939: PetscInt dim2;
940: DMPlexCreateFromFile(comm_self, fname, ctx->interpolate, pack);
941: DMGetDimension(*pack, &dim2);
942: if (LANDAU_DIM != dim2) SETERRQ2(comm_self, PETSC_ERR_PLIB, "dim %D != LANDAU_DIM %d",dim2,LANDAU_DIM);
943: } else { /* p4est, quads */
944: DMCompositeCreate(comm_self,pack);
945: /* Create plex mesh of Landau domain */
946: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
947: PetscReal radius = ctx->radius[grid];
948: if (!ctx->sphere) {
949: PetscInt cells[] = {2,2,2};
950: PetscReal lo[] = {-radius,-radius,-radius}, hi[] = {radius,radius,radius};
951: DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, dim==2 ? DM_BOUNDARY_NONE : DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
952: if (dim==2) { lo[0] = 0; cells[0] = 1; }
953: DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid]); // todo: make composite and create dm[grid] here
954: DMLocalizeCoordinates(ctx->plex[grid]); /* needed for periodic */
955: if (dim==3) {PetscObjectSetName((PetscObject) ctx->plex[grid], "cube");}
956: else {PetscObjectSetName((PetscObject) ctx->plex[grid], "half-plane");}
957: } else if (dim==2) { // sphere is all wrong. should just have one inner radius
958: PetscInt numCells,cells[16][4],i,j;
959: PetscInt numVerts;
960: PetscReal inner_radius1 = ctx->i_radius[grid], inner_radius2 = ctx->e_radius;
961: PetscReal *flatCoords = NULL;
962: PetscInt *flatCells = NULL, *pcell;
963: if (ctx->num_sections==2) {
964: #if 1
965: numCells = 5;
966: numVerts = 10;
967: int cells2[][4] = { {0,1,4,3},
968: {1,2,5,4},
969: {3,4,7,6},
970: {4,5,8,7},
971: {6,7,8,9} };
972: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
973: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
974: {
975: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
976: for (j = 0; j < numVerts-1; j++) {
977: PetscReal z, r, theta = -PETSC_PI/2 + (j%3) * PETSC_PI/2;
978: PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius[grid];
979: z = rad * PetscSinReal(theta);
980: coords[j][1] = z;
981: r = rad * PetscCosReal(theta);
982: coords[j][0] = r;
983: }
984: coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
985: }
986: #else
987: numCells = 4;
988: numVerts = 8;
989: static int cells2[][4] = {{0,1,2,3},
990: {4,5,1,0},
991: {5,6,2,1},
992: {6,7,3,2}};
993: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
994: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
995: {
996: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
997: PetscInt j;
998: for (j = 0; j < 8; j++) {
999: PetscReal z, r;
1000: PetscReal theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3.;
1001: PetscReal rad = ctx->radius[grid] * ((j < 4) ? 0.5 : 1.0);
1002: z = rad * PetscSinReal(theta);
1003: coords[j][1] = z;
1004: r = rad * PetscCosReal(theta);
1005: coords[j][0] = r;
1006: }
1007: }
1008: #endif
1009: } else if (ctx->num_sections==3) {
1010: numCells = 7;
1011: numVerts = 12;
1012: int cells2[][4] = { {0,1,5,4},
1013: {1,2,6,5},
1014: {2,3,7,6},
1015: {4,5,9,8},
1016: {5,6,10,9},
1017: {6,7,11,10},
1018: {8,9,10,11} };
1019: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
1020: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
1021: {
1022: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
1023: for (j = 0; j < numVerts; j++) {
1024: PetscReal z, r, theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3;
1025: PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius[grid];
1026: z = rad * PetscSinReal(theta);
1027: coords[j][1] = z;
1028: r = rad * PetscCosReal(theta);
1029: coords[j][0] = r;
1030: }
1031: }
1032: } else if (ctx->num_sections==4) {
1033: numCells = 10;
1034: numVerts = 16;
1035: int cells2[][4] = { {0,1,6,5},
1036: {1,2,7,6},
1037: {2,3,8,7},
1038: {3,4,9,8},
1039: {5,6,11,10},
1040: {6,7,12,11},
1041: {7,8,13,12},
1042: {8,9,14,13},
1043: {10,11,12,15},
1044: {12,13,14,15}};
1045: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
1046: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
1047: {
1048: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
1049: for (j = 0; j < numVerts-1; j++) {
1050: PetscReal z, r, theta = -PETSC_PI/2 + (j%5) * PETSC_PI/4;
1051: PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius[grid];
1052: z = rad * PetscSinReal(theta);
1053: coords[j][1] = z;
1054: r = rad * PetscCosReal(theta);
1055: coords[j][0] = r;
1056: }
1057: coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
1058: }
1059: } else {
1060: numCells = 0;
1061: numVerts = 0;
1062: }
1063: for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
1064: pcell[0] = cells[j][0]; pcell[1] = cells[j][1];
1065: pcell[2] = cells[j][2]; pcell[3] = cells[j][3];
1066: }
1067: DMPlexCreateFromCellListPetsc(comm_self,2,numCells,numVerts,4,ctx->interpolate,flatCells,2,flatCoords,&ctx->plex[grid]);
1068: PetscFree2(flatCoords,flatCells);
1069: PetscObjectSetName((PetscObject) ctx->plex[grid], "semi-circle");
1070: } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes do not support cubed sphere");
1072: DMSetFromOptions(ctx->plex[grid]);
1073: } // grid loop
1074: PetscObjectSetOptionsPrefix((PetscObject)*pack,prefix);
1075: DMSetFromOptions(*pack);
1077: { /* convert to p4est (or whatever), wait for discretization to create pack */
1078: char convType[256];
1079: PetscBool flg;
1080: PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");
1081: PetscOptionsFList("-dm_landau_type","Convert DMPlex to another format (p4est)","plexland.c",DMList,DMPLEX,convType,256,&flg);
1082: PetscOptionsEnd();
1083: if (flg) {
1084: ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
1085: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
1086: DM dmforest;
1087: DMConvert(ctx->plex[grid],convType,&dmforest);
1088: if (dmforest) {
1089: PetscBool isForest;
1090: PetscObjectSetOptionsPrefix((PetscObject)dmforest,prefix);
1091: DMIsForest(dmforest,&isForest);
1092: if (isForest) {
1093: if (ctx->sphere && ctx->inflate) {
1094: DMForestSetBaseCoordinateMapping(dmforest,GeometryDMLandau,ctx);
1095: }
1096: if (dmforest->prealloc_only != ctx->plex[grid]->prealloc_only) SETERRQ(PetscObjectComm((PetscObject)dmforest),PETSC_ERR_PLIB,"plex->prealloc_only != dm->prealloc_only");
1097: DMDestroy(&ctx->plex[grid]);
1098: ctx->plex[grid] = dmforest; // Forest for adaptivity
1099: } else SETERRQ(ctx->comm, PETSC_ERR_USER, "Converted to non Forest?");
1100: } else SETERRQ(ctx->comm, PETSC_ERR_USER, "Convert failed?");
1101: }
1102: } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
1103: }
1104: } /* non-file */
1105: DMSetDimension(*pack, dim);
1106: PetscObjectSetName((PetscObject) *pack, "Mesh");
1107: DMSetApplicationContext(*pack, ctx);
1109: return(0);
1110: }
1112: static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx)
1113: {
1114: PetscErrorCode ierr;
1115: PetscInt ii,i0;
1116: char buf[256];
1117: PetscSection section;
1120: for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
1121: if (ii==0) PetscSNPrintf(buf, 256, "e");
1122: else {PetscSNPrintf(buf, 256, "i%D", ii);}
1123: /* Setup Discretization - FEM */
1124: PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]);
1125: PetscObjectSetName((PetscObject) ctx->fe[ii], buf);
1126: DMSetField(ctx->plex[grid], i0, NULL, (PetscObject) ctx->fe[ii]);
1127: }
1128: DMCreateDS(ctx->plex[grid]);
1129: DMGetSection(ctx->plex[grid], &section);
1130: for (PetscInt ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
1131: if (ii==0) PetscSNPrintf(buf, 256, "se");
1132: else PetscSNPrintf(buf, 256, "si%D", ii);
1133: PetscSectionSetComponentName(section, i0, 0, buf);
1134: }
1135: return(0);
1136: }
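/* Field-naming example (illustrative): with species_offset = {0,1,3}, grid 0 carries the single
   electron field "e" (component "se") and grid 1 carries two ion fields "i1","i2" (components
   "si1","si2"), each discretized with the same default PetscFE. */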
1138: /* Define a Maxwellian function for testing out the operator. */
1140: /* Using cartesian velocity space coordinates, the particle */
1141: /* density, [1/m^3], is defined according to */
1143: /* $$ n=\int_{R^3} dv^3\, n \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */
1145: /* Using some constant, c, we normalize the velocity vector into a */
1146: /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */
1148: /* $$ n=\int_{R^3} dx^3\, n \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */
1150: /* Defining $\theta=2T/mc^2$, we thus find that the probability density */
1151: /* for finding the particle in a box dx^3 around x is */
1153: /* f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] */
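/* Concrete check (illustrative): since \int_{R^3} (\pi\theta)^{-3/2} \exp[-x^2/\theta] dx^3 = 1, */
/* the routine below, which returns n (\pi\theta)^{-3/2} \exp[-x^2/\theta] with                   */
/* \theta = 2 k T/(m v_0^2), integrates over the dimensionless velocity x to the prescribed       */
/* density n, as intended.                                                                         */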
1155: typedef struct {
1156: PetscReal v_0;
1157: PetscReal kT_m;
1158: PetscReal n;
1159: PetscReal shift;
1160: } MaxwellianCtx;
1162: static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
1163: {
1164: MaxwellianCtx *mctx = (MaxwellianCtx*)actx;
1165: PetscInt i;
1166: PetscReal v2 = 0, theta = 2*mctx->kT_m/(mctx->v_0*mctx->v_0); /* theta = 2kT/mc^2 */
1168: /* compute the exponents, v^2 */
1169: for (i = 0; i < dim; ++i) v2 += x[i]*x[i];
1170: /* evaluate the Maxwellian */
1171: u[0] = mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
1172: if (mctx->shift!=0.) {
1173: v2 = 0;
1174: for (i = 0; i < dim-1; ++i) v2 += x[i]*x[i];
1175: v2 += (x[dim-1]-mctx->shift)*(x[dim-1]-mctx->shift);
1176: /* evaluate the shifted Maxwellian */
1177: u[0] += mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
1178: }
1179: return(0);
1180: }
1182: /*@
1183: LandauAddMaxwellians - Add a Maxwellian distribution to a state
1185: Collective on X
1187: Input Parameters:
1188: . dm - The mesh (local)
1189: + time - Current time
1190: - temps - Temperatures of each species (global)
1191: . ns - Number density of each species (global)
1192: - grid - index into current grid - just used for offset into temp and ns
1193: + actx - Landau context
1195: Output Parameter:
1196: . X - The state (local to this grid)
1198: Level: beginner
1200: .keywords: mesh
1201: .seealso: LandauCreateVelocitySpace()
1202: @*/
1203: PetscErrorCode LandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, void *actx)
1204: {
1205: LandauCtx *ctx = (LandauCtx*)actx;
1206: PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *);
1207: PetscErrorCode ierr;
1208: PetscInt dim, ii, i0;
1209: MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];
1212: DMGetDimension(dm, &dim);
1213: if (!ctx) { DMGetApplicationContext(dm, &ctx); }
1214: for (ii = ctx->species_offset[grid], i0 = 0 ; ii < ctx->species_offset[grid+1] ; ii++, i0++) {
1215: mctxs[i0] = &data[i0];
1216: data[i0].v_0 = ctx->v_0; // v_0 same for whole grid
1217: data[i0].kT_m = ctx->k*temps[ii]/ctx->masses[ii]; /* kT/m */
1218: data[i0].n = ns[ii];
1219: initu[i0] = maxwellian;
1220: data[i0].shift = 0;
1221: }
1222: data[0].shift = ctx->electronShift;
1223: /* need to make ADD_ALL_VALUES work - TODO */
1224: DMProjectFunction(dm, time, initu, (void**)mctxs, INSERT_ALL_VALUES, X);
1225: return(0);
1226: }
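/* Minimal usage sketch (assumes an initialized LandauCtx; names as in this file):
     Vec Xg;
     DMCreateGlobalVector(ctx->plex[grid], &Xg);
     VecZeroEntries(Xg);
     LandauAddMaxwellians(ctx->plex[grid], Xg, 0.0, ctx->thermal_temps, ctx->n, grid, ctx);
   which is essentially what LandauSetInitialCondition() below does with the vector it is given. */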
1228: /*
1229: LandauSetInitialCondition - Adds Maxwellians with context
1231: Collective on X
1233: Input Parameters:
1234: + dm - The mesh
1235: . grid - index of the current grid, used as an offset into temps and ns
1236: - actx - Landau context with T and n
1238: Output Parameter:
1239: . X - The state
1241: Level: beginner
1243: .keywords: mesh
1244: .seealso: LandauCreateVelocitySpace(), LandauAddMaxwellians()
1245: */
1246: static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, void *actx)
1247: {
1248: LandauCtx *ctx = (LandauCtx*)actx;
1251: if (!ctx) { DMGetApplicationContext(dm, &ctx); }
1252: VecZeroEntries(X);
1253: LandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, ctx);
1254: return(0);
1255: }
1257: // adapt a level once. Forest in/out
1258: static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
1259: {
1260: DM forest, plex, adaptedDM = NULL;
1261: PetscDS prob;
1262: PetscBool isForest;
1263: PetscQuadrature quad;
1264: PetscInt Nq, *Nb, cStart, cEnd, c, dim, qj, k;
1265: DMLabel adaptLabel = NULL;
1266: PetscErrorCode ierr;
1269: forest = ctx->plex[grid];
1270: DMCreateDS(forest);
1271: DMGetDS(forest, &prob);
1272: DMGetDimension(forest, &dim);
1273: DMIsForest(forest, &isForest);
1274: if (!isForest) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"! Forest");
1275: DMConvert(forest, DMPLEX, &plex);
1276: DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1277: DMLabelCreate(PETSC_COMM_SELF,"adapt",&adaptLabel);
1278: PetscFEGetQuadrature(fem, &quad);
1279: PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL);
1280: if (Nq >LANDAU_MAX_NQ) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %D > LANDAU_MAX_NQ (%D)",Nq,LANDAU_MAX_NQ);
1281: PetscDSGetDimensions(prob, &Nb);
1282: if (type==4) {
1283: for (c = cStart; c < cEnd; c++) {
1284: DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);
1285: }
1286: PetscInfo1(sol, "Phase:%s: Uniform refinement\n","adaptToleranceFEM");
1287: } else if (type==2) {
1288: PetscInt rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim==3) ? 8 : 2;
1289: PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY;
1290: for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; }
1291: for (c = cStart; c < cEnd; c++) {
1292: PetscReal tt, v0[LANDAU_MAX_NQ*3], detJ[LANDAU_MAX_NQ];
1293: DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ);
1294: for (qj = 0; qj < Nq; ++qj) {
1295: tt = PetscSqr(v0[dim*qj+0]) + PetscSqr(v0[dim*qj+1]) + PetscSqr(((dim==3) ? v0[dim*qj+2] : 0));
1296: r = PetscSqrtReal(tt);
1297: if (r < minRad - PETSC_SQRT_MACHINE_EPSILON*10.) {
1298: minRad = r;
1299: nr = 0;
1300: rCellIdx[nr++]= c;
1301: PetscInfo4(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %D, qp %D/%D\n", r, c, qj+1, Nq);
1302: } else if ((r-minRad) < PETSC_SQRT_MACHINE_EPSILON*100. && nr < nrmax) {
1303: for (k=0;k<nr;k++) if (c == rCellIdx[k]) break;
1304: if (k==nr) {
1305: rCellIdx[nr++]= c;
1306: PetscInfo5(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %D, qp %D/%D, d=%e\n", r, c, qj+1, Nq, r-minRad);
1307: }
1308: }
1309: if (ctx->sphere) {
1310: if ((tt=r-ctx->e_radius) > 0) {
1311: PetscInfo2(sol, "\t\t\t %D cell r=%g\n",c,tt);
1312: if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON*100.) {
1313: eMinRad = tt;
1314: eMaxIdx = 0;
1315: eCellIdx[eMaxIdx++] = c;
1316: } else if (eMaxIdx > 0 && (tt-eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx-1]) {
1317: eCellIdx[eMaxIdx++] = c;
1318: }
1319: }
1320: if ((tt=r-ctx->i_radius[grid]) > 0) {
1321: if (tt < iMinRad - 1.e-5) {
1322: iMinRad = tt;
1323: iMaxIdx = 0;
1324: iCellIdx[iMaxIdx++] = c;
1325: } else if (iMaxIdx > 0 && (tt-iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx-1]) {
1326: iCellIdx[iMaxIdx++] = c;
1327: }
1328: }
1329: }
1330: }
1331: }
1332: for (k=0;k<nr;k++) {
1333: DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE);
1334: }
1335: if (ctx->sphere) {
1336: for (c = 0; c < eMaxIdx; c++) {
1337: DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE);
1338: PetscInfo3(sol, "\t\tPhase:%s: refine sphere e cell %D r=%g\n","adaptToleranceFEM",eCellIdx[c],eMinRad);
1339: }
1340: for (c = 0; c < iMaxIdx; c++) {
1341: DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE);
1342: PetscInfo3(sol, "\t\tPhase:%s: refine sphere i cell %D r=%g\n","adaptToleranceFEM",iCellIdx[c],iMinRad);
1343: }
1344: }
1345: PetscInfo4(sol, "Phase:%s: Adaptive refine origin cells %D,%D r=%g\n","adaptToleranceFEM",rCellIdx[0],rCellIdx[1],minRad);
1346: } else if (type==0 || type==1 || type==3) { /* refine along r=0 axis */
1347: PetscScalar *coef = NULL;
1348: Vec coords;
1349: PetscInt csize,Nv,d,nz;
1350: DM cdm;
1351: PetscSection cs;
1352: DMGetCoordinatesLocal(forest, &coords);
1353: DMGetCoordinateDM(forest, &cdm);
1354: DMGetLocalSection(cdm, &cs);
1355: for (c = cStart; c < cEnd; c++) {
1356: PetscInt doit = 0, outside = 0;
1357: DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef);
1358: Nv = csize/dim;
1359: for (nz = d = 0; d < Nv; d++) {
1360: PetscReal z = PetscRealPart(coef[d*dim + (dim-1)]), x = PetscSqr(PetscRealPart(coef[d*dim + 0])) + ((dim==3) ? PetscSqr(PetscRealPart(coef[d*dim + 1])) : 0);
1361: x = PetscSqrtReal(x);
1362: if (x < PETSC_MACHINE_EPSILON*10. && PetscAbs(z)<PETSC_MACHINE_EPSILON*10.) doit = 1; /* refine origin */
1363: else if (type==0 && (z < -PETSC_MACHINE_EPSILON*10. || z > ctx->re_radius+PETSC_MACHINE_EPSILON*10.)) outside++; /* first pass don't refine bottom */
1364: else if (type==1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++; /* don't refine outside electron refine radius */
1365: else if (type==3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++; /* don't refine outside ion refine radius */
1366: if (x < PETSC_MACHINE_EPSILON*10.) nz++;
1367: }
1368: DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef);
1369: if (doit || (outside<Nv && nz)) {
1370: DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);
1371: }
1372: }
1373: PetscInfo1(sol, "Phase:%s: RE refinement\n","adaptToleranceFEM");
1374: }
1375: DMDestroy(&plex);
1376: DMAdaptLabel(forest, adaptLabel, &adaptedDM);
1377: DMLabelDestroy(&adaptLabel);
1378: *newForest = adaptedDM;
1379: if (adaptedDM) {
1380: if (isForest) {
1381: DMForestSetAdaptivityForest(adaptedDM,NULL); // drop the reference to the pre-adaptation forest
1382: } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Adapted DM is not a forest; should be unreachable since isForest was verified above");
1383: DMConvert(adaptedDM, DMPLEX, &plex);
1384: DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1385: PetscInfo2(sol, "\tPhase: adaptToleranceFEM: %D cells, %D total quadrature points\n",cEnd-cStart,Nq*(cEnd-cStart));
1386: DMDestroy(&plex);
1387: } else *newForest = NULL;
1388: return(0);
1389: }
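/*
  Summary of the refinement "type" codes consumed by adapt() below, as implemented in adaptToleranceFEM() above:
    0 - refine along the v_perp=0 (r=0) axis for 0 <= z <= re_radius (-dm_landau_re_radius); driven by -dm_landau_amr_re_levels
    1 - refine along the v_perp=0 axis for |z| <= vperp0_radius1 (-dm_landau_z_radius1);       driven by -dm_landau_amr_z_refine1
    2 - refine the cells nearest the origin, plus the e/i sphere-radius cells with -dm_landau_sphere; driven by -dm_landau_amr_levels_max
    3 - refine along the v_perp=0 axis for |z| <= vperp0_radius2 (-dm_landau_z_radius2);       driven by -dm_landau_amr_z_refine2
    4 - uniform refinement of every cell;                                                      driven by -dm_landau_amr_post_refine
*/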
1391: // forest goes in (ctx->plex[grid]), plex comes out
1392: static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
1393: {
1394: PetscErrorCode ierr;
1395: PetscInt adaptIter;
1398: PetscInt type, limits[5] = {(grid==0) ? ctx->numRERefine : 0, (grid==0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid==0) ? ctx->nZRefine2 : 0,ctx->postAMRRefine[grid]};
1399: for (type=0;type<5;type++) {
1400: for (adaptIter = 0; adaptIter<limits[type];adaptIter++) {
1401: DM newForest = NULL;
1402: adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest);
1403: if (newForest) {
1404: DMDestroy(&ctx->plex[grid]);
1405: VecDestroy(uu);
1406: DMCreateGlobalVector(newForest,uu);
1407: PetscObjectSetName((PetscObject) *uu, "uAMR");
1408: LandauSetInitialCondition(newForest, *uu, grid, ctx);
1409: ctx->plex[grid] = newForest;
1410: } else {
1411: SETERRQ(ctx->comm, PETSC_ERR_PLIB, "adaptToleranceFEM produced no new forest; can happen with no AMR and post refinement");
1412: }
1413: }
1414: }
1415: return(0);
1416: }
1418: static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1419: {
1420: PetscErrorCode ierr;
1421: PetscBool flg, sph_flg;
1422: PetscInt ii,nt,nm,nc,num_species_grid[LANDAU_MAX_GRIDS];
1423: PetscReal v0_grid[LANDAU_MAX_GRIDS];
1424: DM dummy;
1427: DMCreate(ctx->comm,&dummy);
1428: /* get options - initialize context */
1429: ctx->verbose = 1;
1430: ctx->interpolate = PETSC_TRUE;
1431: ctx->gpu_assembly = PETSC_TRUE;
1432: ctx->aux_bool = PETSC_FALSE;
1433: ctx->electronShift = 0;
1434: ctx->M = NULL;
1435: ctx->J = NULL;
1436: /* geometry and grids */
1437: ctx->sphere = PETSC_FALSE;
1438: ctx->inflate = PETSC_FALSE;
1439: ctx->aux_bool = PETSC_FALSE;
1440: ctx->use_p4est = PETSC_FALSE;
1441: ctx->num_sections = 3; /* 2, 3 or 4 */
1442: for (PetscInt grid=0;grid<LANDAU_MAX_GRIDS;grid++) {
1443: ctx->radius[grid] = 5.; /* thermal radius (velocity) */
1444: ctx->numAMRRefine[grid] = 5;
1445: ctx->postAMRRefine[grid] = 0;
1446: ctx->species_offset[grid+1] = 1; // one species default
1447: num_species_grid[grid] = 0;
1448: ctx->plex[grid] = NULL; /* cache as expensive to Convert */
1449: v0_grid[grid] = 1;
1450: }
1451: ctx->species_offset[0] = 0;
1452: ctx->re_radius = 0.;
1453: ctx->vperp0_radius1 = 0;
1454: ctx->vperp0_radius2 = 0;
1455: ctx->nZRefine1 = 0;
1456: ctx->nZRefine2 = 0;
1457: ctx->numRERefine = 0;
1458: num_species_grid[0] = 1; // one species default
1459: /* species - [0] electrons, [1] one ion species (e.g., deuterium), [2] heavy impurity ion, ... */
1460: ctx->charges[0] = -1; /* electron charge (MKS) */
1461: ctx->masses[0] = 1/1835.469965278441013; /* temporary value in units of proton mass */
1462: ctx->n[0] = 1;
1463: ctx->v_0 = 1; /* thermal velocity, we could start with a scale != 1 */
1464: ctx->thermal_temps[0] = 1;
1465: /* constants, etc. */
1466: ctx->epsilon0 = 8.8542e-12; /* permittivity of free space (MKS) F/m */
1467: ctx->k = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1468: ctx->lnLam = 10; /* Coulomb logarithm (ratio of large- to small-angle collision cross sections) */
1469: ctx->n_0 = 1.e20; /* typical plasma n, but could set it to 1 */
1470: ctx->Ez = 0;
1471: ctx->subThreadBlockSize = 1; /* for device and maybe OMP */
1472: ctx->numConcurrency = 1; /* for device */
1473: ctx->times[0] = 0;
1474: ctx->initialized = PETSC_FALSE; // do the one-time setup on the first call
1475: ctx->use_matrix_mass = PETSC_FALSE; /* fast but slightly fragile */
1476: ctx->use_relativistic_corrections = PETSC_FALSE;
1477: ctx->use_energy_tensor_trick = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
1478: ctx->SData_d.w = NULL;
1479: ctx->SData_d.x = NULL;
1480: ctx->SData_d.y = NULL;
1481: ctx->SData_d.z = NULL;
1482: ctx->SData_d.invJ = NULL;
1483: PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");
1484: {
1485: char opstring[256];
1486: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1487: ctx->deviceType = LANDAU_KOKKOS;
1488: PetscStrcpy(opstring,"kokkos");
1489: #if defined(PETSC_HAVE_CUDA)
1490: ctx->subThreadBlockSize = 16;
1491: #endif
1492: #elif defined(PETSC_HAVE_CUDA)
1493: ctx->deviceType = LANDAU_CUDA;
1494: PetscStrcpy(opstring,"cuda");
1495: #else
1496: ctx->deviceType = LANDAU_CPU;
1497: PetscStrcpy(opstring,"cpu");
1498: ctx->subThreadBlockSize = 0;
1499: #endif
1500: PetscOptionsString("-dm_landau_device_type","Use kernels on 'cpu', 'cuda', or 'kokkos'","plexland.c",opstring,opstring,256,NULL);
1501: PetscStrcmp("cpu",opstring,&flg);
1502: if (flg) {
1503: ctx->deviceType = LANDAU_CPU;
1504: ctx->subThreadBlockSize = 0;
1505: } else {
1506: PetscStrcmp("cuda",opstring,&flg);
1507: if (flg) {
1508: ctx->deviceType = LANDAU_CUDA;
1509: ctx->subThreadBlockSize = 0;
1510: } else {
1511: PetscStrcmp("kokkos",opstring,&flg);
1512: if (flg) ctx->deviceType = LANDAU_KOKKOS;
1513: else SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_device_type %s",opstring);
1514: }
1515: }
1516: }
1518: PetscOptionsReal("-dm_landau_electron_shift","Shift in thermal velocity of electrons","none",ctx->electronShift,&ctx->electronShift, NULL);
1519: PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL);
1520: PetscOptionsReal("-dm_landau_Ez","Initial parallel electric field in unites of Conner-Hastie criticle field","plexland.c",ctx->Ez,&ctx->Ez, NULL);
1521: PetscOptionsReal("-dm_landau_n_0","Normalization constant for number density","plexland.c",ctx->n_0,&ctx->n_0, NULL);
1522: PetscOptionsReal("-dm_landau_ln_lambda","Cross section parameter","plexland.c",ctx->lnLam,&ctx->lnLam, NULL);
1523: PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MATAXPY to add mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL);
1524: PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL);
1525: PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick, &ctx->use_energy_tensor_trick, NULL);
1527: /* get the number of species from the temperature list */
1528: {
1529: PetscReal arr[100];
1530: nt = 100;
1531: PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV", "plexland.c", arr, &nt, &flg);
1532: if (flg && nt > LANDAU_MAX_SPECIES) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps t1,t2,...: number of species %D > MAX %D",nt,LANDAU_MAX_SPECIES);
1533: }
1534: nt = LANDAU_MAX_SPECIES;
1535: for (ii=1;ii<LANDAU_MAX_SPECIES;ii++) {
1536: ctx->thermal_temps[ii] = 1.;
1537: ctx->charges[ii] = 1;
1538: ctx->masses[ii] = 1;
1539: ctx->n[ii] = (ii==1) ? 1 : 0;
1540: }
1541: PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg);
1542: if (flg) {
1543: PetscInfo1(dummy, "num_species set to number of thermal temps provided (%D)\n",nt);
1544: ctx->num_species = nt;
1545: } else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1546: for (ii=0;ii<ctx->num_species;ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
1547: nm = LANDAU_MAX_SPECIES-1;
1548: PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg);
1549: if (flg && nm != ctx->num_species-1) {
1550: SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"num ion masses %D != num species %D",nm,ctx->num_species-1);
1551: }
1552: nm = LANDAU_MAX_SPECIES;
1553: PetscOptionsRealArray("-dm_landau_n", "Normalized (by -n_0) number density of each species", "plexland.c", ctx->n, &nm, &flg);
1554: if (flg && nm != ctx->num_species) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"wrong num n: %D != num species %D",nm,ctx->num_species);
1555: ctx->n_0 *= ctx->n[0]; /* normalized number density */
1556: for (ii=1;ii<ctx->num_species;ii++) ctx->n[ii] = ctx->n[ii]/ctx->n[0];
1557: ctx->n[0] = 1;
1558: for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1559: ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */
1560: ctx->m_0 = ctx->masses[0]; /* arbitrary reference mass, electrons */
1561: nc = LANDAU_MAX_SPECIES-1;
1562: PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg);
1563: if (flg && nc != ctx->num_species-1) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"num charges %D != num species %D",nc,ctx->num_species-1);
1564: for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1565: /* geometry and grids */
1566: nt = LANDAU_MAX_GRIDS;
1567: PetscOptionsIntArray("-dm_landau_num_species_grid","Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid","plexland.c", num_species_grid, &nt, &flg);
1568: if (flg) {
1569: ctx->num_grids = nt;
1570: for (ii=nt=0;ii<ctx->num_grids;ii++) nt += num_species_grid[ii];
1571: if (ctx->num_species != nt) SETERRQ4(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_num_species_grid: sum %D != num_species = %D. %D grids (check that number of grids <= LANDAU_MAX_GRIDS = %D)",nt,ctx->num_species,ctx->num_grids,LANDAU_MAX_GRIDS);
1572: } else {
1573: ctx->num_grids = 1; // go back to a single grid run
1574: num_species_grid[0] = ctx->num_species;
1575: }
1576: for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids ; ii++) ctx->species_offset[ii+1] = ctx->species_offset[ii] + num_species_grid[ii];
1577: if (ctx->species_offset[ctx->num_grids] != ctx->num_species) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"ctx->species_offset[ctx->num_grids] %D != ctx->num_species = %D ???????????",ctx->species_offset[ctx->num_grids],ctx->num_species);
1578: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1579: int iii = ctx->species_offset[grid]; // normalize with first (arbitrary) species on grid
1580: v0_grid[grid] *= PetscSqrtReal(ctx->k*ctx->thermal_temps[iii]/ctx->masses[iii]); /* arbitrary units for non-dimensionalization: mean velocity in 1D of first species on grid */
1581: }
1582: ii = 0;
1583: //PetscOptionsInt("-dm_landau_v0_grid", "Index of grid to use for setting v_0 (electrons are default). Not recommended to change", "plexland.c", ii, &ii, NULL);
1584: ctx->v_0 = v0_grid[ii]; /* arbitrary units for non dimensionalization: mean velocity in 1D of first species on grid */
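    /* reference collision time: t_0 = 8*pi*(epsilon_0*m_0/e^2)^2 * v_0^3 / (ln(Lambda)*n_0), chosen so that nu[0,0] = 1 in these units */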
1585: ctx->t_0 = 8*PETSC_PI*PetscSqr(ctx->epsilon0*ctx->m_0/PetscSqr(ctx->charges[0]))/ctx->lnLam/ctx->n_0*PetscPowReal(ctx->v_0,3); /* note, this t_0 makes nu[0,0]=1 */
1586: /* domain */
1587: nt = LANDAU_MAX_GRIDS;
1588: PetscOptionsRealArray("-dm_landau_domain_radius","Phase space size in units of thermal velocity of grid","plexland.c",ctx->radius,&nt, &flg);
1589: if (flg && nt < ctx->num_grids) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_domain_radius: given %D radius != number grids %D",nt,ctx->num_grids);
1590: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1591: if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c */
1592: if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75;
1593: else ctx->radius[grid] = -ctx->radius[grid];
1594: ctx->radius[grid] = ctx->radius[grid]*SPEED_OF_LIGHT/ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid)
1595: PetscInfo2(dummy, "Change domain radius to %e for grid %D\n",ctx->radius[grid],grid);
1596: }
1597: ctx->radius[grid] *= v0_grid[grid]/ctx->v_0; // scale domain by thermal radius relative to v_0
1598: }
1599: /* AMR parameters */
1600: nt = LANDAU_MAX_GRIDS;
1601: PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg);
1602: if (flg && nt < ctx->num_grids) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_amr_levels_max: given %D != number grids %D",nt,ctx->num_grids);
1603: nt = LANDAU_MAX_GRIDS;
1604: PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg);
1605: for (ii=1;ii<ctx->num_grids;ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now
1606: PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg);
1607: PetscOptionsInt("-dm_landau_amr_z_refine1", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg);
1608: PetscOptionsInt("-dm_landau_amr_z_refine2", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg);
1609: PetscOptionsReal("-dm_landau_re_radius","velocity range to refine on positive (z>0) r=0 axis for runaways","plexland.c",ctx->re_radius,&ctx->re_radius, &flg);
1610: PetscOptionsReal("-dm_landau_z_radius1","velocity range to refine r=0 axis (for electrons)","plexland.c",ctx->vperp0_radius1,&ctx->vperp0_radius1, &flg);
1611: PetscOptionsReal("-dm_landau_z_radius2","velocity range to refine r=0 axis (for ions) after origin AMR","plexland.c",ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg);
1612: /* spherical domain (not used) */
1613: PetscOptionsInt("-dm_landau_num_sections", "Number of tangential section in (2D) grid, 2, 3, of 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL);
1614: PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg);
1615: PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges", "plexland.c", ctx->inflate, &ctx->inflate, &flg);
1616: PetscOptionsReal("-dm_landau_e_radius","Electron thermal velocity, used for circular meshes","plexland.c",ctx->e_radius, &ctx->e_radius, &flg);
1617: if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* an e_radius was given without -dm_landau_sphere; assume a spherical domain was intended */
1618: if (!flg) {
1619: ctx->e_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[0]/ctx->masses[0]/PETSC_PI)/ctx->v_0;
1620: }
1621: nt = LANDAU_MAX_GRIDS;
1622: PetscOptionsRealArray("-dm_landau_i_radius","Ion thermal velocity, used for circular meshes","plexland.c",ctx->i_radius, &nt, &flg);
1623: if (flg && !sph_flg) ctx->sphere = PETSC_TRUE;
1624: if (!flg) {
1625: ctx->i_radius[0] = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[1]/ctx->masses[1]/PETSC_PI)/ctx->v_0; // need to correct for ion grid domain
1626: }
1627: if (flg && ctx->num_grids != nt) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_i_radius: %D != num_species = %D",nt,ctx->num_grids);
1628: if (ctx->sphere && ctx->e_radius <= ctx->i_radius[0]) SETERRQ3(ctx->comm,PETSC_ERR_ARG_WRONG,"bad radii: %g < %g < %g",ctx->i_radius[0],ctx->e_radius,ctx->radius[0]);
1629: /* processing options */
1630: PetscOptionsInt("-dm_landau_sub_thread_block_size", "Number of threads in Kokkos integration point subblock", "plexland.c", ctx->subThreadBlockSize, &ctx->subThreadBlockSize, NULL);
1631: PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL);
1632: PetscOptionsInt("-dm_landau_num_thread_teams", "The number of other concurrent runs to make room for", "plexland.c", ctx->numConcurrency, &ctx->numConcurrency, NULL);
1634: PetscOptionsEnd();
1635: for (ii=ctx->num_species;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0;
1636: if (ctx->verbose > 0) {
1637: PetscPrintf(ctx->comm, "masses: e=%10.3e; ions in proton mass units: %10.3e %10.3e ...\n",ctx->masses[0],ctx->masses[1]/1.6720e-27,ctx->num_species>2 ? ctx->masses[2]/1.6720e-27 : 0);
1638: PetscPrintf(ctx->comm, "charges: e=%10.3e; charges in elementary units: %10.3e %10.3e\n", ctx->charges[0],-ctx->charges[1]/ctx->charges[0],ctx->num_species>2 ? -ctx->charges[2]/ctx->charges[0] : 0);
1639: PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e %10.3e. v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e, %s, %s\n", ctx->thermal_temps[0], ctx->thermal_temps[1], (ctx->num_species>2) ? ctx->thermal_temps[2] : 0, ctx->v_0, ctx->v_0/SPEED_OF_LIGHT, ctx->n_0, ctx->t_0, ctx->use_relativistic_corrections ? "relativistic" : "classical", ctx->use_energy_tensor_trick ? "Use trick" : "Intuitive");
1640: PetscPrintf(ctx->comm, "Domain radius (AMR levels) grid %D: %10.3e (%D) ",0,ctx->radius[0],ctx->numAMRRefine[0]);
1641: for (ii=1;ii<ctx->num_grids;ii++) PetscPrintf(ctx->comm, ", %D: %10.3e (%D) ",ii,ctx->radius[ii],ctx->numAMRRefine[ii]);
1642: PetscPrintf(ctx->comm,"\n");
1643: }
1644: DMDestroy(&dummy);
1645: {
1646: PetscMPIInt rank;
1647: MPI_Comm_rank(ctx->comm, &rank);
1648: /* PetscLogStage setup_stage; */
1649: PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11]); /* 11 */
1650: PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0]); /* 0 */
1651: PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9]); /* 9 */
1652: PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10]); /* 10 */
1653: PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7]); /* 7 */
1654: PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1]); /* 1 */
1655: PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3]); /* 3 */
1656: PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8]); /* 8 */
1657: PetscLogEventRegister(" Kernel (GPU)", DM_CLASSID, &ctx->events[4]); /* 4 */
1658: PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5]); /* 5 */
1659: PetscLogEventRegister(" Jac-assemble", DM_CLASSID, &ctx->events[6]); /* 6 */
1660: PetscLogEventRegister(" Jac asmbl setup", DM_CLASSID, &ctx->events[2]); /* 2 */
1661: PetscLogEventRegister(" Other", DM_CLASSID, &ctx->events[13]); /* 13 */
1663: if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1664: PetscOptionsClearValue(NULL,"-snes_converged_reason");
1665: PetscOptionsClearValue(NULL,"-ksp_converged_reason");
1666: PetscOptionsClearValue(NULL,"-snes_monitor");
1667: PetscOptionsClearValue(NULL,"-ksp_monitor");
1668: PetscOptionsClearValue(NULL,"-ts_monitor");
1669: PetscOptionsClearValue(NULL,"-ts_adapt_monitor");
1670: PetscOptionsClearValue(NULL,"-dm_landau_amr_dm_view");
1671: PetscOptionsClearValue(NULL,"-dm_landau_amr_vec_view");
1672: PetscOptionsClearValue(NULL,"-dm_landau_mass_dm_view");
1673: PetscOptionsClearValue(NULL,"-dm_landau_mass_view");
1674: PetscOptionsClearValue(NULL,"-dm_landau_jacobian_view");
1675: PetscOptionsClearValue(NULL,"-dm_landau_mat_view");
1676: PetscOptionsClearValue(NULL,"-");
1677: PetscOptionsClearValue(NULL,"-info");
1678: }
1679: }
1680: return(0);
1681: }
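/*
  Illustrative option set (values are placeholders, not defaults; only option names registered in
  ProcessOptions() above are used). A two-grid electron + singly charged ion run might look like:
    -dm_landau_thermal_temps 1,1 -dm_landau_ion_masses 2 -dm_landau_ion_charges 1
    -dm_landau_num_species_grid 1,1 -dm_landau_amr_levels_max 2,2 -dm_landau_device_type cpu
*/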
1683: /*@C
1684: LandauCreateVelocitySpace - Create a DMPlex velocity space mesh
1686: Collective on comm
1688: Input Parameters:
1689: + comm - The MPI communicator
1690: . dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver)
1691: - prefix - prefix for options (not tested)
1693: Output Parameters:
1694: + pack - The DM object representing the mesh
1695: . X - A vector (user destroys)
1696: - J - Optional matrix (object destroys)
1698: Level: beginner
1700: .keywords: mesh
1701: .seealso: DMPlexCreate(), LandauDestroyVelocitySpace()
1702: @*/
1703: PetscErrorCode LandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack)
1704: {
1706: LandauCtx *ctx;
1707: PetscBool prealloc_only,flg;
1708: Vec Xsub[LANDAU_MAX_GRIDS];
1711: if (dim!=2 && dim!=3) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
1712: PetscNew(&ctx);
1713: ctx->comm = comm; /* used for diagnostics and global errors */
1714: /* process options */
1715: ProcessOptions(ctx,prefix);
1716: if (dim==2) ctx->use_relativistic_corrections = PETSC_FALSE;
1717: /* Create Mesh */
1718: LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, pack); // creates grids (Forest of AMR)
1719: prealloc_only = (*pack)->prealloc_only;
1720: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
1721: /* create FEM */
1722: SetupDS(ctx->plex[grid],dim,grid,ctx);
1723: /* set initial state */
1724: DMCreateGlobalVector(ctx->plex[grid],&Xsub[grid]);
1725: PetscObjectSetName((PetscObject) Xsub[grid], "u_orig");
1726: /* initial static refinement, no solve */
1727: LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, ctx);
1728: /* forest refinement - forest goes in (if forest), plex comes out */
1729: if (ctx->use_p4est) {
1730: DM plex;
1731: adapt(grid,ctx,&Xsub[grid]); // forest goes in, plex comes out
1732: if (ctx->plex[grid]->prealloc_only != prealloc_only) SETERRQ(PetscObjectComm((PetscObject)*pack),PETSC_ERR_PLIB,"ctx->plex[grid]->prealloc_only != prealloc_only");
1733: DMViewFromOptions(ctx->plex[grid],NULL,"-dm_landau_amr_dm_view"); // need to differentiate - todo
1734: VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view");
1735: // convert to plex, all done with this level
1736: DMConvert(ctx->plex[grid], DMPLEX, &plex);
1737: DMDestroy(&ctx->plex[grid]);
1738: ctx->plex[grid] = plex;
1739: }
1740: DMCompositeAddDM(*pack,ctx->plex[grid]);
1741: DMSetApplicationContext(ctx->plex[grid], ctx);
1742: }
1743: DMSetApplicationContext(*pack, ctx);
1744: PetscOptionsInsertString(NULL,"-dm_preallocate_only");
1745: DMSetFromOptions(*pack);
1746: DMCreateMatrix(*pack, &ctx->J);
1747: PetscOptionsInsertString(NULL,"-dm_preallocate_only false");
1748: MatSetOption(ctx->J, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);
1749: MatSetOption(ctx->J, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
1750: PetscObjectSetName((PetscObject)ctx->J, "Jac");
1751: if (J) *J = ctx->J;
1752: // construct X, copy data in
1753: DMCreateGlobalVector(*pack,X);
1754: for (PetscInt grid=0, idx = 0 ; grid < ctx->num_grids ; grid++) {
1755: PetscInt n;
1756: PetscScalar const *values;
1757: VecGetLocalSize(Xsub[grid],&n);
1758: VecGetArrayRead(Xsub[grid],&values);
1759: for (int i=0; i<n; i++, idx++) {
1760: VecSetValue(*X,idx,values[i],INSERT_VALUES);
1761: }
1762: VecRestoreArrayRead(Xsub[grid],&values);
1763: VecDestroy(&Xsub[grid]);
1764: }
1766: /* check for types that we need */
1767: if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1768: if (ctx->deviceType == LANDAU_CUDA) {
1769: PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,"");
1770: if (!flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda");
1771: } else if (ctx->deviceType == LANDAU_KOKKOS) {
1772: PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,"");
1773: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1774: if (!flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos");
1775: #else
1776: if (!flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"must configure with '--download-kokkos-kernels=1' for GPU assembly and Kokkos");
1777: #endif
1778: }
1779: }
1780: return(0);
1781: }
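/*
  Minimal usage sketch for the public entry points in this file (illustrative only; error checking and
  the TS/SNES/KSP option choices are omitted and are the caller's responsibility):

    DM  pack;
    Vec X;
    Mat J;
    TS  ts;
    LandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &pack);
    LandauCreateMassMatrix(pack, NULL);
    TSCreate(PETSC_COMM_SELF, &ts);
    TSSetDM(ts, pack);
    TSSetIFunction(ts, NULL, LandauIFunction, NULL);
    TSSetIJacobian(ts, J, J, LandauIJacobian, NULL);
    TSSetFromOptions(ts);
    TSSolve(ts, X);
    TSDestroy(&ts);
    VecDestroy(&X);                    // X is destroyed by the user
    LandauDestroyVelocitySpace(&pack); // destroys J and the internal context
*/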
1783: /*@
1784: LandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh
1786: Collective on dm
1788: Input/Output Parameters:
1789: . dm - the dm to destroy
1791: Level: beginner
1793: .keywords: mesh
1794: .seealso: LandauCreateVelocitySpace()
1795: @*/
1796: PetscErrorCode LandauDestroyVelocitySpace(DM *dm)
1797: {
1798: PetscErrorCode ierr; PetscInt ii;
1799: LandauCtx *ctx;
1800: PetscContainer container = NULL;
1802: DMGetApplicationContext(*dm, &ctx);
1803: PetscObjectQuery((PetscObject)ctx->J,"coloring", (PetscObject*)&container);
1804: if (container) {
1805: PetscContainerDestroy(&container);
1806: }
1807: MatDestroy(&ctx->M);
1808: MatDestroy(&ctx->J);
1809: for (ii=0;ii<ctx->num_species;ii++) {
1810: PetscFEDestroy(&ctx->fe[ii]);
1811: }
1812: if (ctx->deviceType == LANDAU_CUDA) {
1813: #if defined(PETSC_HAVE_CUDA)
1814: LandauCUDAStaticDataClear(&ctx->SData_d);
1815: #else
1816: SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
1817: #endif
1818: } else if (ctx->deviceType == LANDAU_KOKKOS) {
1819: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1820: LandauKokkosStaticDataClear(&ctx->SData_d);
1821: #else
1822: SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
1823: #endif
1824: } else {
1825: if (ctx->SData_d.x) { /* in a CPU run */
1826: PetscReal *invJ = (PetscReal*)ctx->SData_d.invJ, *xx = (PetscReal*)ctx->SData_d.x, *yy = (PetscReal*)ctx->SData_d.y, *zz = (PetscReal*)ctx->SData_d.z, *ww = (PetscReal*)ctx->SData_d.w;
1827: PetscFree4(ww,xx,yy,invJ);
1828: if (zz) {
1829: PetscFree(zz);
1830: }
1831: }
1832: }
1833: if (ctx->times[0] > 0) {
1834: PetscPrintf(ctx->comm, "Landau Operator %d 1.0 %10.3e ....\n",10000,ctx->times[0]);
1835: }
1836: for (PetscInt grid=0 ; grid < ctx->num_grids ; grid++) {
1837: DMDestroy(&ctx->plex[grid]);
1838: }
1839: PetscFree(ctx);
1840: DMDestroy(dm);
1841: return(0);
1842: }
1844: /* < v, u > (number density) */
1845: static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1846: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1847: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1848: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1849: {
1850: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1851: f0[0] = u[ii];
1852: }
1854: /* < v, x_j u > (j-th momentum component) */
1855: static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1856: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1857: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1858: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1859: {
1860: PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
1861: f0[0] = x[jj]*u[ii]; /* x momentum */
1862: }
1864: static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1865: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1866: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1867: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1868: {
1869: PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
1870: double tmp1 = 0.;
1871: for (i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
1872: f0[0] = tmp1*u[ii];
1873: }
1875: static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
1876: {
1877: const PetscReal *c2_0_arr = ((PetscReal*)actx);
1878: const PetscReal c02 = c2_0_arr[0];
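  /* gamma(v) = sqrt(1 + |v|^2/c_0^2) per species; the optimized branch below evaluates gamma - 1 as
     xx/(sqrt(1+xx)+1) with xx = |v|^2/c_0^2, which avoids cancellation when |v| << c_0 */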
1881: for (int s = 0 ; s < Nf ; s++) {
1882: PetscReal tmp1 = 0.;
1883: for (int i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
1884: #if defined(PETSC_USE_DEBUG)
1885: u[s] = PetscSqrtReal(1. + tmp1/c02);// u[0] = PetscSqrtReal(1. + xx);
1886: #else
1887: {
1888: PetscReal xx = tmp1/c02;
1889: u[s] = xx/(PetscSqrtReal(1. + xx) + 1.); // better conditioned = xx/(PetscSqrtReal(1. + xx) + 1.)
1890: }
1891: #endif
1892: }
1893: return(0);
1894: }
1896: /* < v, ru > */
1897: static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1898: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1899: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1900: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1901: {
1902: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1903: f0[0] = 2.*PETSC_PI*x[0]*u[ii];
1904: }
1906: /* < v, ru > */
1907: static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1908: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1909: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1910: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1911: {
1912: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1913: f0[0] = 2.*PETSC_PI*x[0]*x[1]*u[ii];
1914: }
1916: static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1917: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1918: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1919: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1920: {
1921: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1922: f0[0] = 2.*PETSC_PI*x[0]*(x[0]*x[0] + x[1]*x[1])*u[ii];
1923: }
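/*
  In the 2D axisymmetric case the f0_s_r* integrands above carry the cylindrical volume element
  2*pi*r (with r = x[0], z = x[1]): number density ~ 2*pi*Int r u, z-momentum ~ 2*pi*Int r z u, and
  energy ~ 2*pi*Int r (r^2 + z^2) u. LandauPrintNorms() applies the n_0, v_0 and mass normalizations.
*/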
1925: /*@
1926: LandauPrintNorms - collects moments and prints them
1928: Collective on dm
1930: Input Parameters:
1931: + X - the state
1932: - stepi - current step to print
1934: Level: beginner
1936: .keywords: mesh
1937: .seealso: LandauCreateVelocitySpace()
1938: @*/
1939: PetscErrorCode LandauPrintNorms(Vec X, PetscInt stepi)
1940: {
1942: LandauCtx *ctx;
1943: PetscDS prob;
1944: DM pack;
1945: PetscInt cStart, cEnd, dim, ii, i0;
1946: PetscScalar xmomentumtot=0, ymomentumtot=0, zmomentumtot=0, energytot=0, densitytot=0, tt[LANDAU_MAX_SPECIES];
1947: PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
1948: Vec globXArray[LANDAU_MAX_GRIDS];
1951: VecGetDM(X, &pack);
1952: if (!pack) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Vector has no DM");
1953: DMGetDimension(pack, &dim);
1954: if (dim!=2 && dim!=3) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "dim= %D",dim);
1955: DMGetApplicationContext(pack, &ctx);
1956: if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
1957: /* print momentum and energy */
1958: DMCompositeGetAccessArray(pack, X, ctx->num_grids, NULL, globXArray);
1959: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) {
1960: Vec Xloc = globXArray[grid];
1961: DMGetDS(ctx->plex[grid], &prob);
1962: for (ii=ctx->species_offset[grid],i0=0;ii<ctx->species_offset[grid+1];ii++,i0++) {
1963: PetscScalar user[2] = { (PetscScalar)i0, (PetscScalar)ctx->charges[ii]};
1964: PetscDSSetConstants(prob, 2, user);
1965: if (dim==2) { /* 2/3X + 3V (cylindrical coordinates) */
1966: PetscDSSetObjective(prob, 0, &f0_s_rden);
1967: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
1968: density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
1969: PetscDSSetObjective(prob, 0, &f0_s_rmom);
1970: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
1971: zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1972: PetscDSSetObjective(prob, 0, &f0_s_rv2);
1973: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
1974: energy[ii] = tt[0]*0.5*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
1975: zmomentumtot += zmomentum[ii];
1976: energytot += energy[ii];
1977: densitytot += density[ii];
1978: PetscPrintf(ctx->comm, "%3D) species-%D: charge density= %20.13e z-momentum= %20.13e energy= %20.13e",stepi,ii,PetscRealPart(density[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]));
1979: } else { /* 2/3Xloc + 3V */
1980: PetscDSSetObjective(prob, 0, &f0_s_den);
1981: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
1982: density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
1983: PetscDSSetObjective(prob, 0, &f0_s_mom);
1984: user[1] = 0;
1985: PetscDSSetConstants(prob, 2, user);
1986: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
1987: xmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1988: user[1] = 1;
1989: PetscDSSetConstants(prob, 2, user);
1990: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
1991: ymomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1992: user[1] = 2;
1993: PetscDSSetConstants(prob, 2, user);
1994: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
1995: zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1996: if (ctx->use_relativistic_corrections) {
1997: /* gamma * M * f */
1998: if (ii==0 && grid==0) { // do all at once
1999: Vec Mf, globGamma, globMfarray[LANDAU_MAX_GRIDS], globGammaArray[LANDAU_MAX_GRIDS];
2000: PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *) = {gamma_n_f};
2001: PetscReal *c2_0[1], data[1];
2003: VecDuplicate(X,&globGamma);
2004: VecDuplicate(X,&Mf);
2005: /* M * f */
2006: MatMult(ctx->M,X,Mf);
2007: /* gamma */
2008: DMCompositeGetAccessArray(pack, globGamma, ctx->num_grids, NULL, globGammaArray);
2009: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice
2010: Vec v1 = globGammaArray[grid];
2011: data[0] = PetscSqr(C_0(ctx->v_0));
2012: c2_0[0] = &data[0];
2013: DMProjectFunction(ctx->plex[grid], 0., gammaf, (void**)c2_0, INSERT_ALL_VALUES, v1);
2014: }
2015: DMCompositeRestoreAccessArray(pack, globGamma, ctx->num_grids, NULL, globGammaArray);
2016: /* gamma * Mf */
2017: DMCompositeGetAccessArray(pack, globGamma, ctx->num_grids, NULL, globGammaArray);
2018: DMCompositeGetAccessArray(pack, Mf, ctx->num_grids, NULL, globMfarray);
2019: for (PetscInt grid = 0; grid < ctx->num_grids ; grid++) { // yes a grid loop in a grid loop to print nice
2020: PetscInt Nf = ctx->species_offset[grid+1] - ctx->species_offset[grid], N, bs;
2021: Vec Mfsub = globMfarray[grid], Gsub = globGammaArray[grid], v1, v2;
2022: // get each component
2023: VecGetSize(Mfsub,&N);
2024: VecCreate(ctx->comm,&v1);
2025: VecSetSizes(v1,PETSC_DECIDE,N/Nf);
2026: VecCreate(ctx->comm,&v2);
2027: VecSetSizes(v2,PETSC_DECIDE,N/Nf);
2028: VecSetFromOptions(v1); // ???
2029: VecSetFromOptions(v2);
2030: // get each component
2031: VecGetBlockSize(Gsub,&bs);
2032: if (bs != Nf) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %D != num_species %D in Gsub",bs,Nf);
2033: VecGetBlockSize(Mfsub,&bs);
2034: if (bs != Nf) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "bs %D != num_species %D",bs,Nf);
2035: for (int i=0, ix=ctx->species_offset[grid] ; i<Nf ; i++, ix++) {
2036: PetscScalar val;
2037: VecStrideGather(Gsub,i,v1,INSERT_VALUES);
2038: VecStrideGather(Mfsub,i,v2,INSERT_VALUES);
2039: VecDot(v1,v2,&val);
2040: energy[ix] = PetscRealPart(val)*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ix];
2041: }
2042: VecDestroy(&v1);
2043: VecDestroy(&v2);
2044: } /* grids */
2045: DMCompositeRestoreAccessArray(pack, globGamma, ctx->num_grids, NULL, globGammaArray);
2046: DMCompositeRestoreAccessArray(pack, Mf, ctx->num_grids, NULL, globMfarray);
2047: VecDestroy(&globGamma);
2048: VecDestroy(&Mf);
2049: }
2050: } else {
2051: PetscDSSetObjective(prob, 0, &f0_s_v2);
2052: DMPlexComputeIntegralFEM(ctx->plex[grid],Xloc,tt,ctx);
2053: energy[ii] = 0.5*tt[0]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
2054: }
2055: PetscPrintf( ctx->comm, "%3D) species %D: density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e",
2056: stepi,ii,PetscRealPart(density[ii]),PetscRealPart(xmomentum[ii]),PetscRealPart(ymomentum[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]));
2057: xmomentumtot += xmomentum[ii];
2058: ymomentumtot += ymomentum[ii];
2059: zmomentumtot += zmomentum[ii];
2060: energytot += energy[ii];
2061: densitytot += density[ii];
2062: }
2063: if (ctx->num_species>1) PetscPrintf(ctx->comm, "\n");
2064: }
2065: }
2066: DMCompositeRestoreAccessArray(pack, X, ctx->num_grids, NULL, globXArray);
2067: /* totals */
2068: DMPlexGetHeightStratum(ctx->plex[0],0,&cStart,&cEnd);
2069: if (ctx->num_species>1) {
2070: if (dim==2) {
2071: PetscPrintf(ctx->comm, "\t%3D) Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %D cells on electron grid)",
2072: stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);
2073: } else {
2074: PetscPrintf(ctx->comm, "\t%3D) Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %D cells)",
2075: stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(xmomentumtot),(double)PetscRealPart(ymomentumtot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);
2076: }
2077: } else {
2078: PetscPrintf(ctx->comm, " -- %D cells",cEnd-cStart);
2079: }
2080: if (ctx->verbose > 1) {PetscPrintf(ctx->comm,", %D sub (vector) threads\n",ctx->subThreadBlockSize);}
2081: else {PetscPrintf(ctx->comm,"\n");}
2082: return(0);
2083: }
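/*
  Typical use of LandauPrintNorms() (an illustration, not required by the API): call it from a TS
  monitor or post-step hook where the step index and TS are available, for example

    Vec X;
    TSGetSolution(ts, &X);
    LandauPrintNorms(X, step);
*/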
2085: static PetscErrorCode destroy_coloring (void *is)
2086: {
2087: ISColoring tmp = (ISColoring)is;
2088: return ISColoringDestroy(&tmp);
2089: }
2091: /*@
2092: LandauCreateColoring - create a coloring and add to matrix (Landau context used just for 'print' flag, should be in DMPlex)
2094: Collective on JacP
2096: Input Parameters:
2097: + JacP - matrix to add coloring to
2098: - plex - The DM
2100: Output Parameter:
2101: . container - Container with coloring
2103: Level: beginner
2105: .keywords: mesh
2106: .seealso: LandauCreateVelocitySpace()
2107: @*/
2108: PetscErrorCode LandauCreateColoring(Mat JacP, DM plex, PetscContainer *container)
2109: {
2110: PetscErrorCode ierr;
2111: PetscInt dim,cell,i,ej,nc,Nv,totDim,numGCells,cStart,cEnd;
2112: ISColoring iscoloring = NULL;
2113: Mat G,Q;
2114: PetscScalar ones[128];
2115: MatColoring mc;
2116: IS *is;
2117: PetscInt csize,colour,j,k;
2118: const PetscInt *indices;
2119: PetscInt numComp[1];
2120: PetscInt numDof[4];
2121: PetscFE fe;
2122: DM colordm;
2123: PetscSection csection, section, globalSection;
2124: PetscDS prob;
2125: LandauCtx *ctx;
2128: DMGetApplicationContext(plex, &ctx);
2129: DMGetLocalSection(plex, &section);
2130: DMGetGlobalSection(plex, &globalSection);
2131: DMGetDimension(plex, &dim);
2132: DMGetDS(plex, &prob);
2133: PetscDSGetTotalDimension(prob, &totDim);
2134: DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
2135: numGCells = cEnd - cStart;
2136: /* create cell centered DM */
2137: DMClone(plex, &colordm);
2138: PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, "color_", PETSC_DECIDE, &fe);
2139: PetscObjectSetName((PetscObject) fe, "color");
2140: DMSetField(colordm, 0, NULL, (PetscObject)fe);
2141: PetscFEDestroy(&fe);
2142: for (i = 0; i < (dim+1); ++i) numDof[i] = 0;
2143: numDof[dim] = 1;
2144: numComp[0] = 1;
2145: DMPlexCreateSection(colordm, NULL, numComp, numDof, 0, NULL, NULL, NULL, NULL, &csection);
2146: PetscSectionSetFieldName(csection, 0, "color");
2147: DMSetLocalSection(colordm, csection);
2148: DMViewFromOptions(colordm,NULL,"-color_dm_view");
2149: /* build the cell-to-dof incidence matrix Q and the coloring graph G */
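  /* Q is a (numGCells x Nv) 0/1 incidence matrix with Q(e,i) != 0 when dof i is in the closure of cell e;
     G = Q*Q^T therefore connects two cells exactly when they share a dof, so a distance-1 coloring of G
     yields groups of cells whose element matrices touch disjoint rows/columns of the Jacobian */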
2150: MatGetSize(JacP,NULL,&Nv);
2151: MatCreateAIJ(PETSC_COMM_SELF,PETSC_DECIDE,PETSC_DECIDE,numGCells,Nv,totDim,NULL,0,NULL,&Q);
2152: for (i=0;i<128;i++) ones[i] = 1.0;
2153: for (cell = cStart, ej = 0 ; cell < cEnd; ++cell, ++ej) {
2154: PetscInt numindices,*indices;
2155: DMPlexGetClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &indices, NULL, NULL);
2156: if (numindices>128) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "too many indices. %D > %D",numindices,128);
2157: MatSetValues(Q,1,&ej,numindices,indices,ones,ADD_VALUES);
2158: DMPlexRestoreClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &indices, NULL, NULL);
2159: }
2160: MatAssemblyBegin(Q, MAT_FINAL_ASSEMBLY);
2161: MatAssemblyEnd(Q, MAT_FINAL_ASSEMBLY);
2162: MatMatTransposeMult(Q,Q,MAT_INITIAL_MATRIX,4.0,&G);
2163: PetscObjectSetName((PetscObject) Q, "Q");
2164: PetscObjectSetName((PetscObject) G, "coloring graph");
2165: MatViewFromOptions(G,NULL,"-coloring_mat_view");
2166: MatViewFromOptions(Q,NULL,"-coloring_mat_view");
2167: MatDestroy(&Q);
2168: /* coloring */
2169: MatColoringCreate(G,&mc);
2170: MatColoringSetDistance(mc,1);
2171: MatColoringSetType(mc,MATCOLORINGJP);
2172: MatColoringSetFromOptions(mc);
2173: MatColoringApply(mc,&iscoloring);
2174: MatColoringDestroy(&mc);
2175: /* view */
2176: ISColoringViewFromOptions(iscoloring,NULL,"-coloring_is_view");
2177: ISColoringGetIS(iscoloring,PETSC_USE_POINTER,&nc,&is);
2178: if (ctx && ctx->verbose > 2) {
2179: PetscViewer viewer;
2180: Vec color_vec, eidx_vec;
2181: DMGetGlobalVector(colordm, &color_vec);
2182: DMGetGlobalVector(colordm, &eidx_vec);
2183: for (colour=0; colour<nc; colour++) {
2184: ISGetLocalSize(is[colour],&csize);
2185: ISGetIndices(is[colour],&indices);
2186: for (j=0; j<csize; j++) {
2187: PetscScalar v = (PetscScalar)colour;
2188: k = indices[j];
2189: VecSetValues(color_vec,1,&k,&v,INSERT_VALUES);
2190: v = (PetscScalar)k;
2191: VecSetValues(eidx_vec,1,&k,&v,INSERT_VALUES);
2192: }
2193: ISRestoreIndices(is[colour],&indices);
2194: }
2195: /* view */
2196: PetscViewerVTKOpen(ctx->comm, "color.vtu", FILE_MODE_WRITE, &viewer);
2197: PetscObjectSetName((PetscObject) color_vec, "color");
2198: VecView(color_vec, viewer);
2199: PetscViewerDestroy(&viewer);
2200: PetscViewerVTKOpen(ctx->comm, "eidx.vtu", FILE_MODE_WRITE, &viewer);
2201: PetscObjectSetName((PetscObject) eidx_vec, "element-idx");
2202: VecView(eidx_vec, viewer);
2203: PetscViewerDestroy(&viewer);
2204: DMRestoreGlobalVector(colordm, &color_vec);
2205: DMRestoreGlobalVector(colordm, &eidx_vec);
2206: }
2207: PetscSectionDestroy(&csection);
2208: DMDestroy(&colordm);
2209: ISColoringRestoreIS(iscoloring,PETSC_USE_POINTER,&is);
2210: MatDestroy(&G);
2211: /* stash coloring */
2212: PetscContainerCreate(PETSC_COMM_SELF, container);
2213: PetscContainerSetPointer(*container,(void*)iscoloring);
2214: PetscContainerSetUserDestroy(*container, destroy_coloring);
2215: PetscObjectCompose((PetscObject)JacP,"coloring",(PetscObject)*container);
2216: if (ctx && ctx->verbose > 0) {
2217: PetscPrintf(ctx->comm, "Made coloring with %D colors\n", nc);
2218: }
2219: return(0);
2220: }
2222: PetscErrorCode LandauAssembleOpenMP(PetscInt cStart, PetscInt cEnd, PetscInt totDim, DM plex, PetscSection section, PetscSection globalSection, Mat JacP, PetscScalar elemMats[], PetscContainer container)
2223: {
2224: PetscErrorCode ierr;
2225: IS *is;
2226: PetscInt nc,colour,j;
2227: const PetscInt *clr_idxs;
2228: ISColoring iscoloring;
2230: PetscContainerGetPointer(container,(void**)&iscoloring);
2231: ISColoringGetIS(iscoloring,PETSC_USE_POINTER,&nc,&is);
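  /* cells within one color share no closure dofs (see LandauCreateColoring()), so the MatSetValues()
     calls for the cells of a single color touch disjoint rows/columns and could safely be issued
     concurrently (e.g. by OpenMP threads); here they are applied color by color in a simple loop */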
2232: for (colour=0; colour<nc; colour++) {
2233: PetscInt *idx_arr[1024]; /* need to make dynamic for general use */
2234: PetscScalar *new_el_mats[1024];
2235: PetscInt idx_size[1024],csize;
2236: ISGetLocalSize(is[colour],&csize);
2237: if (csize>1024) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "too many elements in color. %D > %D",csize,1024);
2238: ISGetIndices(is[colour],&clr_idxs);
2239: /* get indices and mats */
2240: for (j=0; j<csize; j++) {
2241: PetscInt cell = cStart + clr_idxs[j];
2242: PetscInt numindices,*indices;
2243: PetscScalar *elMat = &elemMats[clr_idxs[j]*totDim*totDim];
2244: PetscScalar *valuesOrig = elMat;
2245: DMPlexGetClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
2246: idx_size[j] = numindices;
2247: PetscMalloc2(numindices,&idx_arr[j],numindices*numindices,&new_el_mats[j]);
2248: PetscMemcpy(idx_arr[j],indices,numindices*sizeof(*idx_arr[j]));
2249: PetscMemcpy(new_el_mats[j],elMat,numindices*numindices*sizeof(*new_el_mats[j]));
2250: DMPlexRestoreClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
2251: if (elMat != valuesOrig) {DMRestoreWorkArray(plex, numindices*numindices, MPIU_SCALAR, &elMat);}
2252: }
2253: /* assemble matrix */
2254: for (j=0; j<csize; j++) {
2255: PetscInt numindices = idx_size[j], *indices = idx_arr[j];
2256: PetscScalar *elMat = new_el_mats[j];
2257: MatSetValues(JacP,numindices,indices,numindices,indices,elMat,ADD_VALUES);
2258: }
2259: /* free */
2260: ISRestoreIndices(is[colour],&clr_idxs);
2261: for (j=0; j<csize; j++) {
2262: PetscFree2(idx_arr[j],new_el_mats[j]);
2263: }
2264: }
2265: ISColoringRestoreIS(iscoloring,PETSC_USE_POINTER,&is);
2266: return(0);
2267: }
2269: /* < v, u > */
2270: static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2271: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2272: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2273: PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
2274: {
2275: g0[0] = 1.;
2276: }
2278: /* < v, 2 pi r u > (axisymmetric mass weight) */
2279: static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux,
2280: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
2281: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
2282: PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
2283: {
2284: g0[0] = 2.*PETSC_PI*x[0];
2285: }
2287: /*@
2288: LandauCreateMassMatrix - Create mass matrix for Landau
2290: Collective on pack
2292: Input Parameter:
2293: . pack - the DM object
2295: Output Parameter:
2296: . Amat - The mass matrix (optional), mass matrix is added to the DM context
2298: Level: beginner
2300: .keywords: mesh
2301: .seealso: LandauCreateVelocitySpace()
2302: @*/
2303: PetscErrorCode LandauCreateMassMatrix(DM pack, Mat *Amat)
2304: {
2305: DM mass_pack,massDM[LANDAU_MAX_GRIDS];
2306: PetscDS prob;
2307: PetscInt ii,dim,N1=1,N2;
2309: LandauCtx *ctx;
2310: Mat packM,subM[LANDAU_MAX_GRIDS];
2315: DMGetApplicationContext(pack, &ctx);
2316: if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2317: DMGetDimension(pack, &dim);
2318: DMCompositeCreate(PetscObjectComm((PetscObject) pack),&mass_pack);
2319: /* create pack mass matrix */
2320: for (PetscInt grid=0, ix=0 ; grid<ctx->num_grids ; grid++) {
2321: DMClone(ctx->plex[grid], &massDM[grid]);
2322: DMCopyFields(ctx->plex[grid], massDM[grid]);
2323: DMCreateDS(massDM[grid]);
2324: DMGetDS(massDM[grid], &prob);
2325: //for (ii=0;ii<ctx->num_species;ii++) {
2326: for (ix=0, ii=ctx->species_offset[grid];ii<ctx->species_offset[grid+1];ii++,ix++) {
2327: if (dim==3) {PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL);}
2328: else {PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL);}
2329: }
2330: DMCompositeAddDM(mass_pack,massDM[grid]);
2331: DMCreateMatrix(massDM[grid], &subM[grid]);
2332: }
2333: PetscOptionsInsertString(NULL,"-dm_preallocate_only");
2334: DMSetFromOptions(mass_pack);
2335: DMCreateMatrix(mass_pack, &packM);
2336: PetscOptionsInsertString(NULL,"-dm_preallocate_only false");
2337: MatSetOption(packM, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);
2338: MatSetOption(packM, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
2339: DMViewFromOptions(mass_pack,NULL,"-dm_landau_mass_dm_view");
2340: DMDestroy(&mass_pack);
2341: /* make mass matrix for each block */
2342: for (PetscInt grid=0;grid<ctx->num_grids;grid++) {
2343: Vec locX;
2344: DM plex = massDM[grid];
2345: DMGetLocalVector(plex, &locX);
2346: /* Mass matrix is independent of the input, so no need to fill locX */
2347: DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx);
2348: DMRestoreLocalVector(plex, &locX);
2349: DMDestroy(&massDM[grid]);
2350: }
2351: MatGetSize(ctx->J, &N1, NULL);
2352: MatGetSize(packM, &N2, NULL);
2353: if (N1 != N2) SETERRQ2(PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %D, |Mass|=%D",N1,N2);
2354: /* assemble block diagonals */
2355: ctx->mat_offset[0] = 0;
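  /* copy each per-grid mass matrix subM[grid] into the packed matrix at row/column offset
     mat_offset[grid]; the offsets accumulate the per-grid sizes so mat_offset[num_grids] equals
     the global (packed) dimension */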
2356: for (PetscInt grid=0 ; grid<ctx->num_grids ; grid++) {
2357: PetscInt nloc, nzl, colbuf[1024], row;
2358: const PetscInt *cols;
2359: const PetscScalar *vals;
2360: Mat B = subM[grid];
2362: MatGetSize(B, &nloc, NULL);
2363: for (int i=0 ; i<nloc ; i++) {
2364: MatGetRow(B,i,&nzl,&cols,&vals);
2365: if (nzl>1024) SETERRQ1(PetscObjectComm((PetscObject) pack), PETSC_ERR_PLIB, "Row too big: %D",nzl);
2366: for (int j=0; j<nzl; j++) colbuf[j] = cols[j] + ctx->mat_offset[grid];
2367: row = i + ctx->mat_offset[grid];
2368: MatSetValues(packM,1,&row,nzl,colbuf,vals,INSERT_VALUES);
2369: MatRestoreRow(B,i,&nzl,&cols,&vals);
2370: }
2371: MatDestroy(&subM[grid]);
2372: ctx->mat_offset[grid+1] = ctx->mat_offset[grid] + nloc;
2373: }
2374: MatAssemblyBegin(packM,MAT_FINAL_ASSEMBLY);
2375: MatAssemblyEnd(packM,MAT_FINAL_ASSEMBLY);
2376: PetscObjectSetName((PetscObject)packM, "mass");
2377: MatViewFromOptions(packM,NULL,"-dm_landau_mass_view");
2378: ctx->M = packM; /* this could be a noop, a = a */
2379: if (Amat) *Amat = packM;
2380: return(0);
2381: }
2383: /*@
2384: LandauIFunction - TS residual calculation
2386: Collective on ts
2388: Input Parameters:
2389: + ts - The time stepping context
2390: . time_dummy - current time (not used)
2391: . X - Current state
2392: . X_t - Time derivative of current state
2393: - actx - Landau context
2395: Output Parameter:
2396: . F - The residual
2398: Level: beginner
2400: .keywords: mesh
2401: .seealso: LandauCreateVelocitySpace(), LandauIJacobian()
2402: @*/
2403: PetscErrorCode LandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
2404: {
2406: LandauCtx *ctx=(LandauCtx*)actx;
2407: PetscInt dim;
2408: DM pack;
2409: #if defined(PETSC_HAVE_THREADSAFETY)
2410: double starttime, endtime;
2411: #endif
2414: TSGetDM(ts,&pack);
2415: DMGetApplicationContext(pack, &ctx);
2416: if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2417: PetscLogEventBegin(ctx->events[11],0,0,0,0);
2418: PetscLogEventBegin(ctx->events[0],0,0,0,0);
2419: #if defined(PETSC_HAVE_THREADSAFETY)
2420: starttime = MPI_Wtime();
2421: #endif
2422: DMGetDimension(pack, &dim);
2423: if (!ctx->aux_bool) {
2424: PetscInfo3(ts, "Create Landau Jacobian t=%g X_t=%p %s\n",time_dummy,X_t,ctx->aux_bool ? " -- seems to be in line search" : "");
2425: LandauFormJacobian_Internal(X,ctx->J,dim,0.0,(void*)ctx);
2426: MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view");
2427: ctx->aux_bool = PETSC_TRUE;
2428: } else {
2429: PetscInfo(ts, "Skip forming Jacobian, has not changed (should check norm)\n");
2430: }
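  /* residual: F = J(X)*X, plus M*X_t when a time derivative is supplied by the TS */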
2431: /* mat vec for op */
2432: MatMult(ctx->J,X,F); /* C*f */
2433: /* add time term */
2434: if (X_t) {
2435: MatMultAdd(ctx->M,X_t,F,F);
2436: }
2437: #if defined(PETSC_HAVE_THREADSAFETY)
2438: endtime = MPI_Wtime();
2439: ctx->times[0] += (endtime - starttime);
2440: #endif
2441: PetscLogEventEnd(ctx->events[0],0,0,0,0);
2442: PetscLogEventEnd(ctx->events[11],0,0,0,0);
2443: return(0);
2444: }
2445: static PetscErrorCode MatrixNfDestroy(void *ptr)
2446: {
2447: PetscInt *nf = (PetscInt *)ptr;
2448: PetscErrorCode ierr;
2450: PetscFree(nf);
2451: return(0);
2452: }
2453: /*@
2454: LandauIJacobian - TS Jacobian construction
2456: Collective on ts
2458: Input Parameters:
2459: + ts - The time stepping context
2460: . time_dummy - current time (not used)
2461: . X - Current state
2462: . U_tdummy - Time derivative of current state (not used)
2463: . shift - shift for du/dt term
2464: - actx - Landau context
2466: Output Parameters:
2467: + Amat - Jacobian
2468: - Pmat - same as Amat
2470: Level: beginner
2472: .keywords: mesh
2473: .seealso: LandauCreateVelocitySpace(), LandauIFunction()
2474: @*/
2475: PetscErrorCode LandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2476: {
2478: LandauCtx *ctx=(LandauCtx*)actx;
2479: PetscInt dim;
2480: DM pack;
2481: PetscContainer container;
2482: #if defined(PETSC_HAVE_THREADSAFETY)
2483: double starttime, endtime;
2484: #endif
2487: TSGetDM(ts,&pack);
2488: DMGetApplicationContext(pack, &ctx);
2489: if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2490: if (Amat!=Pmat || Amat!=ctx->J) SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
2491: DMGetDimension(pack, &dim);
2492: /* get collision Jacobian into A */
2493: PetscLogEventBegin(ctx->events[11],0,0,0,0);
2494: PetscLogEventBegin(ctx->events[9],0,0,0,0);
2495: #if defined(PETSC_HAVE_THREADSAFETY)
2496: starttime = MPI_Wtime();
2497: #endif
2498: PetscInfo2(ts, "Adding just mass to Jacobian t=%g, shift=%g\n",(double)time_dummy,(double)shift);
2499: if (shift==0.0) SETERRQ(ctx->comm, PETSC_ERR_PLIB, "zero shift");
2500: if (!ctx->aux_bool) SETERRQ(ctx->comm, PETSC_ERR_PLIB, "wrong state");
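  /* either rebuild the full operator with the shift*mass contribution folded in (shift is passed to
     LandauFormJacobian_Internal()), or, with -dm_landau_use_mataxpy_mass, add shift*M to the collision
     matrix already held in Pmat from LandauIFunction() */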
2501: if (!ctx->use_matrix_mass) {
2502: LandauFormJacobian_Internal(X,ctx->J,dim,shift,(void*)ctx);
2503: MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view");
2504: } else { /* add mass */
2505: MatAXPY(Pmat,shift,ctx->M,SAME_NONZERO_PATTERN);
2506: }
2507: ctx->aux_bool = PETSC_FALSE;
2508: /* set number species in Jacobian */
2509: PetscObjectQuery((PetscObject) ctx->J, "Nf", (PetscObject *) &container);
2510: if (!container) {
2511: PetscInt *pNf;
2512: PetscContainerCreate(PETSC_COMM_SELF, &container);
2513: PetscMalloc(sizeof(*pNf), &pNf);
2514: *pNf = ctx->num_species + 1000*ctx->numConcurrency;
2515: PetscContainerSetPointer(container, (void *)pNf);
2516: PetscContainerSetUserDestroy(container, MatrixNfDestroy);
2517: PetscObjectCompose((PetscObject)ctx->J, "Nf", (PetscObject) container);
2518: PetscContainerDestroy(&container);
2519: }
2520: #if defined(PETSC_HAVE_THREADSAFETY)
2521: endtime = MPI_Wtime();
2522: ctx->times[0] += (endtime - starttime);
2523: #endif
2524: PetscLogEventEnd(ctx->events[9],0,0,0,0);
2525: PetscLogEventEnd(ctx->events[11],0,0,0,0);
2526: return(0);
2527: }