Actual source code: sfallgatherv.c
#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gatherv(PetscSF,MPI_Datatype,PetscMemType,const void*,PetscMemType,void*,MPI_Op);

/* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf,PetscInt *nroots,PetscInt *nleaves,const PetscInt **ilocal,const PetscSFNode **iremote)
{
  PetscInt       i,j,k;
  const PetscInt *range;
  PetscMPIInt    size;

  MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);
  if (nroots)  *nroots  = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal)  *ilocal  = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
      PetscLayoutGetRanges(sf->map,&range);
      PetscMalloc1(sf->nleaves,&sf->remote);
      sf->remote_alloc = sf->remote;
      for (i=0; i<size; i++) {
        for (j=range[i],k=0; j<range[i+1]; j++,k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  return(0);
}

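/* A small worked example (hypothetical 3-rank layout, not taken from the code above): with the
   ranks owning 2, 2 and 1 roots, PetscLayoutGetRanges() gives range = {0, 2, 4, 5} and the loop
   fills the sf->nleaves = 5 contiguous leaves on every rank as

     sf->remote[0] = {.rank = 0, .index = 0}
     sf->remote[1] = {.rank = 0, .index = 1}
     sf->remote[2] = {.rank = 1, .index = 0}
     sf->remote[3] = {.rank = 1, .index = 1}
     sf->remote[4] = {.rank = 2, .index = 0}

   i.e., every rank's leaf space is the concatenation of all roots in rank order. */
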
PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
{
  PetscErrorCode     ierr;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  PetscMPIInt        size;
  PetscInt           i;
  const PetscInt     *range;

  PetscSFSetUp_Allgather(sf);
  MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);
  if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
    PetscMalloc1(size,&dat->recvcounts);
    PetscMalloc1(size,&dat->displs);
    PetscLayoutGetRanges(sf->map,&range);
    for (i=0; i<size; i++) {
      PetscMPIIntCast(range[i],&dat->displs[i]);
      PetscMPIIntCast(range[i+1]-range[i],&dat->recvcounts[i]);
    }
  }
  return(0);
}

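/* Continuing the hypothetical 3-rank layout with local sizes 2, 2, 1: range = {0, 2, 4, 5}, so
   the loop above produces

     dat->recvcounts = {2, 2, 1}     number of roots owned by each rank
     dat->displs     = {0, 2, 4}     offset of each rank's block in the leaf space

   which are exactly the arrays later handed to MPIU_Iallgatherv() and MPIU_Iscatterv(). */
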
PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
{
  PetscErrorCode     ierr;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  PetscSFLink        link = dat->avail,next;

  PetscFree(dat->iranks);
  PetscFree(dat->ioffset);
  PetscFree(dat->irootloc);
  PetscFree(dat->recvcounts);
  PetscFree(dat->displs);
  if (dat->inuse) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_ARG_WRONGSTATE,"Outstanding operation has not been completed");
  for (; link; link=next) {next = link->next; PetscSFLinkDestroy(sf,link);}
  dat->avail = NULL;
  return(0);
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscSFReset_Allgatherv(sf);
  PetscFree(sf->data);
  return(0);
}

static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
{
  PetscErrorCode     ierr;
  PetscSFLink        link;
  PetscMPIInt        sendcount;
  MPI_Comm           comm;
  void               *rootbuf = NULL,*leafbuf = NULL;
  MPI_Request        *req;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;

  PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_BCAST,&link);
  PetscSFLinkPackRootData(sf,link,PETSCSF_REMOTE,rootdata);
  PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */);
  PetscObjectGetComm((PetscObject)sf,&comm);
  PetscMPIIntCast(sf->nroots,&sendcount);
  PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_ROOT2LEAF,&rootbuf,&leafbuf,&req,NULL);
  PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_ROOT2LEAF);
  MPIU_Iallgatherv(rootbuf,sendcount,unit,leafbuf,dat->recvcounts,dat->displs,unit,comm,req);
  return(0);
}

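/* Data flow of the broadcast above, for the hypothetical layout with local root counts 2, 2, 1
   (so recvcounts = {2,2,1} and displs = {0,2,4} from PetscSFSetUp_Allgatherv()):

     rank 0 contributes packed roots  r0 r1
     rank 1 contributes packed roots  r2 r3     ->  every rank's leafbuf = r0 r1 r2 r3 r4
     rank 2 contributes packed root   r4

   Each rank sends sendcount = sf->nroots entries and MPIU_Iallgatherv() delivers the
   concatenation of all roots into every rank's leaf buffer. The request is completed in
   PetscSFBcastEnd_Basic(), which unpacks leafbuf into leafdata with op. */
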
static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
{
  PetscErrorCode     ierr;
  PetscSFLink        link;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  PetscInt           rstart;
  PetscMPIInt        rank,count,recvcount;
  MPI_Comm           comm;
  void               *rootbuf = NULL,*leafbuf = NULL;
  MPI_Request        *req;

  PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_REDUCE,&link);
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from local leafdata is fine */
    PetscLayoutGetRange(sf->map,&rstart,NULL);
    (*link->Memcpy)(link,rootmtype,rootdata,leafmtype,(const char*)leafdata+(size_t)rstart*link->unitbytes,(size_t)sf->nroots*link->unitbytes);
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) {(*link->SyncStream)(link);}
  } else {
    /* Reduce leafdata, then scatter to rootdata */
    PetscObjectGetComm((PetscObject)sf,&comm);
    MPI_Comm_rank(comm,&rank);
    PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata);
    PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */);
    PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_LEAF2ROOT,&rootbuf,&leafbuf,&req,NULL);
    PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE],&recvcount);
    /* Allocate a separate leaf buffer on rank 0 */
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      PetscSFMalloc(sf,link->leafmtype_mpi,sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]);
    }
    /* In case we already copied leafdata from device to host (i.e., no use_gpu_aware_mpi), we need to adjust leafbuf on rank 0 */
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    PetscMPIIntCast(sf->nleaves*link->bs,&count);
    PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_LEAF2ROOT);
    MPI_Reduce(leafbuf,link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],count,link->basicunit,op,0,comm); /* Must do the reduce with the MPI builtin datatype basicunit */
    MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],dat->recvcounts,dat->displs,unit,rootbuf,recvcount,unit,0,comm,req);
  }
  return(0);
}

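/* A rough sketch of the non-REPLACE branch above, for the hypothetical 2/2/1 layout and
   op = MPI_SUM: the length-5 leaf vectors on all ranks are combined entrywise on rank 0 by
   MPI_Reduce(), and the combined vector is then split back along the layout by
   MPIU_Iscatterv(),

     combined[0..1] -> rank 0 rootbuf,  combined[2..3] -> rank 1 rootbuf,  combined[4] -> rank 2 rootbuf

   PetscSFReduceEnd_Basic() (see PetscSFReduceEnd_Allgatherv() below) then folds the received
   rootbuf into rootdata with op. */
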
PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,const void *leafdata,void *rootdata,MPI_Op op)
{
  PetscErrorCode ierr;
  PetscSFLink    link;

  if (op == MPI_REPLACE) {
    /* A rare case happens when op is MPI_REPLACE and we use GPUs but not GPU-aware MPI. In PetscSFReduceBegin_Allgather(v),
       we did a device-to-device copy and in effect finished the communication. But PetscSFLinkFinishCommunication() in
       PetscSFReduceEnd_Basic(), seeing that there is a rootbuf, would call PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(),
       which does a host-to-device memory copy on rootbuf and wrongly overwrites the results. So we do not call
       PetscSFReduceEnd_Basic() in this case, and just reclaim the link.
    */
    PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);
    PetscSFLinkReclaim(sf,&link);
  } else {
    PetscSFReduceEnd_Basic(sf,unit,leafdata,rootdata,op);
  }
  return(0);
}

static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata)
{
  PetscErrorCode ierr;
  PetscSFLink    link;
  PetscMPIInt    rank;

  PetscSFBcastBegin_Gatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,MPI_REPLACE);
  PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);
  PetscSFLinkFinishCommunication(sf,link,PETSCSF_ROOT2LEAF);
  MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    (*link->Memcpy)(link,PETSC_MEMTYPE_DEVICE,leafdata,PETSC_MEMTYPE_HOST,link->leafbuf[PETSC_MEMTYPE_HOST],sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes);
  }
  PetscSFLinkReclaim(sf,&link);
  return(0);
}

/* This routine is very tricky (I believe it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation).

   Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0,1,2 have a leaf with value 2,3,4 respectively. The leaves are connected
   to the root on rank 0. Suppose op=MPI_SUM and ranks 0,1,2 get the root state in their rank order. By definition of this routine, rank 0 sees 1
   in root, fetches it into its leafupdate, then updates root to 1 + 2 = 3; rank 1 sees 3 in root, fetches it into its leafupdate, then updates
   root to 3 + 3 = 6; rank 2 sees 6 in root, fetches it into its leafupdate, then updates root to 6 + 4 = 10. At the end, leafupdate on ranks
   0,1,2 is 1,3,6 respectively, and root is 10.

   We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate

                rank-0   rank-1   rank-2
   Root            1
   Leaf            2        3        4
   Leafupdate      2        3        4

   Do MPI_Exscan on leafupdate,

                rank-0   rank-1   rank-2
   Root            1
   Leaf            2        3        4
   Leafupdate      2        2        5

   BcastAndOp from root to leafupdate,

                rank-0   rank-1   rank-2
   Root            1
   Leaf            2        3        4
   Leafupdate      3        3        6

   Copy root to leafupdate on rank-0,

                rank-0   rank-1   rank-2
   Root            1
   Leaf            2        3        4
   Leafupdate      1        3        6

   Reduce from leaf to root,

                rank-0   rank-1   rank-2
   Root           10
   Leaf            2        3        4
   Leafupdate      1        3        6
*/
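/* A self-contained sketch of the same sequence on plain ints, with 3 ranks and op = MPI_SUM,
   reproducing the tables above. The names and values here are illustrative only and are not
   part of this implementation:

     int      rank,size,tmp,bcastroot,leaf,root,leafupdate;
     MPI_Comm comm = MPI_COMM_WORLD;

     MPI_Comm_rank(comm,&rank);
     MPI_Comm_size(comm,&size);
     leaf       = rank + 2;                 leaf values 2, 3, 4
     root       = (rank == 0) ? 1 : 0;      the single root lives on rank 0
     leafupdate = leaf;                     copy leafdata to leafupdate

     MPI_Exscan(MPI_IN_PLACE,&leafupdate,1,MPI_INT,MPI_SUM,comm);
         now leafupdate = {unspecified, 2, 5}; rank 0's value does not matter because it is
         overwritten below

     bcastroot = root;
     MPI_Bcast(&bcastroot,1,MPI_INT,0,comm);
     leafupdate += bcastroot;               BcastAndOp: leafupdate = {-, 3, 6}

     if (rank == 0) leafupdate = root;      rank 0 fetches the pristine root: {1, 3, 6}

     MPI_Reduce(&leaf,&tmp,1,MPI_INT,MPI_SUM,0,comm);
     if (rank == 0) root += tmp;            root = 1 + (2+3+4) = 10
*/
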
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,void *rootdata,PetscMemType leafmtype,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscErrorCode ierr;
  PetscSFLink    link;
  MPI_Comm       comm;
  PetscMPIInt    count;

  PetscObjectGetComm((PetscObject)sf,&comm);
  if (PetscMemTypeDevice(rootmtype) || PetscMemTypeDevice(leafmtype)) SETERRQ(comm,PETSC_ERR_SUP,"Do FetchAndOp on device");
  /* Copy leafdata to leafupdate */
  PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_FETCH,&link);
  PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata); /* Sync the device */
  (*link->Memcpy)(link,leafmtype,leafupdate,leafmtype,leafdata,sf->nleaves*link->unitbytes);
  PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);

  /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
  if (op == MPI_REPLACE) {
    PetscMPIInt size,rank,prev,next;
    MPI_Comm_rank(comm,&rank);
    MPI_Comm_size(comm,&size);
    prev = rank ? rank-1 : MPI_PROC_NULL;
    next = (rank < size-1) ? rank+1 : MPI_PROC_NULL;
    PetscMPIIntCast(sf->nleaves,&count);
    MPI_Sendrecv_replace(leafupdate,count,unit,next,link->tag,prev,link->tag,comm,MPI_STATUS_IGNORE);
  } else {
    PetscMPIIntCast(sf->nleaves*link->bs,&count);
    MPI_Exscan(MPI_IN_PLACE,leafupdate,count,link->basicunit,op,comm);
  }
  PetscSFLinkReclaim(sf,&link);
  PetscSFBcastBegin(sf,unit,rootdata,leafupdate,op);
  PetscSFBcastEnd(sf,unit,rootdata,leafupdate,op);

  /* Bcast roots to rank 0's leafupdate */
  PetscSFBcastToZero_Private(sf,unit,rootdata,leafupdate); /* Using this line makes Allgather SFs able to inherit this routine */

  /* Reduce leafdata to rootdata */
  PetscSFReduceBegin(sf,unit,leafdata,rootdata,op);
  return(0);
}

PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,void *rootdata,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscErrorCode ierr;

  PetscSFReduceEnd(sf,unit,leafdata,rootdata,op);
  return(0);
}

/* Get root ranks accessing my leaves */
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf,PetscInt *nranks,const PetscMPIInt **ranks,const PetscInt **roffset,const PetscInt **rmine,const PetscInt **rremote)
{
  PetscInt       i,j,k,size;
  const PetscInt *range;

  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* On rank != 0, sf->nranks = 0. The sf->nranks test makes this routine also work for sfgatherv */
    size = sf->nranks;
    PetscLayoutGetRanges(sf->map,&range);
    PetscMalloc4(size,&sf->ranks,size+1,&sf->roffset,sf->nleaves,&sf->rmine,sf->nleaves,&sf->rremote);
    for (i=0; i<size; i++) sf->ranks[i] = i;
    PetscArraycpy(sf->roffset,range,size+1);
    for (i=0; i<sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL even for contiguous leaves */
    for (i=0; i<size; i++) {
      for (j=range[i],k=0; j<range[i+1]; j++,k++) sf->rremote[j] = k;
    }
  }

  if (nranks)  *nranks  = sf->nranks;
  if (ranks)   *ranks   = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine)   *rmine   = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  return(0);
}

/* Get leaf ranks accessing my roots */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf,PetscInt *niranks,const PetscMPIInt **iranks,const PetscInt **ioffset,const PetscInt **irootloc)
{
  PetscErrorCode     ierr;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  PetscInt           i,j;

  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  PetscObjectGetComm((PetscObject)sf,&comm);
  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);
  if (niranks) *niranks = size;

  /* PetscSF_Basic distinguishes incoming ranks. Here we do not need that, but we must put self first and
     sort the other ranks. See the comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ for why.
   */
  if (iranks) {
    if (!dat->iranks) {
      PetscMalloc1(size,&dat->iranks);
      dat->iranks[0] = rank;
      for (i=0,j=1; i<size; i++) {if (i == rank) continue; dat->iranks[j++] = i;}
    }
    *iranks = dat->iranks; /* dat->iranks was init'ed to NULL by PetscNewLog */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      PetscMalloc1(size+1,&dat->ioffset);
      for (i=0; i<=size; i++) dat->ioffset[i] = i*sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      PetscMalloc1(sf->nleaves,&dat->irootloc);
      for (i=0; i<size; i++) {
        for (j=0; j<sf->nroots; j++) dat->irootloc[i*sf->nroots+j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  return(0);
}

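/* Example (hypothetical values): with size = 3 and sf->nroots = 2 on every rank, the arrays
   lazily built above on rank 1 are

     dat->iranks   = {1, 0, 2}               self first, then the others in increasing order
     dat->ioffset  = {0, 2, 4, 6}            i*sf->nroots
     dat->irootloc = {0, 1, 0, 1, 0, 1}      every incoming rank touches all local roots

   since in this graph every rank's leaves are connected to every root. */
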
PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf,PetscSF *out)
{
  PetscInt    i,nroots,nleaves,rstart,*ilocal;
  PetscSFNode *iremote;
  PetscSF     lsf;

  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  PetscMalloc1(nleaves,&ilocal);
  PetscMalloc1(nleaves,&iremote);
  PetscLayoutGetRange(sf->map,&rstart,NULL);

  for (i=0; i<nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  PetscSFCreate(PETSC_COMM_SELF,&lsf);
  PetscSFSetGraph(lsf,nroots,nleaves,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);
  PetscSFSetUp(lsf);
  *out = lsf;
  return(0);
}

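/* Example (hypothetical values): a rank owning roots [rstart, rstart+2) = [4, 6) of the global
   layout gets a local SF with

     nroots = nleaves = 2
     ilocal  = {4, 5}              leaves keep their global (contiguous) indices
     iremote = {(0,0), (0,1)}      rank 0 in PETSC_COMM_SELF, local root indices 0 and 1

   i.e., the local SF maps each on-process root to the leaf that represents it. */
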
PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
{
  PetscErrorCode     ierr;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;

  sf->ops->BcastEnd        = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd       = PetscSFReduceEnd_Allgatherv;

  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  PetscNewLog(sf,&dat);
  sf->data = (void*)dat;
  return(0);
}
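
/* A minimal usage sketch (error checking omitted; it assumes PetscSFSetGraphWithPattern() with
   PETSCSF_PATTERN_ALLGATHER ends up selecting this family of implementations):

     PetscSF     sf;
     PetscLayout map;

     PetscLayoutCreate(comm,&map);
     PetscLayoutSetLocalSize(map,nlocal);    each rank owns nlocal roots
     PetscLayoutSetUp(map);
     PetscSFCreate(comm,&sf);
     PetscSFSetGraphWithPattern(sf,map,PETSCSF_PATTERN_ALLGATHER);
     PetscSFBcastBegin(sf,MPIU_SCALAR,rootdata,leafdata,MPI_REPLACE);
     PetscSFBcastEnd(sf,MPIU_SCALAR,rootdata,leafdata,MPI_REPLACE);
         leafdata is now the concatenation of all ranks' rootdata
     PetscSFDestroy(&sf);
     PetscLayoutDestroy(&map);
*/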