Actual source code: isltog.c

  1: #define PETSCVEC_DLL

 3:  #include "petscvec.h"
 4:  #include "private/isimpl.h"

  6: PetscCookie  IS_LTOGM_COOKIE;

 10: /*@C
 11:     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

 13:     Not Collective

 15:     Input Parameter:
 16: .   ltog - local to global mapping

 18:     Output Parameter:
 19: .   n - the number of entries in the local mapping

 21:     Level: advanced

 23:     Concepts: mapping^local to global

 25: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 26: @*/
 27: PetscErrorCode  ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,PetscInt *n)
 28: {
 32:   *n = mapping->n;
 33:   return(0);
 34: }

 38: /*@C
 39:     ISLocalToGlobalMappingView - View a local to global mapping

 41:     Not Collective

 43:     Input Parameters:
 44: +   ltog - local to global mapping
 45: -   viewer - viewer

 47:     Level: advanced

 49:     Concepts: mapping^local to global

 51: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 52: @*/
 53: PetscErrorCode  ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
 54: {
 55:   PetscInt        i;
 56:   PetscMPIInt     rank;
 57:   PetscTruth      iascii;
 58:   PetscErrorCode  ierr;

 62:   if (!viewer) {
 63:     PetscViewerASCIIGetStdout(((PetscObject)mapping)->comm,&viewer);
 64:   }

 67:   MPI_Comm_rank(((PetscObject)mapping)->comm,&rank);
 68:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
 69:   if (iascii) {
 70:     for (i=0; i<mapping->n; i++) {
 71:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);
 72:     }
 73:     PetscViewerFlush(viewer);
 74:   } else {
 75:     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
 76:   }

 78:   return(0);
 79: }
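/*
   Usage sketch (illustrative only; error checking with CHKERRQ omitted; a
   previously created mapping "ltog" is assumed): query the local size and
   print the mapping with the default ASCII viewer.

     PetscInt n;

     ISLocalToGlobalMappingGetSize(ltog,&n);
     PetscPrintf(PETSC_COMM_WORLD,"mapping has %d local entries\n",n);
     ISLocalToGlobalMappingView(ltog,PETSC_VIEWER_STDOUT_WORLD);
*/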

 83: /*@
 84:     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
 85:     ordering and a global parallel ordering.

 87:     Not collective

 89:     Input Parameter:
 90: .   is - index set containing the global number for each local element

 92:     Output Parameter:
 93: .   mapping - new mapping data structure

 95:     Level: advanced

 97:     Concepts: mapping^local to global

 99: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
100: @*/
101: PetscErrorCode  ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
102: {
104:   PetscInt       n;
105:   const PetscInt *indices;
106:   MPI_Comm       comm;


112:   PetscObjectGetComm((PetscObject)is,&comm);
113:   ISGetLocalSize(is,&n);
114:   ISGetIndices(is,&indices);
115:   ISLocalToGlobalMappingCreate(comm,n,indices,mapping);
116:   ISRestoreIndices(is,&indices);

118:   return(0);
119: }
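/*
   Usage sketch (illustrative only; error checking omitted; the global numbers
   below are an arbitrary example): build a mapping from an index set holding
   the global number of each local entry.

     IS                     is;
     ISLocalToGlobalMapping ltog;
     PetscInt               globals[] = {0,1,2,6,7,8};

     ISCreateGeneral(PETSC_COMM_SELF,6,globals,&is);
     ISLocalToGlobalMappingCreateIS(is,&ltog);
     ISDestroy(is);            (the mapping copies the indices, so the IS may be destroyed)
     ...
     ISLocalToGlobalMappingDestroy(ltog);
*/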


124: /*@
125:     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
126:     ordering and a global parallel ordering.

128:     Not Collective, but communicator may have more than one process

130:     Input Parameters:
131: +   comm - MPI communicator
132: .   n - the number of local elements
133: -   indices - the global index for each local element

135:     Output Parameter:
136: .   mapping - new mapping data structure

138:     Level: advanced

140:     Concepts: mapping^local to global

142: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
143: @*/
144: PetscErrorCode  ISLocalToGlobalMappingCreate(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
145: {
147:   PetscInt       *in;

152:   PetscMalloc(n*sizeof(PetscInt),&in);
153:   PetscMemcpy(in,indices,n*sizeof(PetscInt));
154:   ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);
155:   return(0);
156: }
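/*
   Usage sketch (illustrative only; error checking omitted; the index values are
   an arbitrary example): create a mapping directly from an array of global
   indices. The indices are copied, so the caller keeps ownership of the array.

     PetscInt               globals[] = {10,11,12,20,21,22};
     ISLocalToGlobalMapping ltog;

     ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,6,globals,&ltog);
     ...
     ISLocalToGlobalMappingDestroy(ltog);
*/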

160: /*@C
161:     ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
162:     ordering and a global parallel ordering.

164:     Not Collective, but communicator may have more than one process

166:     Input Parameters:
167: +   comm - MPI communicator
168: .   n - the number of local elements
169: -   indices - the global index for each local element

171:     Output Parameter:
172: .   mapping - new mapping data structure

174:     Level: developer

176:     Notes: Does not copy the indices; it only keeps a pointer to them. ISLocalToGlobalMappingDestroy()
177:     will free the space, so the indices must be obtained with PetscMalloc() and must not be freed elsewhere.

179:     Concepts: mapping^local to global

181: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
182: @*/
183: PetscErrorCode  ISLocalToGlobalMappingCreateNC(MPI_Comm cm,PetscInt n,const PetscInt indices[],ISLocalToGlobalMapping *mapping)
184: {

190:   *mapping = PETSC_NULL;
191: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
192:   ISInitializePackage(PETSC_NULL);
193: #endif

195:   PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
196:                            cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);

198:   (*mapping)->n       = n;
199:   (*mapping)->indices = (PetscInt*)indices;
200:   PetscLogObjectMemory(*mapping,n*sizeof(PetscInt));

202:   /*
203:     Do not create the global to local mapping. This is only created if 
204:     ISGlobalToLocalMapping() is called 
205:   */
206:   (*mapping)->globals = 0;
207:   return(0);
208: }
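/*
   Usage sketch (illustrative only; error checking omitted): unlike
   ISLocalToGlobalMappingCreate(), the "no copy" variant takes over the index
   array, which therefore must come from PetscMalloc() and is freed by
   ISLocalToGlobalMappingDestroy(), not by the caller.

     PetscInt               *idx;
     ISLocalToGlobalMapping ltog;

     PetscMalloc(4*sizeof(PetscInt),&idx);
     idx[0] = 3; idx[1] = 4; idx[2] = 5; idx[3] = 9;
     ISLocalToGlobalMappingCreateNC(PETSC_COMM_WORLD,4,idx,&ltog);
     ...
     ISLocalToGlobalMappingDestroy(ltog);      (also frees idx)
*/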

212: /*@
213:     ISLocalToGlobalMappingBlock - Creates a blocked index version of an 
214:        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
215:        and VecSetLocalToGlobalMappingBlock().

217:     Not Collective, but communicator may have more than one process

219:     Input Parameters:
220: +    inmap - original point-wise mapping
221: -    bs - block size

223:     Output Parameter:
224: .   outmap - block based mapping; the indices are relative to BLOCKS, not individual vector or matrix entries.

226:     Level: advanced

228:     Concepts: mapping^local to global

230: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
231: @*/
232: PetscErrorCode  ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,PetscInt bs,ISLocalToGlobalMapping *outmap)
233: {
235:   PetscInt       *ii,i,n;

240:   if (bs > 1) {
241:     n    = inmap->n/bs;
242:     if (n*bs != inmap->n) SETERRQ(PETSC_ERR_ARG_INCOMP,"Pointwise mapping length is not divisible by block size");
243:     PetscMalloc(n*sizeof(PetscInt),&ii);
244:     for (i=0; i<n; i++) {
245:       ii[i] = inmap->indices[bs*i]/bs;
246:     }
247:     ISLocalToGlobalMappingCreate(((PetscObject)inmap)->comm,n,ii,outmap);
248:     PetscFree(ii);
249:   } else {
250:     PetscObjectReference((PetscObject)inmap);
251:     *outmap = inmap;
252:   }
253:   return(0);
254: }
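/*
   Usage sketch (illustrative only; error checking omitted; the index values are
   an arbitrary example): derive a block mapping from a point-wise mapping whose
   length is a multiple of the block size. With bs = 2 the point-wise indices
   {4,5,10,11} become the block indices {2,5}.

     PetscInt               pt[] = {4,5,10,11};
     ISLocalToGlobalMapping ltog,bltog;

     ISLocalToGlobalMappingCreate(PETSC_COMM_WORLD,4,pt,&ltog);
     ISLocalToGlobalMappingBlock(ltog,2,&bltog);
     ...
     ISLocalToGlobalMappingDestroy(bltog);
     ISLocalToGlobalMappingDestroy(ltog);
*/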
255: 
258: /*@
259:    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
260:    ordering and a global parallel ordering.

262:    Not Collective

264:    Input Parameters:
265: .  mapping - mapping data structure

267:    Level: advanced

269: .seealso: ISLocalToGlobalMappingCreate()
270: @*/
271: PetscErrorCode  ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
272: {
276:   if (--((PetscObject)mapping)->refct > 0) return(0);
277:   PetscFree(mapping->indices);
278:   PetscFree(mapping->globals);
279:   PetscHeaderDestroy(mapping);
280:   return(0);
281: }
282: 
285: /*@
286:     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
287:     a new index set using the global numbering defined in an ISLocalToGlobalMapping
288:     context.

290:     Not collective

292:     Input Parameters:
293: +   mapping - mapping between local and global numbering
294: -   is - index set in local numbering

296:     Output Parameters:
297: .   newis - index set in global numbering

299:     Level: advanced

301:     Concepts: mapping^local to global

303: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
304:           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
305: @*/
306: PetscErrorCode  ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
307: {
309:   PetscInt       n,i,*idxmap,*idxout,Nmax = mapping->n;
310:   const PetscInt *idxin;


317:   ISGetLocalSize(is,&n);
318:   ISGetIndices(is,&idxin);
319:   idxmap = mapping->indices;
320: 
321:   PetscMalloc(n*sizeof(PetscInt),&idxout);
322:   for (i=0; i<n; i++) {
323:     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax-1,i);
324:     idxout[i] = idxmap[idxin[i]];
325:   }
326:   ISRestoreIndices(is,&idxin);
327:   ISCreateGeneralNC(PETSC_COMM_SELF,n,idxout,newis);
328:   return(0);
329: }
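/*
   Usage sketch (illustrative only; error checking omitted; the index values are
   an arbitrary example): map an index set given in the local numbering to the
   global numbering. With the mapping {0->10, 1->11, 2->12} the local IS {0,2}
   becomes the global IS {10,12}.

     PetscInt               globals[] = {10,11,12},locals[] = {0,2};
     ISLocalToGlobalMapping ltog;
     IS                     isl,isg;

     ISLocalToGlobalMappingCreate(PETSC_COMM_SELF,3,globals,&ltog);
     ISCreateGeneral(PETSC_COMM_SELF,2,locals,&isl);
     ISLocalToGlobalMappingApplyIS(ltog,isl,&isg);      (isg now contains 10,12)
     ISDestroy(isl);
     ISDestroy(isg);
     ISLocalToGlobalMappingDestroy(ltog);
*/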

331: /*MC
332:    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
333:    and converts them to the global numbering.

335:    Not collective

337:    Input Parameters:
338: +  mapping - the local to global mapping context
339: .  N - number of integers
340: -  in - input indices in local numbering

342:    Output Parameter:
343: .  out - indices in global numbering

345:    Synopsis:
346:    PetscErrorCode ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])

348:    Notes: 
349:    The in and out array parameters may be identical.

351:    Level: advanced

353: .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(), 
354:           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
355:           AOPetscToApplication(), ISGlobalToLocalMappingApply()

357:     Concepts: mapping^local to global

359: M*/
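/*
   Usage sketch (illustrative only; error checking omitted; the index values are
   an arbitrary example): convert a small array of local indices to global
   indices in place, which is permitted because the input and output arrays may
   be identical.

     PetscInt               globals[] = {7,8,9,15},work[] = {0,3,1};
     ISLocalToGlobalMapping ltog;

     ISLocalToGlobalMappingCreate(PETSC_COMM_SELF,4,globals,&ltog);
     ISLocalToGlobalMappingApply(ltog,3,work,work);     (work becomes {7,15,8})
     ISLocalToGlobalMappingDestroy(ltog);
*/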

361: /* -----------------------------------------------------------------------------------------*/

365: /*
366:     Creates the global fields in the ISLocalToGlobalMapping structure
367: */
368: static PetscErrorCode ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
369: {
371:   PetscInt       i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

374:   end   = 0;
375:   start = 100000000;

377:   for (i=0; i<n; i++) {
378:     if (idx[i] < 0) continue;
379:     if (idx[i] < start) start = idx[i];
380:     if (idx[i] > end)   end   = idx[i];
381:   }
382:   if (start > end) {start = 0; end = -1;}
383:   mapping->globalstart = start;
384:   mapping->globalend   = end;

386:   PetscMalloc((end-start+2)*sizeof(PetscInt),&globals);
387:   mapping->globals = globals;
388:   for (i=0; i<end-start+1; i++) {
389:     globals[i] = -1;
390:   }
391:   for (i=0; i<n; i++) {
392:     if (idx[i] < 0) continue;
393:     globals[idx[i] - start] = i;
394:   }

396:   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(PetscInt));
397:   return(0);
398: }
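/*
   Worked example of the table built above (illustrative only): for a mapping
   with local-to-global indices {8,5,6,9} we get globalstart = 5, globalend = 9
   and a globals[] array of length end-start+1 = 5:

        global index:   5   6   7   8   9
        globals[]   :   1   2  -1   0   3

   i.e. globals[g-start] holds the local index of global index g, or -1 if g is
   not mapped to by any local index.
*/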

402: /*@
403:     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
404:     specified with a global numbering.

406:     Not collective

408:     Input Parameters:
409: +   mapping - mapping between local and global numbering
410: .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
411:            IS_GTOLM_DROP - drops the indices with no local value from the output list
412: .   n - number of global indices to map
413: -   idx - global indices to map

415:     Output Parameters:
416: +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
417: -   idxout - local index of each global index, one must pass in an array long enough 
418:              to hold all the indices. You can call ISGlobalToLocalMappingApply() with 
419:              idxout == PETSC_NULL to determine the required length (returned in nout)
420:              and then allocate the required space and call ISGlobalToLocalMappingApply()
421:              a second time to set the values.

423:     Notes:
424:     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

426:     This is not scalable in memory usage. Each processor requires an array of size
427:     O(Nglobal) to compute these.

429:     Level: advanced

431:     Concepts: mapping^global to local

433: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
434:           ISLocalToGlobalMappingDestroy()
435: @*/
436: PetscErrorCode  ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
437:                                   PetscInt n,const PetscInt idx[],PetscInt *nout,PetscInt idxout[])
438: {
439:   PetscInt       i,*globals,nf = 0,tmp,start,end;

444:   if (!mapping->globals) {
445:     ISGlobalToLocalMappingSetUp_Private(mapping);
446:   }
447:   globals = mapping->globals;
448:   start   = mapping->globalstart;
449:   end     = mapping->globalend;

451:   if (type == IS_GTOLM_MASK) {
452:     if (idxout) {
453:       for (i=0; i<n; i++) {
454:         if (idx[i] < 0) idxout[i] = idx[i];
455:         else if (idx[i] < start) idxout[i] = -1;
456:         else if (idx[i] > end)   idxout[i] = -1;
457:         else                     idxout[i] = globals[idx[i] - start];
458:       }
459:     }
460:     if (nout) *nout = n;
461:   } else {
462:     if (idxout) {
463:       for (i=0; i<n; i++) {
464:         if (idx[i] < 0) continue;
465:         if (idx[i] < start) continue;
466:         if (idx[i] > end) continue;
467:         tmp = globals[idx[i] - start];
468:         if (tmp < 0) continue;
469:         idxout[nf++] = tmp;
470:       }
471:     } else {
472:       for (i=0; i<n; i++) {
473:         if (idx[i] < 0) continue;
474:         if (idx[i] < start) continue;
475:         if (idx[i] > end) continue;
476:         tmp = globals[idx[i] - start];
477:         if (tmp < 0) continue;
478:         nf++;
479:       }
480:     }
481:     if (nout) *nout = nf;
482:   }

484:   return(0);
485: }
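/*
   Usage sketch (illustrative only; error checking omitted; "ltog" is an
   existing mapping and the global indices are an arbitrary example): the
   two-pass pattern described in the notes above with IS_GTOLM_DROP, first
   calling with idxout == PETSC_NULL to obtain the number of mappable indices,
   then allocating and calling again to fill in the local indices.

     PetscInt idx[] = {5,100,6,8},nout,*local;

     ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,4,idx,&nout,PETSC_NULL);
     PetscMalloc(nout*sizeof(PetscInt),&local);
     ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,4,idx,PETSC_NULL,local);
     ...
     PetscFree(local);
*/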

489: /*@C
490:     ISLocalToGlobalMappingGetInfo - Gets the neighbor information for each processor and 
491:      each index shared by more than one processor 

493:     Collective on ISLocalToGlobalMapping

495:     Input Parameters:
496: .   mapping - the mapping from local to global indexing

498:     Output Parameters:
499: +   nproc - number of processors that are connected to this one
500: .   procs - neighboring processors
501: .   numprocs - number of indices for each subdomain (processor)
502: -   indices - indices of nodes (in local numbering) shared with neighbors (sorted by global numbering)

504:     Level: advanced

506:     Concepts: mapping^local to global

508:     Fortran Usage: 
509: $        ISLocalToGlobalMpngGetInfoSize(ISLocalToGlobalMapping,PetscInt nproc,PetscInt numprocmax,ierr) followed by 
510: $        ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping,PetscInt nproc, PetscInt procs[nproc],PetscInt numprocs[nproc],
511:           PetscInt indices[nproc][numprocmax],ierr)
512:         There is no ISLocalToGlobalMappingRestoreInfo() in Fortran. You must make sure that procs[], numprocs[] and 
513:         indices[][] are large enough arrays, either by allocating them dynamically or defining static ones large enough.


516: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
517:           ISLocalToGlobalMappingRestoreInfo()
518: @*/
519: PetscErrorCode  ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
520: {
522:   PetscMPIInt    size,rank,tag1,tag2,tag3,*len,*source,imdex;
523:   PetscInt       i,n = mapping->n,Ng,ng,max = 0,*lindices = mapping->indices;
524:   PetscInt       *nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
525:   PetscInt       cnt,scale,*ownedsenders,*nownedsenders,rstart,nowned;
526:   PetscInt       node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
527:   PetscInt       first_procs,first_numprocs,*first_indices;
528:   MPI_Request    *recv_waits,*send_waits;
529:   MPI_Status     recv_status,*send_status,*recv_statuses;
530:   MPI_Comm       comm = ((PetscObject)mapping)->comm;
531:   PetscTruth     debug = PETSC_FALSE;

535:   MPI_Comm_size(comm,&size);
536:   MPI_Comm_rank(comm,&rank);
537:   if (size == 1) {
538:     *nproc         = 0;
539:     *procs         = PETSC_NULL;
540:     PetscMalloc(sizeof(PetscInt),numprocs);
541:     (*numprocs)[0] = 0;
542:     PetscMalloc(sizeof(PetscInt*),indices);
543:     (*indices)[0]  = PETSC_NULL;
544:     return(0);
545:   }

547:   PetscOptionsGetTruth(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug,PETSC_NULL);

549:   /*
550:     Notes on ISLocalToGlobalMappingGetInfo

552:     globally owned node - a node that has been assigned to this processor in the global
553:            numbering, just for this routine.

555:     nontrivial globally owned node - node assigned to this processor that is on a subdomain
556:            boundary (i.e. it has more than one local owner)

558:     locally owned node - node that exists on this processor's subdomain

560:     nontrivial locally owned node - node that is not in the interior (i.e. it belongs to more than
561:            one local subdomain)
562:   */
563:   PetscObjectGetNewTag((PetscObject)mapping,&tag1);
564:   PetscObjectGetNewTag((PetscObject)mapping,&tag2);
565:   PetscObjectGetNewTag((PetscObject)mapping,&tag3);

567:   for (i=0; i<n; i++) {
568:     if (lindices[i] > max) max = lindices[i];
569:   }
570:   MPI_Allreduce(&max,&Ng,1,MPIU_INT,MPI_MAX,comm);
571:   Ng++;
572:   MPI_Comm_size(comm,&size);
573:   MPI_Comm_rank(comm,&rank);
574:   scale  = Ng/size + 1;
575:   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
576:   rstart = scale*rank;

578:   /* determine ownership ranges of global indices */
579:   PetscMalloc(2*size*sizeof(PetscInt),&nprocs);
580:   PetscMemzero(nprocs,2*size*sizeof(PetscInt));

582:   /* determine owners of each local node  */
583:   PetscMalloc(n*sizeof(PetscInt),&owner);
584:   for (i=0; i<n; i++) {
585:     proc             = lindices[i]/scale; /* processor that globally owns this index */
586:     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
587:     owner[i]         = proc;
588:     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
589:   }
590:   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
591:   PetscInfo1(mapping,"Number of global owners for my local data %d\n",nsends);

593:   /* inform other processors of the number of messages and the max length */
594:   PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
595:   PetscInfo1(mapping,"Number of local owners for my global data %d\n",nrecvs);

597:   /* post receives for owned rows */
598:   PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(PetscInt),&recvs);
599:   PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
600:   for (i=0; i<nrecvs; i++) {
601:     MPI_Irecv(recvs+2*nmax*i,2*nmax,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);
602:   }

604:   /* pack messages containing lists of local nodes to owners */
605:   PetscMalloc((2*n+1)*sizeof(PetscInt),&sends);
606:   PetscMalloc((size+1)*sizeof(PetscInt),&starts);
607:   starts[0]  = 0;
608:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
609:   for (i=0; i<n; i++) {
610:     sends[starts[owner[i]]++] = lindices[i];
611:     sends[starts[owner[i]]++] = i;
612:   }
613:   PetscFree(owner);
614:   starts[0]  = 0;
615:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}

617:   /* send the messages */
618:   PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
619:   PetscMalloc((nsends+1)*sizeof(PetscInt),&dest);
620:   cnt = 0;
621:   for (i=0; i<size; i++) {
622:     if (nprocs[2*i]) {
623:       MPI_Isend(sends+starts[i],2*nprocs[2*i],MPIU_INT,i,tag1,comm,send_waits+cnt);
624:       dest[cnt] = i;
625:       cnt++;
626:     }
627:   }
628:   PetscFree(starts);

630:   /* wait on receives */
631:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&source);
632:   PetscMalloc((nrecvs+1)*sizeof(PetscMPIInt),&len);
633:   cnt  = nrecvs;
634:   PetscMalloc((ng+1)*sizeof(PetscInt),&nownedsenders);
635:   PetscMemzero(nownedsenders,ng*sizeof(PetscInt));
636:   while (cnt) {
637:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
638:     /* unpack receives into our local space */
639:     MPI_Get_count(&recv_status,MPIU_INT,&len[imdex]);
640:     source[imdex]  = recv_status.MPI_SOURCE;
641:     len[imdex]     = len[imdex]/2;
642:     /* count how many local owners for each of my global owned indices */
643:     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
644:     cnt--;
645:   }
646:   PetscFree(recv_waits);

648:   /* count how many of my globally owned indices are nontrivial (shared by more than one processor) and the total number of (index,sharing processor) pairs */
649:   nowned  = 0;
650:   nownedm = 0;
651:   for (i=0; i<ng; i++) {
652:     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
653:   }

655:   /* create single array to contain rank of all local owners of each globally owned index */
656:   PetscMalloc((nownedm+1)*sizeof(PetscInt),&ownedsenders);
657:   PetscMalloc((ng+1)*sizeof(PetscInt),&starts);
658:   starts[0] = 0;
659:   for (i=1; i<ng; i++) {
660:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
661:     else starts[i] = starts[i-1];
662:   }

664:   /* for each nontrivial globally owned node, list all arriving processors */
665:   for (i=0; i<nrecvs; i++) {
666:     for (j=0; j<len[i]; j++) {
667:       node = recvs[2*i*nmax+2*j]-rstart;
668:       if (nownedsenders[node] > 1) {
669:         ownedsenders[starts[node]++] = source[i];
670:       }
671:     }
672:   }

674:   if (debug) { /* -----------------------------------  */
675:     starts[0]    = 0;
676:     for (i=1; i<ng; i++) {
677:       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
678:       else starts[i] = starts[i-1];
679:     }
680:     for (i=0; i<ng; i++) {
681:       if (nownedsenders[i] > 1) {
682:         PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);
683:         for (j=0; j<nownedsenders[i]; j++) {
684:           PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);
685:         }
686:         PetscSynchronizedPrintf(comm,"\n");
687:       }
688:     }
689:     PetscSynchronizedFlush(comm);
690:   }/* -----------------------------------  */

692:   /* wait on original sends */
693:   if (nsends) {
694:     PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
695:     MPI_Waitall(nsends,send_waits,send_status);
696:     PetscFree(send_status);
697:   }
698:   PetscFree(send_waits);
699:   PetscFree(sends);
700:   PetscFree(nprocs);

702:   /* pack messages to send back to local owners */
703:   starts[0]    = 0;
704:   for (i=1; i<ng; i++) {
705:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
706:     else starts[i] = starts[i-1];
707:   }
708:   nsends2 = nrecvs;
709:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&nprocs); /* length of each message */
710:   for (i=0; i<nrecvs; i++) {
711:     nprocs[i] = 1;
712:     for (j=0; j<len[i]; j++) {
713:       node = recvs[2*i*nmax+2*j]-rstart;
714:       if (nownedsenders[node] > 1) {
715:         nprocs[i] += 2 + nownedsenders[node];
716:       }
717:     }
718:   }
719:   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
720:   PetscMalloc((nt+1)*sizeof(PetscInt),&sends2);
721:   PetscMalloc((nsends2+1)*sizeof(PetscInt),&starts2);
722:   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
723:   /*
724:      Each message is nprocs[i] entries long and consists of
725:        (0) the number of nontrivial nodes being sent back, followed for each such node by
726:        (1) the local node number,
727:        (2) the number of processors sharing it,
728:        (3) the processors sharing it
729:   */
730:   for (i=0; i<nsends2; i++) {
731:     cnt = 1;
732:     sends2[starts2[i]] = 0;
733:     for (j=0; j<len[i]; j++) {
734:       node = recvs[2*i*nmax+2*j]-rstart;
735:       if (nownedsenders[node] > 1) {
736:         sends2[starts2[i]]++;
737:         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
738:         sends2[starts2[i]+cnt++] = nownedsenders[node];
739:         PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(PetscInt));
740:         cnt += nownedsenders[node];
741:       }
742:     }
743:   }

745:   /* receive the message lengths */
746:   nrecvs2 = nsends;
747:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&lens2);
748:   PetscMalloc((nrecvs2+1)*sizeof(PetscInt),&starts3);
749:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
750:   for (i=0; i<nrecvs2; i++) {
751:     MPI_Irecv(&lens2[i],1,MPIU_INT,dest[i],tag2,comm,recv_waits+i);
752:   }

754:   /* send the message lengths */
755:   for (i=0; i<nsends2; i++) {
756:     MPI_Send(&nprocs[i],1,MPIU_INT,source[i],tag2,comm);
757:   }

759:   /* wait on receives of lens */
760:   if (nrecvs2) {
761:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
762:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
763:     PetscFree(recv_statuses);
764:   }
765:   PetscFree(recv_waits);

767:   starts3[0] = 0;
768:   nt         = 0;
769:   for (i=0; i<nrecvs2-1; i++) {
770:     starts3[i+1] = starts3[i] + lens2[i];
771:     nt          += lens2[i];
772:   }
773:   nt += lens2[nrecvs2-1];

775:   PetscMalloc((nt+1)*sizeof(PetscInt),&recvs2);
776:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
777:   for (i=0; i<nrecvs2; i++) {
778:     MPI_Irecv(recvs2+starts3[i],lens2[i],MPIU_INT,dest[i],tag3,comm,recv_waits+i);
779:   }
780: 
781:   /* send the messages */
782:   PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);
783:   for (i=0; i<nsends2; i++) {
784:     MPI_Isend(sends2+starts2[i],nprocs[i],MPIU_INT,source[i],tag3,comm,send_waits+i);
785:   }

787:   /* wait on receives */
788:   if (nrecvs2) {
789:     PetscMalloc(nrecvs2*sizeof(MPI_Status),&recv_statuses);
790:     MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
791:     PetscFree(recv_statuses);
792:   }
793:   PetscFree(recv_waits);
794:   PetscFree(nprocs);

796:   if (debug) { /* -----------------------------------  */
797:     cnt = 0;
798:     for (i=0; i<nrecvs2; i++) {
799:       nt = recvs2[cnt++];
800:       for (j=0; j<nt; j++) {
801:         PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);
802:         for (k=0; k<recvs2[cnt+1]; k++) {
803:           PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);
804:         }
805:         cnt += 2 + recvs2[cnt+1];
806:         PetscSynchronizedPrintf(comm,"\n");
807:       }
808:     }
809:     PetscSynchronizedFlush(comm);
810:   } /* -----------------------------------  */

812:   /* count, for each processor (subdomain), the number of my local nodes shared with it */
813:   PetscMalloc(size*sizeof(PetscInt),&nprocs);
814:   PetscMemzero(nprocs,size*sizeof(PetscInt));
815:   cnt  = 0;
816:   for (i=0; i<nrecvs2; i++) {
817:     nt = recvs2[cnt++];
818:     for (j=0; j<nt; j++) {
819:       for (k=0; k<recvs2[cnt+1]; k++) {
820:         nprocs[recvs2[cnt+2+k]]++;
821:       }
822:       cnt += 2 + recvs2[cnt+1];
823:     }
824:   }
825:   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
826:   *nproc    = nt;
827:   PetscMalloc((nt+1)*sizeof(PetscInt),procs);
828:   PetscMalloc((nt+1)*sizeof(PetscInt),numprocs);
829:   PetscMalloc((nt+1)*sizeof(PetscInt*),indices);
830:   PetscMalloc(size*sizeof(PetscInt),&bprocs);
831:   cnt       = 0;
832:   for (i=0; i<size; i++) {
833:     if (nprocs[i] > 0) {
834:       bprocs[i]        = cnt;
835:       (*procs)[cnt]    = i;
836:       (*numprocs)[cnt] = nprocs[i];
837:       PetscMalloc(nprocs[i]*sizeof(PetscInt),&(*indices)[cnt]);
838:       cnt++;
839:     }
840:   }

842:   /* make the list of subdomains for each nontrivial local node */
843:   PetscMemzero(*numprocs,nt*sizeof(PetscInt));
844:   cnt  = 0;
845:   for (i=0; i<nrecvs2; i++) {
846:     nt = recvs2[cnt++];
847:     for (j=0; j<nt; j++) {
848:       for (k=0; k<recvs2[cnt+1]; k++) {
849:         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
850:       }
851:       cnt += 2 + recvs2[cnt+1];
852:     }
853:   }
854:   PetscFree(bprocs);
855:   PetscFree(recvs2);

857:   /* sort the node indices by their global numbers */
858:   nt = *nproc;
859:   for (i=0; i<nt; i++) {
860:     PetscMalloc(((*numprocs)[i])*sizeof(PetscInt),&tmp);
861:     for (j=0; j<(*numprocs)[i]; j++) {
862:       tmp[j] = lindices[(*indices)[i][j]];
863:     }
864:     PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);
865:     PetscFree(tmp);
866:   }

868:   if (debug) { /* -----------------------------------  */
869:     nt = *nproc;
870:     for (i=0; i<nt; i++) {
871:       PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);
872:       for (j=0; j<(*numprocs)[i]; j++) {
873:         PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);
874:       }
875:       PetscSynchronizedPrintf(comm,"\n");
876:     }
877:     PetscSynchronizedFlush(comm);
878:   } /* -----------------------------------  */

880:   /* wait on sends */
881:   if (nsends2) {
882:     PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);
883:     MPI_Waitall(nsends2,send_waits,send_status);
884:     PetscFree(send_status);
885:   }

887:   PetscFree(starts3);
888:   PetscFree(dest);
889:   PetscFree(send_waits);

891:   PetscFree(nownedsenders);
892:   PetscFree(ownedsenders);
893:   PetscFree(starts);
894:   PetscFree(starts2);
895:   PetscFree(lens2);

897:   PetscFree(source);
898:   PetscFree(len);
899:   PetscFree(recvs);
900:   PetscFree(nprocs);
901:   PetscFree(sends2);

903:   /* put the information about myself as the first entry in the list */
904:   first_procs    = (*procs)[0];
905:   first_numprocs = (*numprocs)[0];
906:   first_indices  = (*indices)[0];
907:   for (i=0; i<*nproc; i++) {
908:     if ((*procs)[i] == rank) {
909:       (*procs)[0]    = (*procs)[i];
910:       (*numprocs)[0] = (*numprocs)[i];
911:       (*indices)[0]  = (*indices)[i];
912:       (*procs)[i]    = first_procs;
913:       (*numprocs)[i] = first_numprocs;
914:       (*indices)[i]  = first_indices;
915:       break;
916:     }
917:   }
918:   return(0);
919: }
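/*
   Usage sketch (illustrative only; error checking omitted; "ltog" is an
   existing mapping shared across several processes): list the neighboring
   subdomains and the local indices shared with each, then return the storage
   with ISLocalToGlobalMappingRestoreInfo().

     PetscInt nproc,*procs,*numprocs,**shared,i,j;

     ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&shared);
     for (i=0; i<nproc; i++) {
       for (j=0; j<numprocs[i]; j++) {
         PetscPrintf(PETSC_COMM_SELF,"share local index %d with process %d\n",shared[i][j],procs[i]);
       }
     }
     ISLocalToGlobalMappingRestoreInfo(ltog,&nproc,&procs,&numprocs,&shared);
*/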

923: /*@C
924:     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()

926:     Collective on ISLocalToGlobalMapping

928:     Input Parameters:
929: .   mapping - the mapping from local to global indexing

931:     Output Parameters:
932: +   nproc - number of processors that are connected to this one
933: .   procs - neighboring processors
934: .   numprocs - number of indices for each processor
935: -   indices - indices of local nodes shared with neighbor (sorted by global numbering)

937:     Level: advanced

939: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
940:           ISLocalToGlobalMappingGetInfo()
941: @*/
942: PetscErrorCode  ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,PetscInt *nproc,PetscInt *procs[],PetscInt *numprocs[],PetscInt **indices[])
943: {
945:   PetscInt       i;

948:   PetscFree(*procs);
949:   PetscFree(*numprocs);
950:   if (*indices) {
951:     PetscFree((*indices)[0]);
952:     for (i=1; i<*nproc; i++) {
953:       PetscFree((*indices)[i]);
954:     }
955:     PetscFree(*indices);
956:   }
957:   return(0);
958: }