Actual source code: subcomm.c

  1: #define PETSC_DLL
  2: /*
  3:      Provides utility routines for split MPI communicator.
  4: */
  5: #include "petscsys.h"

  9: PetscErrorCode  PetscSubcommDestroy(PetscSubcomm psubcomm)
 10: {

 14:   PetscFree(psubcomm);
 15:   return(0);
 16: }

 20: /*@C
 21:   PetscSubcommCreate - Create a PetscSubcomm context.

 23:    Collective on MPI_Comm

 25:    Input Parameter:
 26: +  comm - MPI communicator
 27: -  nsubcomm - the number of subcommunicators to be created

 29:    Output Parameter:
 30: .  psubcomm - location to store the PetscSubcomm context


 33:    Notes:
 34:    To avoid data scattering from subcomm back to original comm, we create subcommunicators 
 35:    by iteratively taking a process into a subcommunicator. 
 36:    Example: size=4, nsubcomm=(*psubcomm)->n=3
 37:      comm=(*psubcomm)->parent:
 38:       rank:     [0]  [1]  [2]  [3]
 39:       color:     0    1    2    0

 41:      subcomm=(*psubcomm)->comm:
 42:       subrank:  [0]  [0]  [0]  [1]    

 44:      dupcomm=(*psubcomm)->dupparent:
 45:       duprank:  [0]  [2]  [3]  [1]

 47:      Here, subcomm[color = 0] has subsize=2, owns process [0] and [3]
 48:            subcomm[color = 1] has subsize=1, owns process [1]
 49:            subcomm[color = 2] has subsize=1, owns process [2]
 50:            dupcomm has same number of processes as comm, and its duprank maps
 51:            processes in subcomm contiguously into a 1d array:
 52:             duprank: [0] [1]      [2]         [3]
 53:             rank:    [0] [3]      [1]         [2]
 54:                     subcomm[0] subcomm[1]  subcomm[2]

 56:    Level: advanced

 58: .keywords: communicator, create

 60: .seealso: PetscSubcommDestroy()
 61: @*/
 62: PetscErrorCode  PetscSubcommCreate(MPI_Comm comm,PetscInt nsubcomm,PetscSubcomm *psubcomm)
 63: {
 65:   PetscMPIInt    rank,size,*subsize,duprank,subrank;
 66:   PetscInt       np_subcomm,nleftover,i,j,color;
 67:   MPI_Comm       subcomm=0,dupcomm=0;
 68:   PetscSubcomm   psubcomm_tmp;

 71:   MPI_Comm_rank(comm,&rank);
 72:   MPI_Comm_size(comm,&size);
 73:   if (nsubcomm < 1 || nsubcomm > size) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE, "Num of subcommunicators %D cannot be < 1 or > input comm size %D",nsubcomm,size);

 75:   /* get size of each subcommunicator */
 76:   PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);
 77:   np_subcomm = size/nsubcomm;
 78:   nleftover  = size - nsubcomm*np_subcomm;
 79:   for (i=0; i<nsubcomm; i++){
 80:     subsize[i] = np_subcomm;
 81:     if (i<nleftover) subsize[i]++;
 82:   }

 84:   /* find color for this proc */
 85:   color   = rank%nsubcomm;
 86:   subrank = rank/nsubcomm;

 88:   MPI_Comm_split(comm,color,subrank,&subcomm);

 90:   j = 0; duprank = 0;
 91:   for (i=0; i<nsubcomm; i++){
 92:     if (j == color){
 93:       duprank += subrank;
 94:       break;
 95:     }
 96:     duprank += subsize[i]; j++;
 97:   }
 98:   PetscFree(subsize);
 99: 
100:   /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
101:   MPI_Comm_split(comm,0,duprank,&dupcomm);
102: 
103:   PetscNew(struct _n_PetscSubcomm,&psubcomm_tmp);
104:   psubcomm_tmp->parent    = comm;
105:   psubcomm_tmp->dupparent = dupcomm;
106:   psubcomm_tmp->comm      = subcomm;
107:   psubcomm_tmp->n         = nsubcomm;
108:   psubcomm_tmp->color     = color;
109:   *psubcomm = psubcomm_tmp;
110:   return(0);
111: }