Actual source code: mmdense.c

  1: #define PETSCMAT_DLL

  3: /*
  4:    Support for the parallel dense matrix vector multiply
  5: */
 6:  #include "../src/mat/impls/dense/mpi/mpidense.h"
 7:  #include "petscblaslapack.h"

 11: PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
 12: {
 13:   Mat_MPIDense *mdn = (Mat_MPIDense*)mat->data;
 15:   IS           from,to;
 16:   Vec          gvec;

 19:   /* Create local vector that is used to scatter into */
 20:   VecCreateSeq(PETSC_COMM_SELF,mat->cmap->N,&mdn->lvec);
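        /* lvec spans all N = mat->cmap->N columns: the dense local block stores every
           column of its rows, so the multiply needs the whole input vector on each process */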

 22:   /* Create temporary index sets used to build the scatter-gather context */
 23:   ISCreateStride(((PetscObject)mat)->comm,mat->cmap->N,0,1,&from);
 24:   ISCreateStride(PETSC_COMM_SELF,mat->cmap->N,0,1,&to);

 26:   /* Create temporary global vector to generate scatter context */
 27:   /* n    = mdn->cowners[mdn->rank+1] - mdn->cowners[mdn->rank]; */

 29:   VecCreateMPIWithArray(((PetscObject)mat)->comm,mdn->nvec,mat->cmap->N,PETSC_NULL,&gvec);

 31:   /* Generate the scatter context */
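        /* the scatter below gathers the entire distributed input vector into lvec,
           which is what the parallel matrix-vector multiply works against */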
 32:   VecScatterCreate(gvec,from,mdn->lvec,to,&mdn->Mvctx);
 33:   PetscLogObjectParent(mat,mdn->Mvctx);
 34:   PetscLogObjectParent(mat,mdn->lvec);
 35:   PetscLogObjectParent(mat,from);
 36:   PetscLogObjectParent(mat,to);
 37:   PetscLogObjectParent(mat,gvec);

 39:   ISDestroy(to);
 40:   ISDestroy(from);
 41:   VecDestroy(gvec);
 42:   return(0);
 43: }

 45: EXTERN PetscErrorCode MatGetSubMatrices_MPIDense_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);
 48: PetscErrorCode MatGetSubMatrices_MPIDense(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
 49: {
 51:   PetscInt           nmax,nstages_local,nstages,i,pos,max_no;

 54:   /* Allocate memory to hold all the submatrices */
 55:   if (scall != MAT_REUSE_MATRIX) {
 56:     PetscMalloc((ismax+1)*sizeof(Mat),submat);
 57:   }
 58:   /* Determine the number of stages in which the submatrices are extracted */
 59:   nmax          = 20*1000000 / (C->cmap->N * sizeof(PetscInt));
 60:   if (!nmax) nmax = 1;
 61:   nstages_local = ismax/nmax + ((ismax % nmax)?1:0);
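        /* nmax caps how many index sets are handled per stage so that the per-stage integer
           workspace stays near 20 MB; e.g. with N = 1,000,000 columns and 4-byte PetscInt,
           nmax = 20*10^6/(10^6*4) = 5 index sets per stage */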

 63:   /* Make sure every processor loops through the same number of stages */
 64:   MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);


 67:   for (i=0,pos=0; i<nstages; i++) {
 68:     if (pos+nmax <= ismax) max_no = nmax;
 69:     else if (pos == ismax) max_no = 0;
 70:     else                   max_no = ismax-pos;
 71:     MatGetSubMatrices_MPIDense_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);
 72:     pos += max_no;
 73:   }
 74:   return(0);
 75: }
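
/*
   Illustrative usage (not part of mmdense.c): a sketch of how the routine above is reached
   through the public MatGetSubMatrices() interface, written against the PETSc-3.0-era calling
   sequences used in this file.  Here "C" stands for any assembled MPIDENSE matrix; "rows",
   "cols" and "submat" are example names.  Each process supplies its own sorted index sets
   and gets back sequential dense copies of the selected blocks.

     IS             rows,cols;
     Mat            *submat;
     PetscErrorCode ierr;

     ierr = ISCreateStride(PETSC_COMM_SELF,2,0,1,&rows);CHKERRQ(ierr);
     ierr = ISCreateStride(PETSC_COMM_SELF,2,0,1,&cols);CHKERRQ(ierr);
     ierr = MatGetSubMatrices(C,1,&rows,&cols,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
     (submat[0] is now a 2 x 2 SeqDense copy of rows 0-1, columns 0-1 of C)
     ierr = MatDestroyMatrices(1,&submat);CHKERRQ(ierr);
     ierr = ISDestroy(rows);CHKERRQ(ierr);
     ierr = ISDestroy(cols);CHKERRQ(ierr);
*/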
 76: /* -------------------------------------------------------------------------*/
 79: PetscErrorCode MatGetSubMatrices_MPIDense_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
 80: {
 81:   Mat_MPIDense   *c = (Mat_MPIDense*)C->data;
 82:   Mat            A = c->A;
 83:   Mat_SeqDense   *a = (Mat_SeqDense*)A->data,*mat;
 85:   PetscMPIInt    rank,size,tag0,tag1,idex,end,i;
 86:   PetscInt       N = C->cmap->N,rstart = C->rmap->rstart,count;
 87:   const PetscInt **irow,**icol,*irow_i;
 88:   PetscInt       *nrow,*ncol,*w1,*w3,*w4,*rtable,start;
 89:   PetscInt       **sbuf1,m,j,k,l,ct1,**rbuf1,row,proc;
 90:   PetscInt       nrqs,msz,**ptr,*ctr,*pa,*tmp,bsz,nrqr;
 91:   PetscInt       is_no,jmax,**rmap,*rmap_i;
 92:   PetscInt       ctr_j,*sbuf1_j,*rbuf1_i;
 93:   MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
 94:   MPI_Status     *r_status1,*r_status2,*s_status1,*s_status2;
 95:   MPI_Comm       comm;
 96:   PetscScalar    **rbuf2,**sbuf2;
 97:   PetscTruth     sorted;

100:   comm   = ((PetscObject)C)->comm;
101:   tag0   = ((PetscObject)C)->tag;
102:   size   = c->size;
103:   rank   = c->rank;
104:   m      = C->rmap->N;
105: 
106:   /* Get some new tags to keep the communication clean */
107:   PetscObjectGetNewTag((PetscObject)C,&tag1);

109:   /* Check that the row and column indices are sorted */
110:   for (i=0; i<ismax; i++) {
111:     ISSorted(isrow[i],&sorted);
112:     if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
113:     ISSorted(iscol[i],&sorted);
114:     if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted");
115:   }

117:   PetscMalloc5(ismax,const PetscInt*,&irow,ismax,const PetscInt*,&icol,ismax,PetscInt,&nrow,ismax,PetscInt,&ncol,m,PetscInt,&rtable);
118:   for (i=0; i<ismax; i++) {
119:     ISGetIndices(isrow[i],&irow[i]);
120:     ISGetIndices(iscol[i],&icol[i]);
121:     ISGetLocalSize(isrow[i],&nrow[i]);
122:     ISGetLocalSize(iscol[i],&ncol[i]);
123:   }

125:   /* Create the lookup table for the mapping: row -> owning proc */
126:   for (i=0,j=0; i<size; i++) {
127:     jmax = C->rmap->range[i+1];
128:     for (; j<jmax; j++) {
129:       rtable[j] = i;
130:     }
131:   }
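        /* rtable[row] now gives the rank that owns global row 'row': a direct-indexed table
           built from the ownership ranges, giving O(1) lookup while parsing the ISes below */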

133:   /* Evaluate the communication: to whom messages go, their lengths, and the buffer space
134:      required. Based on this, buffers are allocated and data is copied into them */
135:   PetscMalloc3(2*size,PetscInt,&w1,size,PetscInt,&w3,size,PetscInt,&w4);
136:   PetscMemzero(w1,size*2*sizeof(PetscInt)); /* initialize work vector*/
137:   PetscMemzero(w3,size*sizeof(PetscInt)); /* initialize work vector*/
138:   for (i=0; i<ismax; i++) {
139:     PetscMemzero(w4,size*sizeof(PetscInt)); /* initialize work vector*/
140:     jmax   = nrow[i];
141:     irow_i = irow[i];
142:     for (j=0; j<jmax; j++) {
143:       row  = irow_i[j];
144:       proc = rtable[row];
145:       w4[proc]++;
146:     }
147:     for (j=0; j<size; j++) {
148:       if (w4[j]) { w1[2*j] += w4[j];  w3[j]++;}
149:     }
150:   }
151: 
152:   nrqs       = 0;              /* number of outgoing messages */
153:   msz        = 0;              /* total message length (over all procs) */
154:   w1[2*rank] = 0;              /* no message sent to self */
155:   w3[rank]   = 0;
156:   for (i=0; i<size; i++) {
157:     if (w1[2*i])  { w1[2*i+1] = 1; nrqs++;} /* there exists a message to proc i */
158:   }
159:   PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa); /*(proc -array)*/
160:   for (i=0,j=0; i<size; i++) {
161:     if (w1[2*i]) { pa[j] = i; j++; }
162:   }

164:   /* Each message has a header of 1 + 2*(number of ISes) entries, followed by the data */
165:   for (i=0; i<nrqs; i++) {
166:     j       = pa[i];
167:     w1[2*j] += w1[2*j+1] + 2* w3[j];
168:     msz     += w1[2*j];
169:   }
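        /* layout of each request sbuf1[j]: sbuf1[j][0] = number of ISes needing rows from
           proc j; for k = 1..sbuf1[j][0], sbuf1[j][2k-1] = IS index and sbuf1[j][2k] = row
           count for that IS; the requested row numbers follow, grouped by IS */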
170:   /* Do a global reduction to determine how many messages to expect*/
171:   PetscMaxSum(comm,w1,&bsz,&nrqr);
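        /* bsz  = length of the longest request this process will receive (one buffer size
           then fits every incoming message); nrqr = number of requests that will arrive */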

173:   /* Allocate memory for the recv buffers; add 1 to the length so that rbuf1[0] exists even when nrqr is zero */
174:   PetscMalloc((nrqr+1)*sizeof(PetscInt*),&rbuf1);
175:   PetscMalloc(nrqr*bsz*sizeof(PetscInt),&rbuf1[0]);
176:   for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;
177: 
178:   /* Post the receives */
179:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&r_waits1);
180:   for (i=0; i<nrqr; ++i) {
181:     MPI_Irecv(rbuf1[i],bsz,MPIU_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);
182:   }

184:   /* Allocate memory for the outgoing messages */
185:   PetscMalloc4(size,PetscInt*,&sbuf1,size,PetscInt*,&ptr,2*msz,PetscInt,&tmp,size,PetscInt,&ctr);
186:   PetscMemzero(sbuf1,size*sizeof(PetscInt*));
187:   PetscMemzero(ptr,size*sizeof(PetscInt*));
188:   {
189:     PetscInt *iptr = tmp,ict = 0;
190:     for (i=0; i<nrqs; i++) {
191:       j         = pa[i];
192:       iptr     += ict;
193:       sbuf1[j]  = iptr;
194:       ict       = w1[2*j];
195:     }
196:   }

198:   /* Form the outgoing messages */
199:   /* Initialize the header space */
200:   for (i=0; i<nrqs; i++) {
201:     j           = pa[i];
202:     sbuf1[j][0] = 0;
203:     PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));
204:     ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
205:   }
206: 
207:   /* Parse the isrow and copy data into outbuf */
208:   for (i=0; i<ismax; i++) {
209:     PetscMemzero(ctr,size*sizeof(PetscInt));
210:     irow_i = irow[i];
211:     jmax   = nrow[i];
212:     for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
213:       row  = irow_i[j];
214:       proc = rtable[row];
215:       if (proc != rank) { /* copy to the outgoing buf*/
216:         ctr[proc]++;
217:         *ptr[proc] = row;
218:         ptr[proc]++;
219:       }
220:     }
221:     /* Update the headers for the current IS */
222:     for (j=0; j<size; j++) { /* Can Optimise this loop too */
223:       if ((ctr_j = ctr[j])) {
224:         sbuf1_j        = sbuf1[j];
225:         k              = ++sbuf1_j[0];
226:         sbuf1_j[2*k]   = ctr_j;
227:         sbuf1_j[2*k-1] = i;
228:       }
229:     }
230:   }

232:   /*  Now  post the sends */
233:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);
234:   for (i=0; i<nrqs; ++i) {
235:     j = pa[i];
236:     MPI_Isend(sbuf1[j],w1[2*j],MPIU_INT,j,tag0,comm,s_waits1+i);
237:   }

239:   /* Post receives to capture the row data from the other procs */
240:   PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);
241:   PetscMalloc((nrqs+1)*sizeof(PetscScalar*),&rbuf2);
242:   for (i=0; i<nrqs; i++) {
243:     j        = pa[i];
244:     count    = (w1[2*j] - (2*sbuf1[j][0] + 1))*N;
245:     PetscMalloc((count+1)*sizeof(PetscScalar),&rbuf2[i]);
246:     MPI_Irecv(rbuf2[i],count,MPIU_SCALAR,j,tag1,comm,r_waits2+i);
247:   }
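        /* each reply carries N scalars per requested row: count strips the header
           (1 + 2*sbuf1[j][0] integers) from the request length to get the row count */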

249:   /* Receive the messages (row numbers), then pack and send off the row values
250:      to the requesting processors */

252:   PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);
253:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);
254:   PetscMalloc((nrqr+1)*sizeof(PetscScalar*),&sbuf2);
255: 
256:   {
257:     PetscScalar *sbuf2_i,*v_start;
258:     PetscInt         s_proc;
259:     for (i=0; i<nrqr; ++i) {
260:       MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);
261:       s_proc          = r_status1[i].MPI_SOURCE; /* send processor */
262:       rbuf1_i         = rbuf1[idex]; /* Actual message from s_proc */
263:       /* number of rows requested = end - start: start indexes just past the header
264:          of rbuf1[idex], and end is the total length of that message */
265:       start           = 2*rbuf1_i[0] + 1;
266:       MPI_Get_count(r_status1+i,MPIU_INT,&end);
267:       /* allocate memory sufficient to hold all the row values */
268:       PetscMalloc((end-start)*N*sizeof(PetscScalar),&sbuf2[idex]);
269:       sbuf2_i      = sbuf2[idex];
270:       /* Now pack the data */
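            /* a->v is stored column major: consecutive entries of a row are C->rmap->n apart,
               so the N values of this row are gathered with that stride into contiguous sbuf2 */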
271:       for (j=start; j<end; j++) {
272:         row = rbuf1_i[j] - rstart;
273:         v_start = a->v + row;
274:         for (k=0; k<N; k++) {
275:           sbuf2_i[0] = v_start[0];
276:           sbuf2_i++; v_start += C->rmap->n;
277:         }
278:       }
279:       /* Now send off the data */
280:       MPI_Isend(sbuf2[idex],(end-start)*N,MPIU_SCALAR,s_proc,tag1,comm,s_waits2+i);
281:     }
282:   }
283:   /* End Send-Recv of IS + row_numbers */
284:   PetscFree(r_status1);
285:   PetscFree(r_waits1);
286:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);
287:   if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status1);}
288:   PetscFree(s_status1);
289:   PetscFree(s_waits1);

291:   /* Create the submatrices */
292:   if (scall == MAT_REUSE_MATRIX) {
293:     for (i=0; i<ismax; i++) {
294:       mat = (Mat_SeqDense *)(submats[i]->data);
295:       if ((submats[i]->rmap->n != nrow[i]) || (submats[i]->cmap->n != ncol[i])) {
296:         SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
297:       }
298:       PetscMemzero(mat->v,submats[i]->rmap->n*submats[i]->cmap->n*sizeof(PetscScalar));
299:       submats[i]->factor = C->factor;
300:     }
301:   } else {
302:     for (i=0; i<ismax; i++) {
303:       MatCreate(PETSC_COMM_SELF,submats+i);
304:       MatSetSizes(submats[i],nrow[i],ncol[i],nrow[i],ncol[i]);
305:       MatSetType(submats[i],((PetscObject)A)->type_name);
306:       MatSeqDenseSetPreallocation(submats[i],PETSC_NULL);
307:     }
308:   }
309: 
310:   /* Assemble the matrices */
311:   {
312:     PetscInt         col;
313:     PetscScalar *imat_v,*mat_v,*imat_vi,*mat_vi;
314: 
315:     for (i=0; i<ismax; i++) {
316:       mat       = (Mat_SeqDense*)submats[i]->data;
317:       mat_v     = a->v;
318:       imat_v    = mat->v;
319:       irow_i    = irow[i];
320:       m         = nrow[i];
321:       for (j=0; j<m; j++) {
322:         row      = irow_i[j];
323:         proc     = rtable[row];
324:         if (proc == rank) {
325:           row      = row - rstart;
326:           mat_vi   = mat_v + row;
327:           imat_vi  = imat_v + j;
328:           for (k=0; k<ncol[i]; k++) {
329:             col = icol[i][k];
330:             imat_vi[k*m] = mat_vi[col*C->rmap->n];
331:           }
332:         }
333:       }
334:     }
335:   }
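        /* locally owned rows have now been copied straight from a->v (stride C->rmap->n between
           columns) into the column-major submatrix storage (stride m = nrow[i]); rows owned by
           other processes are filled in below from the rbuf2 replies */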

337:   /* Create the row map: this maps a row of C to the corresponding row of each submat */
338:   /* this is a very expensive operation wrt memory usage */
339:   PetscMalloc(ismax*sizeof(PetscInt*),&rmap);
340:   PetscMalloc(ismax*C->rmap->N*sizeof(PetscInt),&rmap[0]);
341:   PetscMemzero(rmap[0],ismax*C->rmap->N*sizeof(PetscInt));
342:   for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + C->rmap->N;}
343:   for (i=0; i<ismax; i++) {
344:     rmap_i = rmap[i];
345:     irow_i = irow[i];
346:     jmax   = nrow[i];
347:     for (j=0; j<jmax; j++) {
348:       rmap_i[irow_i[j]] = j;
349:     }
350:   }
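        /* rmap[i][r] = position of global row r within submats[i]; the table spans all
           C->rmap->N rows for every IS, hence the memory cost noted above */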
351: 
352:   /* Now receive the row values and assemble the rest of the matrix */
353:   PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);
354:   {
355:     PetscInt    is_max,tmp1,col,*sbuf1_i,is_sz;
356:     PetscScalar *rbuf2_i,*imat_v,*imat_vi;
357: 
358:     for (tmp1=0; tmp1<nrqs; tmp1++) { /* For each message */
359:       MPI_Waitany(nrqs,r_waits2,&i,r_status2+tmp1);
360:       /* Now dig out the corresponding sbuf1, which records which IS each requested row belongs to */
361:       sbuf1_i = sbuf1[pa[i]];
362:       is_max  = sbuf1_i[0];
363:       ct1     = 2*is_max+1;
364:       rbuf2_i = rbuf2[i];
365:       for (j=1; j<=is_max; j++) { /* For each IS belonging to the message */
366:         is_no     = sbuf1_i[2*j-1];
367:         is_sz     = sbuf1_i[2*j];
368:         mat       = (Mat_SeqDense*)submats[is_no]->data;
369:         imat_v    = mat->v;
370:         rmap_i    = rmap[is_no];
371:         m         = nrow[is_no];
372:         for (k=0; k<is_sz; k++,rbuf2_i+=N) {  /* For each row */
373:           row      = sbuf1_i[ct1]; ct1++;
374:           row      = rmap_i[row];
375:           imat_vi  = imat_v + row;
376:           for (l=0; l<ncol[is_no]; l++) { /* For each col */
377:             col = icol[is_no][l];
378:             imat_vi[l*m] = rbuf2_i[col];
379:           }
380:         }
381:       }
382:     }
383:   }
384:   /* End Send-Recv of row_values */
385:   PetscFree(r_status2);
386:   PetscFree(r_waits2);
387:   PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);
388:   if (nrqr) {MPI_Waitall(nrqr,s_waits2,s_status2);}
389:   PetscFree(s_status2);
390:   PetscFree(s_waits2);

392:   /* Restore the indices */
393:   for (i=0; i<ismax; i++) {
394:     ISRestoreIndices(isrow[i],irow+i);
395:     ISRestoreIndices(iscol[i],icol+i);
396:   }

398:   /* Destroy allocated memory */
399:   PetscFree5(irow,icol,nrow,ncol,rtable);
400:   PetscFree3(w1,w3,w4);
401:   PetscFree(pa);

403:   for (i=0; i<nrqs; ++i) {
404:     PetscFree(rbuf2[i]);
405:   }
406:   PetscFree(rbuf2);
407:   PetscFree4(sbuf1,ptr,tmp,ctr);
408:   PetscFree(rbuf1[0]);
409:   PetscFree(rbuf1);

411:   for (i=0; i<nrqr; ++i) {
412:     PetscFree(sbuf2[i]);
413:   }

415:   PetscFree(sbuf2);
416:   PetscFree(rmap[0]);
417:   PetscFree(rmap);

419:   for (i=0; i<ismax; i++) {
420:     MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);
421:     MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);
422:   }

424:   return(0);
425: }

429: PetscErrorCode MatScale_MPIDense(Mat inA,PetscScalar alpha)
430: {
431:   Mat_MPIDense   *A = (Mat_MPIDense*)inA->data;
432:   Mat_SeqDense   *a = (Mat_SeqDense*)A->A->data;
433:   PetscScalar    oalpha = alpha;
435:   PetscBLASInt   one = 1,nz = PetscBLASIntCast(inA->rmap->n*inA->cmap->N);
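        /* the local block of an MPI dense matrix holds all cmap->N columns of its rmap->n
           owned rows, so nz is the total number of locally stored entries scaled below */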

438:   BLASscal_(&nz,&oalpha,a->v,&one);
439:   PetscLogFlops(nz);
440:   return(0);
441: }