Actual source code: mpimatmatmult.c
1: #define PETSCMAT_DLL
3: /*
4: Defines matrix-matrix product routines for pairs of MPIAIJ matrices
5: C = A * B
6: */
7: #include "../src/mat/impls/aij/seq/aij.h"
8: #include "../src/mat/utils/freespace.h"
9: #include "../src/mat/impls/aij/mpi/mpiaij.h"
10: #include "petscbt.h"
11: #include "../src/mat/impls/dense/mpi/mpidense.h"
15: PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C)
16: {
20: if (scall == MAT_INITIAL_MATRIX){
21: MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);/* numeric product is computed as well */
22: } else if (scall == MAT_REUSE_MATRIX){
23: MatMatMultNumeric_MPIAIJ_MPIAIJ(A,B,*C);
24: } else {
25: SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
26: }
27: return(0);
28: }
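/*
   A minimal usage sketch (not part of the original routines), assuming the standard
   MatMatMult() interface, which dispatches to the routine above for two MPIAIJ
   matrices.  The first call builds both the symbolic and numeric product; a later
   call with MAT_REUSE_MATRIX only reruns the numeric phase on the same C.
   Error checking (ierr = ...;CHKERRQ(ierr);) is elided for brevity:

       Mat C;
       MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);  // -> MatMatMultSymbolic_MPIAIJ_MPIAIJ()
       // ... change numerical values of A and/or B, keeping their nonzero patterns ...
       MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);    // -> MatMatMultNumeric_MPIAIJ_MPIAIJ()
       MatDestroy(C);
*/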
32: PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
33: {
34: PetscErrorCode ierr;
35: Mat_MatMatMultMPI *mult=(Mat_MatMatMultMPI*)ptr;
38: PetscFree2(mult->startsj,mult->startsj_r);
39: PetscFree(mult->bufa);
40: if (mult->isrowa){ISDestroy(mult->isrowa);}
41: if (mult->isrowb){ISDestroy(mult->isrowb);}
42: if (mult->iscolb){ISDestroy(mult->iscolb);}
43: if (mult->C_seq){MatDestroy(mult->C_seq);}
44: if (mult->A_loc){MatDestroy(mult->A_loc); }
45: if (mult->B_seq){MatDestroy(mult->B_seq);}
46: if (mult->B_loc){MatDestroy(mult->B_loc);}
47: if (mult->B_oth){MatDestroy(mult->B_oth);}
48: PetscFree(mult->abi);
49: PetscFree(mult->abj);
50: PetscFree(mult);
51: return(0);
52: }
54: EXTERN PetscErrorCode MatDestroy_AIJ(Mat);
57: PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
58: {
59: PetscErrorCode ierr;
60: PetscContainer container;
61: Mat_MatMatMultMPI *mult=PETSC_NULL;
64: PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);
65: if (container) {
66: PetscContainerGetPointer(container,(void **)&mult);
67: } else {
68: SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
69: }
70: A->ops->destroy = mult->MatDestroy;
71: PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);
72: (*A->ops->destroy)(A);
73: PetscContainerDestroy(container);
74: return(0);
75: }
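/*
   A short note on the function-pointer interposition used above (a reading of the
   surrounding code, not additional functionality): MatMatMultSymbolic_MPIAIJ_MPIAIJ()
   saves C's original destroy/duplicate pointers in the attached Mat_MatMatMultMPI
   struct and installs the wrappers defined in this file.  The destroy wrapper must
   restore the saved pointer BEFORE calling it; otherwise (*A->ops->destroy)(A) would
   re-enter this wrapper instead of the original MPIAIJ destroy:

       A->ops->destroy = mult->MatDestroy;   // restore the original destroy routine
       (*A->ops->destroy)(A);                // destroys A as a plain MPIAIJ matrix
       PetscContainerDestroy(container);     // then frees the cached intermediate data
*/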
79: PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M)
80: {
81: PetscErrorCode ierr;
82: Mat_MatMatMultMPI *mult;
83: PetscContainer container;
86: PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);
87: if (container) {
88: PetscContainerGetPointer(container,(void **)&mult);
89: } else {
90: SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
91: }
92: /* Note: the container is not duplicated, because that would require deep copies of
93: several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
94: These data sets are only needed for repeated calls of MatMatMultNumeric().
95: *M is unlikely to be used in this way, so we create *M in plain mpiaij format */
96: (*mult->MatDuplicate)(A,op,M);
97: (*M)->ops->destroy = mult->MatDestroy; /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's container! */
98: (*M)->ops->duplicate = mult->MatDuplicate; /* = MatDuplicate_MPIAIJ */
99: return(0);
100: }
104: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
105: {
106: PetscErrorCode ierr;
107: PetscInt start,end;
108: Mat_MatMatMultMPI *mult;
109: PetscContainer container;
110:
112: if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
113: SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
114: }
115: PetscNew(Mat_MatMatMultMPI,&mult);
117: /* create a seq matrix B_seq = submatrix of B consisting of the rows of B that correspond to the nonzero columns of A */
118: MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&mult->isrowb,&mult->iscolb,&mult->brstart,&mult->B_seq);
120: /* create a seq matrix A_loc = submatrix of A consisting of all local rows of A */
121: start = A->rmap->rstart; end = A->rmap->rend;
122: ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&mult->isrowa);
123: MatGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,&mult->isrowa,&mult->isrowb,&mult->A_loc);
125: /* compute C_seq = A_loc * B_seq */
126: MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_INITIAL_MATRIX,fill,&mult->C_seq);
128: /* create the mpi matrix C by concatenating C_seq */
129: PetscObjectReference((PetscObject)mult->C_seq); /* prevent C_seq from being destroyed by MatMerge() */
130: MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_INITIAL_MATRIX,C);
131:
132: /* attach the supporting struct to C for reuse of symbolic C */
133: PetscContainerCreate(PETSC_COMM_SELF,&container);
134: PetscContainerSetPointer(container,mult);
135: PetscObjectCompose((PetscObject)(*C),"Mat_MatMatMultMPI",(PetscObject)container);
136: PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);
137: mult->MatDestroy = (*C)->ops->destroy;
138: mult->MatDuplicate = (*C)->ops->duplicate;
140: (*C)->ops->destroy = MatDestroy_MPIAIJ_MatMatMult;
141: (*C)->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
142: return(0);
143: }
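/*
   A short summary sketch of what the symbolic routine above sets up on each process
   (drawn from the code, not additional functionality): with A_loc = the local rows
   of A (condensed to the needed columns) and B_seq = the rows of B matching A's
   nonzero columns,

       C_seq = A_loc * B_seq     (sequential product, computed independently per process)
       C     = row-wise merge of the C_seq pieces over A's communicator (MatMerge)

   The intermediate objects live in the Mat_MatMatMultMPI struct attached to C under
   the key "Mat_MatMatMultMPI", so a later numeric call can retrieve them:

       PetscContainer    container;
       Mat_MatMatMultMPI *mult;
       PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject*)&container);
       if (container) {PetscContainerGetPointer(container,(void**)&mult);}
*/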
145: /* This routine is called ONLY in the case of reusing previously computed symbolic C */
148: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat B,Mat C)
149: {
150: PetscErrorCode ierr;
151: Mat *seq;
152: Mat_MatMatMultMPI *mult;
153: PetscContainer container;
156: PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject *)&container);
157: if (container) {
158: PetscContainerGetPointer(container,(void **)&mult);
159: } else {
160: SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
161: }
163: seq = &mult->B_seq;
164: MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);
165: mult->B_seq = *seq;
166:
167: seq = &mult->A_loc;
168: MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);
169: mult->A_loc = *seq;
171: MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_REUSE_MATRIX,0.0,&mult->C_seq);
173: PetscObjectReference((PetscObject)mult->C_seq);
174: MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_REUSE_MATRIX,&C);
175: return(0);
176: }
180: PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
181: {
185: if (scall == MAT_INITIAL_MATRIX){
186: MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);
187: }
188: MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);
189: return(0);
190: }
192: typedef struct {
193: Mat workB;                     /* sequential dense work matrix: holds the off-process rows of B needed locally */
194: PetscScalar *rvalues,*svalues; /* receive and send buffers for the row scatter */
195: MPI_Request *rwaits,*swaits;   /* nonblocking receive and send requests, one per neighbor */
196: } MPIAIJ_MPIDense;
198: PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
199: {
200: MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
201: PetscErrorCode ierr;
204: if (contents->workB) {MatDestroy(contents->workB);}
205: PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);
206: PetscFree(contents);
207: return(0);
208: }
212: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
213: {
214: PetscErrorCode ierr;
215: Mat_MPIAIJ *aij = (Mat_MPIAIJ*) A->data;
216: PetscInt nz = aij->B->cmap->n;
217: PetscContainer cont;
218: MPIAIJ_MPIDense *contents;
219: VecScatter ctx = aij->Mvctx;
220: VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
221: VecScatter_MPI_General *to = ( VecScatter_MPI_General*) ctx->todata;
222: PetscInt m=A->rmap->n,n=B->cmap->n;
225: MatCreate(((PetscObject)B)->comm,C);
226: MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);
227: MatSetType(*C,MATMPIDENSE);
228: MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);
229: MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);
231: PetscContainerCreate(((PetscObject)A)->comm,&cont);
232: PetscNew(MPIAIJ_MPIDense,&contents);
233: PetscContainerSetPointer(cont,contents);
234: PetscContainerSetUserDestroy(cont,MPIAIJ_MPIDenseDestroy);
236: /* Create the work matrix used to store the off-processor rows of B needed for the local product */
237: MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,PETSC_NULL,&contents->workB);
239: /* Create work arrays needed */
240: PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues,
241: B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues,
242: from->n,MPI_Request,&contents->rwaits,
243: to->n,MPI_Request,&contents->swaits);
245: PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)cont);
246: PetscContainerDestroy(cont);
247: return(0);
248: }
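/*
   A note on the work-array sizes above (a reading of the PetscMalloc4() call, under
   the assumption that from->starts[from->n] and to->starts[to->n] are the total
   numbers of B-rows this process receives and sends through aij->Mvctx):

       rvalues : B->cmap->N * from->starts[from->n] scalars  (every incoming row carries all N columns of B)
       svalues : B->cmap->N * to->starts[to->n]     scalars  (likewise for outgoing rows)
       rwaits  : from->n MPI_Requests                        (one nonblocking receive per sending neighbor)
       swaits  : to->n   MPI_Requests                        (one nonblocking send per receiving neighbor)

   For example, a process that must receive 3 rows of a B with N = 100 columns posts
   receives into a buffer of 3*100 = 300 PetscScalars.
*/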
250: /*
251: Performs an efficient scatter on the rows of B needed by this process; this is
252: a modification of the VecScatterBegin_() routines.
253: */
254: PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
255: {
256: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
257: PetscErrorCode ierr;
258: PetscScalar *b,*w,*svalues,*rvalues;
259: VecScatter ctx = aij->Mvctx;
260: VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
261: VecScatter_MPI_General *to = ( VecScatter_MPI_General*) ctx->todata;
262: PetscInt i,j,k;
263: PetscInt *sindices,*sstarts,*rindices,*rstarts;
264: PetscMPIInt *sprocs,*rprocs,nrecvs;
265: MPI_Request *swaits,*rwaits;
266: MPI_Comm comm = ((PetscObject)A)->comm;
267: PetscMPIInt tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
268: MPI_Status status;
269: MPIAIJ_MPIDense *contents;
270: PetscContainer cont;
271: Mat workB;
274: PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&cont);
275: PetscContainerGetPointer(cont,(void**)&contents);
277: workB = *outworkB = contents->workB;
278: if (nrows != workB->rmap->n) SETERRQ2(PETSC_ERR_PLIB,"Number of rows of workB %D not equal to number of columns of aij->B %D",workB->rmap->n,nrows);
279: sindices = to->indices;
280: sstarts = to->starts;
281: sprocs = to->procs;
282: swaits = contents->swaits;
283: svalues = contents->svalues;
285: rindices = from->indices;
286: rstarts = from->starts;
287: rprocs = from->procs;
288: rwaits = contents->rwaits;
289: rvalues = contents->rvalues;
291: MatGetArray(B,&b);
292: MatGetArray(workB,&w);
294: for (i=0; i<from->n; i++) {
295: MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);
296: }
298: for (i=0; i<to->n; i++) {
299: /* pack a message at a time */
300: CHKMEMQ;
301: for (j=0; j<sstarts[i+1]-sstarts[i]; j++){
302: for (k=0; k<ncols; k++) {
303: svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
304: }
305: }
306: CHKMEMQ;
307: MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);
308: }
310: nrecvs = from->n;
311: while (nrecvs) {
312: MPI_Waitany(from->n,rwaits,&imdex,&status);
313: nrecvs--;
314: /* unpack a message at a time */
315: CHKMEMQ;
316: for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++){
317: for (k=0; k<ncols; k++) {
318: w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
319: }
320: }
321: CHKMEMQ;
322: }
323: if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}
325: MatRestoreArray(B,&b);
326: MatRestoreArray(workB,&w);
327: MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);
328: MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);
329: return(0);
330: }
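/*
   A short note on the packing/unpacking index arithmetic above (a reading of the
   loops, not additional functionality): the dense arrays b and w are stored
   column-major, so entry (row r, col k) of B is b[r + nrowsB*k] and of workB is
   w[r + nrows*k].  Messages, in contrast, are packed row by row: row j of message i
   occupies the contiguous range

       svalues[ncols*(sstarts[i]+j)] ... svalues[ncols*(sstarts[i]+j) + ncols-1]

   For example, with ncols = 4, the third row (j = 2) of the message to the first
   neighbor (i = 0, sstarts[0] = 0) is copied from b[sindices[2] + nrowsB*k],
   k = 0..3, into svalues[8..11].
*/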
335: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
336: {
337: PetscErrorCode ierr;
338: Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data;
339: Mat_MPIDense *bdense = (Mat_MPIDense*)B->data;
340: Mat_MPIDense *cdense = (Mat_MPIDense*)C->data;
341: Mat workB;
345: /* diagonal block of A times all local rows of B */
346: MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);
348: /* get off processor parts of B needed to complete the product */
349: MatMPIDenseScatter(A,B,C,&workB);
351: /* off-diagonal block of A times nonlocal rows of B */
352: MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);
353: MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
354: MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
355: return(0);
356: }
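/*
   A brief summary of the product formed above (a reading of the two numeric calls,
   not additional functionality): with A split into its diagonal block aij->A and
   off-diagonal block aij->B, and with workB holding the off-process rows of B
   gathered by MatMPIDenseScatter(), each process computes its local rows of C as

       C_local = aij->A * B_local  +  aij->B * workB

   A hedged usage sketch, assuming the standard MatMatMult() interface dispatches
   here when A is MPIAIJ and B is MPIDENSE (error checking elided):

       Mat C;
       MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);
       MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);   // reuses C and its attached workB container
*/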