Actual source code: redistribute.c
#define PETSCKSP_DLL

/*
  This file defines a preconditioner that drops the rows (and columns) that have only a diagonal entry,
  redistributes (load balances) the remaining rows across the processes, and solves that reduced problem.
*/
#include "private/pcimpl.h"
#include "petscksp.h"
typedef struct {
  KSP         ksp;
  Vec         x,b;
  VecScatter  scatter;
  IS          is;
  PetscInt    dcnt,*drows;    /* the local rows that have only a diagonal entry */
  PetscScalar *diag;          /* inverses of the diagonal entries of those rows */
  Vec         work;
} PC_Redistribute;
static PetscErrorCode PCView_Redistribute(PC pc,PetscViewer viewer)
{
  PC_Redistribute *red = (PC_Redistribute*)pc->data;
  PetscErrorCode  ierr;
  PetscTruth      iascii,isstring;
  PetscInt        ncnt,N;

  PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
  PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_STRING,&isstring);
  if (iascii) {
    MPI_Allreduce(&red->dcnt,&ncnt,1,MPIU_INT,MPI_SUM,((PetscObject)pc)->comm);
    MatGetSize(pc->pmat,&N,PETSC_NULL);
    PetscViewerASCIIPrintf(viewer," Number rows eliminated %D Percentage rows eliminated %g\n",ncnt,100.0*((PetscReal)ncnt)/((PetscReal)N));
    PetscViewerASCIIPrintf(viewer," Redistribute preconditioner: \n");
    KSPView(red->ksp,viewer);
  } else if (isstring) {
    PetscViewerStringSPrintf(viewer," Redistribute preconditioner");
    KSPView(red->ksp,viewer);
  } else {
    SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for PC redistribute",((PetscObject)viewer)->type_name);
  }
  return(0);
}
#include "private/matimpl.h"
static PetscErrorCode PCSetUp_Redistribute(PC pc)
{
  PC_Redistribute   *red = (PC_Redistribute*)pc->data;
  PetscErrorCode    ierr;
  MPI_Comm          comm;
  PetscInt          rstart,rend,i,nz,cnt,*rows,ncnt,dcnt,*drows;
  PetscLayout       map,nmap;
  PetscMPIInt       size,rank,imdex,tag,n;
  PetscInt          *source = PETSC_NULL;
  PetscMPIInt       *nprocs = PETSC_NULL,nrecvs;
  PetscInt          j,nsends;
  PetscInt          *owner = PETSC_NULL,*starts = PETSC_NULL,count,slen;
  PetscInt          *rvalues,*svalues,recvtotal;
  PetscMPIInt       *onodes1,*olengths1;
  MPI_Request       *send_waits = PETSC_NULL,*recv_waits = PETSC_NULL;
  MPI_Status        recv_status,*send_status;
  Vec               tvec,diag;
  Mat               tmat;
  const PetscScalar *d;
  if (pc->setupcalled) {
    KSPGetOperators(red->ksp,PETSC_NULL,&tmat,PETSC_NULL);
    MatGetSubMatrix(pc->pmat,red->is,red->is,MAT_REUSE_MATRIX,&tmat);
    KSPSetOperators(red->ksp,tmat,tmat,SAME_NONZERO_PATTERN);
  } else {
    PetscObjectGetComm((PetscObject)pc,&comm);
    MPI_Comm_size(comm,&size);
    MPI_Comm_rank(comm,&rank);
    PetscObjectGetNewTag((PetscObject)pc,&tag);

    /* count non-diagonal rows on process */
    MatGetOwnershipRange(pc->mat,&rstart,&rend);
    cnt = 0;
    for (i=rstart; i<rend; i++) {
      MatGetRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);
      if (nz > 1) cnt++;
      MatRestoreRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);
    }
    PetscMalloc(cnt*sizeof(PetscInt),&rows);
    PetscMalloc((rend - rstart - cnt)*sizeof(PetscInt),&drows);

    /* list non-diagonal rows on process */
    cnt = 0; dcnt = 0;
    for (i=rstart; i<rend; i++) {
      MatGetRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);
      if (nz > 1) rows[cnt++] = i;
      else drows[dcnt++] = i - rstart;
      MatRestoreRow(pc->mat,i,&nz,PETSC_NULL,PETSC_NULL);
    }

    /* create PetscLayout for non-diagonal rows on each process */
    PetscLayoutCreate(comm,&map);
    PetscLayoutSetLocalSize(map,cnt);
    PetscLayoutSetBlockSize(map,1);
    PetscLayoutSetUp(map);
    rstart = map->rstart;
    rend   = map->rend;

    /* create PetscLayout for load-balanced non-diagonal rows on each process */
    PetscLayoutCreate(comm,&nmap);
    MPI_Allreduce(&cnt,&ncnt,1,MPIU_INT,MPI_SUM,comm);
    PetscLayoutSetSize(nmap,ncnt);
    PetscLayoutSetBlockSize(nmap,1);
    PetscLayoutSetUp(nmap);

    PetscInfo2(pc,"Number of diagonal rows eliminated %d, percentage eliminated %g\n",pc->pmat->rmap->N-ncnt,((PetscReal)(pc->pmat->rmap->N-ncnt))/((PetscReal)(pc->pmat->rmap->N)));
    /*
       This code is taken from VecScatterCreate_PtoS().
       It determines which rows need to be moved where in order to
       load balance the non-diagonal rows.
    */
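    /*
       Explanatory note (added here for clarity; it is not in the original source): in the loop below, i runs
       over the global indices this process owns in the "map" layout (the non-diagonal rows in their original
       distribution).  Row i is sent to the process j whose ownership range in the balanced "nmap" layout
       contains it, i.e. nmap->range[j] <= i < nmap->range[j+1]; nprocs[j] counts how many rows go to process j
       and owner[] records the destination of each local row.
    */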
    /* count number of contributors to each processor */
    PetscMalloc2(size,PetscMPIInt,&nprocs,cnt,PetscInt,&owner);
    PetscMemzero(nprocs,size*sizeof(PetscMPIInt));
    j      = 0;
    nsends = 0;
    for (i=rstart; i<rend; i++) {
      if (i < nmap->range[j]) j = 0;
      for (; j<size; j++) {
        if (i < nmap->range[j+1]) {
          if (!nprocs[j]++) nsends++;
          owner[i-rstart] = j;
          break;
        }
      }
    }

    /* inform other processors of number of messages and max length */
    PetscGatherNumberOfMessages(comm,PETSC_NULL,nprocs,&nrecvs);
    PetscGatherMessageLengths(comm,nsends,nrecvs,nprocs,&onodes1,&olengths1);
    PetscSortMPIIntWithArray(nrecvs,onodes1,olengths1);
    recvtotal = 0; for (i=0; i<nrecvs; i++) recvtotal += olengths1[i];

    /* post receives: rvalues - rows I will own; count - nu */
    PetscMalloc3(recvtotal,PetscInt,&rvalues,nrecvs,PetscInt,&source,nrecvs,MPI_Request,&recv_waits);
    count = 0;
    for (i=0; i<nrecvs; i++) {
      MPI_Irecv((rvalues+count),olengths1[i],MPIU_INT,onodes1[i],tag,comm,recv_waits+i);
      count += olengths1[i];
    }
    /* do sends:
       1) starts[i] gives the starting index in svalues for stuff going to
          the ith processor
    */
    PetscMalloc3(cnt,PetscInt,&svalues,nsends,MPI_Request,&send_waits,size,PetscInt,&starts);
    starts[0] = 0;
    for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
    for (i=0; i<cnt; i++) {
      svalues[starts[owner[i]]++] = rows[i];
    }
    for (i=0; i<cnt; i++) rows[i] = rows[i] - rstart;
    red->drows = drows;
    red->dcnt  = dcnt;
    PetscFree(rows);
    starts[0] = 0;
    for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
    count = 0;
    for (i=0; i<size; i++) {
      if (nprocs[i]) {
        MPI_Isend(svalues+starts[i],nprocs[i],MPIU_INT,i,tag,comm,send_waits+count++);
      }
    }

    /* wait on receives */
    count = nrecvs;
    slen  = 0;
    while (count) {
      MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
      /* unpack receives into our local space */
      MPI_Get_count(&recv_status,MPIU_INT,&n);
      slen += n;
      count--;
    }
    if (slen != recvtotal) SETERRQ2(PETSC_ERR_PLIB,"Total message lengths %D not expected %D",slen,recvtotal);

    ISCreateGeneral(comm,slen,rvalues,&red->is);

    /* free up all work space */
    PetscFree(olengths1);
    PetscFree(onodes1);
    PetscFree3(rvalues,source,recv_waits);
    PetscFree2(nprocs,owner);
    if (nsends) { /* wait on sends */
      PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
      MPI_Waitall(nsends,send_waits,send_status);
      PetscFree(send_status);
    }
    PetscFree3(svalues,send_waits,starts);
    PetscLayoutDestroy(map);
    PetscLayoutDestroy(nmap);
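
    /*
       Explanatory note (added here for clarity; it is not in the original source): red->b and red->x are the
       load-balanced right hand side and solution of the reduced system (local length slen = the number of
       non-diagonal rows assigned to this process), red->scatter pulls the entries listed in red->is out of a
       full-length vector into red->b, and the reduced operator is the submatrix of pc->pmat formed by the
       rows and columns in red->is.
    */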
    VecCreateMPI(comm,slen,PETSC_DETERMINE,&red->b);
    VecDuplicate(red->b,&red->x);
    MatGetVecs(pc->pmat,&tvec,PETSC_NULL);
    VecScatterCreate(tvec,red->is,red->b,PETSC_NULL,&red->scatter);
    VecDestroy(tvec);
    MatGetSubMatrix(pc->pmat,red->is,red->is,MAT_INITIAL_MATRIX,&tmat);
    KSPSetOperators(red->ksp,tmat,tmat,SAME_NONZERO_PATTERN);
    MatDestroy(tmat);
  }

  /* get diagonal portion of matrix; store the inverses of the diagonal entries of the diagonal-only rows */
  PetscMalloc(red->dcnt*sizeof(PetscScalar),&red->diag);
  MatGetVecs(pc->pmat,&diag,PETSC_NULL);
  MatGetDiagonal(pc->pmat,diag);
  VecGetArray(diag,(PetscScalar**)&d);
  for (i=0; i<red->dcnt; i++) {
    red->diag[i] = 1.0/d[red->drows[i]];
  }
  VecRestoreArray(diag,(PetscScalar**)&d);
  VecDestroy(diag);

  KSPSetUp(red->ksp);
  return(0);
}
static PetscErrorCode PCApply_Redistribute(PC pc,Vec b,Vec x)
{
  PC_Redistribute   *red = (PC_Redistribute*)pc->data;
  PetscErrorCode    ierr;
  PetscInt          dcnt = red->dcnt,i;
  const PetscInt    *drows = red->drows;
  PetscScalar       *xwork;
  const PetscScalar *bwork,*diag = red->diag;

  if (!red->work) {
    VecDuplicate(b,&red->work);
  }
  /* compute the rows of the solution that have only a diagonal entry: x = diag(A)^{-1} b on those rows, zero elsewhere */
  VecSet(x,0.0);
  VecGetArray(x,&xwork);
  VecGetArray(b,(PetscScalar**)&bwork);
  for (i=0; i<dcnt; i++) {
    xwork[drows[i]] = diag[i]*bwork[drows[i]];
  }
  PetscLogFlops(dcnt);
  VecRestoreArray(x,&xwork);
  VecRestoreArray(b,(PetscScalar**)&bwork);
  /* update the right hand side for the reduced system with the diagonal rows (and corresponding columns) removed */
  MatMult(pc->pmat,x,red->work);
  VecAYPX(red->work,-1.0,b);   /* red->work = b - A x */
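
  /*
     Explanatory sketch (added here for clarity; the notation is illustrative and not from the original source):
     with the rows ordered so that the diagonal-only rows come first, the system has the block form

         [ D  0 ] [x_d]   [b_d]
         [ E  S ] [x_s] = [b_s]

     The loop above set x_d = D^{-1} b_d and x_s = 0, so red->work = b - A x holds b_s - E x_d in the
     non-diagonal rows.  Those entries are exactly the right hand side of the reduced system
     S x_s = b_s - E x_d; they are scattered into red->b and solved with red->ksp below, and the reverse
     scatter places x_s back into the full solution x.
  */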
  VecScatterBegin(red->scatter,red->work,red->b,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(red->scatter,red->work,red->b,INSERT_VALUES,SCATTER_FORWARD);
  KSPSolve(red->ksp,red->b,red->x);
  VecScatterBegin(red->scatter,red->x,x,INSERT_VALUES,SCATTER_REVERSE);
  VecScatterEnd(red->scatter,red->x,x,INSERT_VALUES,SCATTER_REVERSE);
  return(0);
}
static PetscErrorCode PCDestroy_Redistribute(PC pc)
{
  PC_Redistribute *red = (PC_Redistribute*)pc->data;
  PetscErrorCode  ierr;

  if (red->scatter) {VecScatterDestroy(red->scatter);}
  if (red->is)      {ISDestroy(red->is);}
  if (red->b)       {VecDestroy(red->b);}
  if (red->x)       {VecDestroy(red->x);}
  if (red->ksp)     {KSPDestroy(red->ksp);}
  if (red->work)    {VecDestroy(red->work);}
  PetscFree(red->drows);
  PetscFree(red->diag);
  PetscFree(red);
  return(0);
}
static PetscErrorCode PCSetFromOptions_Redistribute(PC pc)
{
  PetscErrorCode  ierr;
  PC_Redistribute *red = (PC_Redistribute*)pc->data;

  KSPSetFromOptions(red->ksp);
  return(0);
}
/* -------------------------------------------------------------------------------------*/
/*MC
     PCREDISTRIBUTE - Redistributes a matrix for load balancing, removing the rows that have only a diagonal
                      entry, and then applies a KSP to the reduced matrix

     Options for the inner solve of the redistribute preconditioner can be set with -redistribute_ksp_xxx <values>
     and -redistribute_pc_xxx <values>

     Notes: Usually run this with -ksp_type preonly

     If you have used MatZeroRows() to eliminate (for example, Dirichlet) boundary conditions for a symmetric
     problem then you can use, for example, -ksp_type preonly -pc_type redistribute -redistribute_ksp_type cg
     -redistribute_pc_type bjacobi -redistribute_sub_pc_type icc to take advantage of the symmetry.

     Level: intermediate

.seealso: PCCreate(), PCSetType(), PCType (for list of available types)
M*/
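
/*
   Usage sketch (added for illustration; it is not part of the PETSc source).  It assumes a Mat A and Vecs b and x
   that have already been created and assembled on PETSC_COMM_WORLD, and it omits error checking.  The outer KSP is
   KSPPREONLY, so the outer solve is a single application of this preconditioner; the inner solver is then chosen at
   run time with options such as -redistribute_ksp_type cg -redistribute_pc_type bjacobi.

     KSP ksp;
     PC  pc;

     KSPCreate(PETSC_COMM_WORLD,&ksp);
     KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN);
     KSPSetType(ksp,KSPPREONLY);
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCREDISTRIBUTE);
     KSPSetFromOptions(ksp);
     KSPSolve(ksp,b,x);
     KSPDestroy(ksp);
*/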
PetscErrorCode PCCreate_Redistribute(PC pc)
{
  PetscErrorCode  ierr;
  PC_Redistribute *red;
  const char      *prefix;

  PetscNewLog(pc,PC_Redistribute,&red);
  pc->data = (void*)red;

  pc->ops->apply          = PCApply_Redistribute;
  pc->ops->applytranspose = 0;
  pc->ops->setup          = PCSetUp_Redistribute;
  pc->ops->destroy        = PCDestroy_Redistribute;
  pc->ops->setfromoptions = PCSetFromOptions_Redistribute;
  pc->ops->view           = PCView_Redistribute;

  KSPCreate(((PetscObject)pc)->comm,&red->ksp);
  PetscObjectIncrementTabLevel((PetscObject)red->ksp,(PetscObject)pc,1);
  PetscLogObjectParent(pc,red->ksp);
  PCGetOptionsPrefix(pc,&prefix);
  KSPSetOptionsPrefix(red->ksp,prefix);
  KSPAppendOptionsPrefix(red->ksp,"redistribute_");
  return(0);
}