Actual source code: ml.c
1: #define PETSCKSP_DLL
3: /*
4: Provides an interface to the ML smoothed Aggregation
5: Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
6: Jed Brown, see [PETSC #18321, #18449].
7: */
8: #include "private/pcimpl.h"
9: #include "../src/ksp/pc/impls/mg/mgimpl.h"
10: #include "../src/mat/impls/aij/seq/aij.h"
11: #include "../src/mat/impls/aij/mpi/mpiaij.h"
13: #include <math.h>
15: /* HAVE_CONFIG_H flag is required by ML include files */
16: #if !defined(HAVE_CONFIG_H)
17: #define HAVE_CONFIG_H
18: #endif
19: #include "ml_include.h"
22: /* The context (data structure) at each grid level */
23: typedef struct {
24: Vec x,b,r; /* global vectors */
25: Mat A,P,R;
26: KSP ksp;
27: } GridCtx;
29: /* The context used to supply the PETSc matrix to ML at the fine grid */
30: typedef struct {
31: Mat A; /* PETSc matrix in AIJ format */
32: Mat Aloc; /* local portion of A to be used by ML */
33: Vec x,y;
34: ML_Operator *mlmat;
35: PetscScalar *pwork; /* tmp array used by PetscML_comm() */
36: } FineGridCtx;
38: /* The context that associates an ML matrix with a PETSc shell matrix */
39: typedef struct {
40: Mat A; /* PETSc shell matrix associated with mlmat */
41: ML_Operator *mlmat; /* ML matrix associated with A */
42: Vec y;
43: } Mat_MLShell;
45: /* Private context for the ML preconditioner */
46: typedef struct {
47: ML *ml_object;
48: ML_Aggregate *agg_object;
49: GridCtx *gridctx;
50: FineGridCtx *PetscMLdata;
51: PetscInt Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme;
52: PetscReal Threshold,DampingFactor;
53: PetscTruth SpectralNormScheme_Anorm;
54: PetscMPIInt size; /* size of communicator for pc->pmat */
55: } PC_ML;
59: static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
60: {
61: PetscErrorCode ierr;
62: PetscInt m,i,j,k=0,row,*aj;
63: PetscScalar *aa;
64: FineGridCtx *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
65: Mat_SeqAIJ *a = (Mat_SeqAIJ*)ml->Aloc->data;
68: ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
69: for (i = 0; i<N_requested_rows; i++) {
70: row = requested_rows[i];
71: row_lengths[i] = a->ilen[row];
72: if (allocated_space < k+row_lengths[i]) return(0);
73: if ( (row >= 0) && (row <= (m-1)) ) {
74: aj = a->j + a->i[row];
75: aa = a->a + a->i[row];
76: for (j=0; j<row_lengths[i]; j++){
77: columns[k] = aj[j];
78: values[k++] = aa[j];
79: }
80: }
81: }
82: return(1);
83: }
87: static PetscErrorCode PetscML_comm(double p[],void *ML_data)
88: {
90: FineGridCtx *ml=(FineGridCtx*)ML_data;
91: Mat A=ml->A;
92: Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
93: PetscMPIInt size;
94: PetscInt i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
95: PetscScalar *array;
98: MPI_Comm_size(((PetscObject)A)->comm,&size);
99: if (size == 1) return 0;
100:
101: VecPlaceArray(ml->y,p);
102: VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
103: VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
104: VecResetArray(ml->y);
105: VecGetArray(a->lvec,&array);
106: for (i=in_length; i<out_length; i++){
107: p[i] = array[i-in_length];
108: }
109: VecRestoreArray(a->lvec,&array);
110: return(0);
111: }
115: static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
116: {
118: FineGridCtx *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
119: Mat A=ml->A, Aloc=ml->Aloc;
120: PetscMPIInt size;
121: PetscScalar *pwork=ml->pwork;
122: PetscInt i;
125: MPI_Comm_size(((PetscObject)A)->comm,&size);
126: if (size == 1){
127: VecPlaceArray(ml->x,p);
128: } else {
129: for (i=0; i<in_length; i++) pwork[i] = p[i];
130: PetscML_comm(pwork,ml);
131: VecPlaceArray(ml->x,pwork);
132: }
133: VecPlaceArray(ml->y,ap);
134: MatMult(Aloc,ml->x,ml->y);
135: VecResetArray(ml->x);
136: VecResetArray(ml->y);
137: return(0);
138: }
142: static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
143: {
144: PetscErrorCode ierr;
145: Mat_MLShell *shell;
146: PetscScalar *xarray,*yarray;
147: PetscInt x_length,y_length;
148:
150: MatShellGetContext(A,(void **)&shell);
151: VecGetArray(x,&xarray);
152: VecGetArray(y,&yarray);
153: x_length = shell->mlmat->invec_leng;
154: y_length = shell->mlmat->outvec_leng;
155: ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
156: VecRestoreArray(x,&xarray);
157: VecRestoreArray(y,&yarray);
158: return(0);
159: }
163: static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
164: {
165: PetscErrorCode ierr;
166: Mat_MLShell *shell;
167: PetscScalar *xarray,*yarray;
168: PetscInt x_length,y_length;
169:
171: MatShellGetContext(A,(void **)&shell);
172: VecGetArray(x,&xarray);
173: VecGetArray(y,&yarray);
174: x_length = shell->mlmat->invec_leng;
175: y_length = shell->mlmat->outvec_leng;
176: ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
177: VecRestoreArray(x,&xarray);
178: VecRestoreArray(y,&yarray);
179: VecAXPY(y,1.0,w);
180: return(0);
181: }
183: /* newtype is ignored because "ml" is not a registered PETSc MatType */
186: static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
187: {
188: PetscErrorCode ierr;
189: Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data;
190: Mat_SeqAIJ *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
191: PetscInt *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
192: PetscScalar *aa=a->a,*ba=b->a,*ca;
193: PetscInt am=A->rmap->n,an=A->cmap->n,i,j,k;
194: PetscInt *ci,*cj,ncols;
197: if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);
199: if (scall == MAT_INITIAL_MATRIX){
200: PetscMalloc((1+am)*sizeof(PetscInt),&ci);
201: ci[0] = 0;
202: for (i=0; i<am; i++){
203: ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
204: }
205: PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);
206: PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);
208: k = 0;
209: for (i=0; i<am; i++){
210: /* diagonal portion of A */
211: ncols = ai[i+1] - ai[i];
212: for (j=0; j<ncols; j++) {
213: cj[k] = *aj++;
214: ca[k++] = *aa++;
215: }
216: /* off-diagonal portion of A */
217: ncols = bi[i+1] - bi[i];
218: for (j=0; j<ncols; j++) {
219: cj[k] = an + (*bj); bj++;
220: ca[k++] = *ba++;
221: }
222: }
223: if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);
225: /* put together the new matrix */
226: an = mpimat->A->cmap->n+mpimat->B->cmap->n;
227: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);
229: /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
230: /* Since these are PETSc arrays, change flags to free them as necessary. */
231: mat = (Mat_SeqAIJ*)(*Aloc)->data;
232: mat->free_a = PETSC_TRUE;
233: mat->free_ij = PETSC_TRUE;
235: mat->nonew = 0;
236: } else if (scall == MAT_REUSE_MATRIX){
237: mat=(Mat_SeqAIJ*)(*Aloc)->data;
238: ci = mat->i; cj = mat->j; ca = mat->a;
239: for (i=0; i<am; i++) {
240: /* diagonal portion of A */
241: ncols = ai[i+1] - ai[i];
242: for (j=0; j<ncols; j++) *ca++ = *aa++;
243: /* off-diagonal portion of A */
244: ncols = bi[i+1] - bi[i];
245: for (j=0; j<ncols; j++) *ca++ = *ba++;
246: }
247: } else {
248: SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
249: }
250: return(0);
251: }
256: static PetscErrorCode MatDestroy_ML(Mat A)
257: {
259: Mat_MLShell *shell;
262: MatShellGetContext(A,(void **)&shell);
263: VecDestroy(shell->y);
264: PetscFree(shell);
265: MatDestroy_Shell(A);
266: PetscObjectChangeTypeName((PetscObject)A,0);
267: return(0);
268: }
272: static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
273: {
274: struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
275: PetscErrorCode ierr;
276: PetscInt m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
277: PetscInt *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
278: PetscScalar *ml_vals=matdata->values,*aa;
279:
281: if ( mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
282: if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
283: if (reuse){
284: Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
285: aij->i = ml_rowptr;
286: aij->j = ml_cols;
287: aij->a = ml_vals;
288: } else {
289: /* sort ml_cols and ml_vals */
290: PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
291: for (i=0; i<m; i++) {
292: nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
293: }
294: aj = ml_cols; aa = ml_vals;
295: for (i=0; i<m; i++){
296: PetscSortIntWithScalarArray(nnz[i],aj,aa);
297: aj += nnz[i]; aa += nnz[i];
298: }
299: MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);
300: PetscFree(nnz);
301: }
302: return(0);
303: }
305: /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */
306: MatCreate(PETSC_COMM_SELF,newmat);
307: MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);
308: MatSetType(*newmat,MATSEQAIJ);
310: PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
311: nz_max = 1;
312: for (i=0; i<m; i++) {
313: nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
314: if (nnz[i] > nz_max) nz_max = nnz[i];
315: }
317: MatSeqAIJSetPreallocation(*newmat,0,nnz);
318: PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);
319: for (i=0; i<m; i++){
320: k = 0;
321: /* diagonal entry */
322: aj[k] = i; aa[k++] = ml_vals[i];
323: /* off diagonal entries */
324: for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
325: aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
326: }
327: /* sort aj and aa */
328: PetscSortIntWithScalarArray(nnz[i],aj,aa);
329: MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);
330: }
331: MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);
332: MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);
334: PetscFree2(aa,aj);
335: PetscFree(nnz);
336: return(0);
337: }
341: static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
342: {
344: PetscInt m,n;
345: ML_Comm *MLcomm;
346: Mat_MLShell *shellctx;
349: m = mlmat->outvec_leng;
350: n = mlmat->invec_leng;
351: if (!m || !n){
352: *newmat = PETSC_NULL;
353: return(0);
354: }
356: if (reuse){
357: MatShellGetContext(*newmat,(void **)&shellctx);
358: shellctx->mlmat = mlmat;
359: return(0);
360: }
362: MLcomm = mlmat->comm;
363: PetscNew(Mat_MLShell,&shellctx);
364: MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);
365: MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);
366: MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);
367: shellctx->A = *newmat;
368: shellctx->mlmat = mlmat;
369: VecCreate(PETSC_COMM_WORLD,&shellctx->y);
370: VecSetSizes(shellctx->y,m,PETSC_DECIDE);
371: VecSetFromOptions(shellctx->y);
372: (*newmat)->ops->destroy = MatDestroy_ML;
373: return(0);
374: }
378: static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
379: {
380: struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
381: PetscInt *ml_cols=matdata->columns,*aj;
382: PetscScalar *ml_vals=matdata->values,*aa;
383: PetscErrorCode ierr;
384: PetscInt i,j,k,*gordering;
385: PetscInt m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
386: Mat A;
389: if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
390: n = mlmat->invec_leng;
391: if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);
393: MatCreate(mlmat->comm->USR_comm,&A);
394: MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);
395: MatSetType(A,MATMPIAIJ);
396: PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);
397:
398: nz_max = 0;
399: for (i=0; i<m; i++){
400: nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
401: if (nz_max < nnz[i]) nz_max = nnz[i];
402: nnzA[i] = 1; /* diag */
403: for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
404: if (ml_cols[j] < m) nnzA[i]++;
405: }
406: nnzB[i] = nnz[i] - nnzA[i];
407: }
408: MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);
410: /* insert mat values -- remap row and column indices */
411: nz_max++;
412: PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);
413: /* create global row numbering for a ML_Operator */
414: ML_build_global_numbering(mlmat,&gordering,"rows");
415: for (i=0; i<m; i++){
416: row = gordering[i];
417: k = 0;
418: /* diagonal entry */
419: aj[k] = row; aa[k++] = ml_vals[i];
420: /* off diagonal entries */
421: for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
422: aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
423: }
424: MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);
425: }
426: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
427: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
428: *newmat = A;
430: PetscFree3(nnzA,nnzB,nnz);
431: PetscFree2(aa,aj);
432: return(0);
433: }
435: /* -----------------------------------------------------------------------------*/
438: PetscErrorCode PCDestroy_ML_Private(void *ptr)
439: {
440: PetscErrorCode ierr;
441: PC_ML *pc_ml = (PC_ML*)ptr;
442: PetscInt level,fine_level=pc_ml->Nlevels-1;
445: ML_Aggregate_Destroy(&pc_ml->agg_object);
446: ML_Destroy(&pc_ml->ml_object);
448: if (pc_ml->PetscMLdata) {
449: PetscFree(pc_ml->PetscMLdata->pwork);
450: if (pc_ml->size > 1) {MatDestroy(pc_ml->PetscMLdata->Aloc);}
451: if (pc_ml->PetscMLdata->x){VecDestroy(pc_ml->PetscMLdata->x);}
452: if (pc_ml->PetscMLdata->y){VecDestroy(pc_ml->PetscMLdata->y);}
453: }
454: PetscFree(pc_ml->PetscMLdata);
456: for (level=0; level<fine_level; level++){
457: if (pc_ml->gridctx[level].A){MatDestroy(pc_ml->gridctx[level].A);}
458: if (pc_ml->gridctx[level].P){MatDestroy(pc_ml->gridctx[level].P);}
459: if (pc_ml->gridctx[level].R){MatDestroy(pc_ml->gridctx[level].R);}
460: if (pc_ml->gridctx[level].x){VecDestroy(pc_ml->gridctx[level].x);}
461: if (pc_ml->gridctx[level].b){VecDestroy(pc_ml->gridctx[level].b);}
462: if (pc_ml->gridctx[level+1].r){VecDestroy(pc_ml->gridctx[level+1].r);}
463: }
464: PetscFree(pc_ml->gridctx);
465: return(0);
466: }
467: /* -------------------------------------------------------------------------- */
468: /*
469: PCSetUp_ML - Prepares for the use of the ML preconditioner
470: by setting data structures and options.
472: Input Parameter:
473: . pc - the preconditioner context
475: Application Interface Routine: PCSetUp()
477: Notes:
478: The interface routine PCSetUp() is not usually called directly by
479: the user, but instead is called by PCApply() if necessary.
480: */
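#if 0
/* Editorial sketch (not part of ml.c): how PCSetUp_ML() is typically reached.
   Assumes an application KSP 'ksp' configured with -pc_type ml and an assembled
   AIJ operator A; 'b' and 'x' are placeholder Vecs.  The user never calls
   PCSetUp_ML() directly; KSPSolve() invokes PCSetUp(), which dispatches to
   pc->ops->setup. */
KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
KSPSolve(ksp,b,x);   /* first solve triggers PCSetUp() -> PCSetUp_ML() */
#endif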
486: PetscErrorCode PCSetUp_ML(PC pc)
487: {
488: PetscErrorCode ierr;
489: PetscMPIInt size;
490: FineGridCtx *PetscMLdata;
491: ML *ml_object;
492: ML_Aggregate *agg_object;
493: ML_Operator *mlmat;
494: PetscInt nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
495: Mat A,Aloc;
496: GridCtx *gridctx;
497: PC_MG *mg = (PC_MG*)pc->data;
498: PC_ML *pc_ml = (PC_ML*)mg->innerctx;
499: PetscTruth isSeq, isMPI;
500: KSP smoother;
501: PC subpc;
504: if (pc->setupcalled){
505: /* since ML can change the size of vectors/matrices at any level we must destroy everything */
506: PCDestroy_ML_Private(pc_ml);
507: PCDestroy_MG_Private(pc);
508: }
509:
510: /* setup special features of PCML */
511: /*--------------------------------*/
512: /* convert A to Aloc to be used by ML at the fine grid */
513: A = pc->pmat;
514: MPI_Comm_size(((PetscObject)A)->comm,&size);
515: pc_ml->size = size;
516: PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);
517: PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);
518: if (isMPI){
519: MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);
520: } else if (isSeq) {
521: Aloc = A;
522: } else {
523: SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid matrix type for ML. ML can only handle AIJ matrices.");
524: }
526: /* create and initialize struct 'PetscMLdata' */
527: PetscNewLog(pc,FineGridCtx,&PetscMLdata);
528: pc_ml->PetscMLdata = PetscMLdata;
529: PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);
531: VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);
532: VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);
533: VecSetType(PetscMLdata->x,VECSEQ);
535: VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);
536: VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);
537: VecSetType(PetscMLdata->y,VECSEQ);
538: PetscMLdata->A = A;
539: PetscMLdata->Aloc = Aloc;
540:
541: /* create ML discretization matrix at fine grid */
542: /* ML requires the fine-grid matrix as input; it determines the number of levels. */
543: MatGetSize(Aloc,&m,&nlocal_allcols);
544: MatGetBlockSize(A,&bs);
545: ML_Create(&ml_object,pc_ml->MaxNlevels);
546: pc_ml->ml_object = ml_object;
547: ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
548: ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
549: ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
550:
551: /* aggregation */
552: ML_Aggregate_Create(&agg_object);
553: pc_ml->agg_object = agg_object;
555: ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);
556: ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
557: /* set options */
558: switch (pc_ml->CoarsenScheme) {
559: case 1:
560: ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
561: case 2:
562: ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
563: case 3:
564: ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
565: }
566: ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
567: ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
568: if (pc_ml->SpectralNormScheme_Anorm){
569: ML_Set_SpectralNormScheme_Anorm(ml_object);
570: }
572: Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
573: if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
574: pc_ml->Nlevels = Nlevels;
575: fine_level = Nlevels - 1;
577: PCMGSetLevels(pc,Nlevels,PETSC_NULL);
578: /* set default smoothers: Richardson with SOR, the same in serial and in parallel */
579: for (level=1; level<=fine_level; level++){
581: PCMGGetSmoother(pc,level,&smoother);
582: KSPSetType(smoother,KSPRICHARDSON);
583: KSPGetPC(smoother,&subpc);
584: PCSetType(subpc,PCSOR);
591: }
592: PCSetFromOptions_MG(pc); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
593:
594: PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);
595: pc_ml->gridctx = gridctx;
597: /* wrap ML matrices by PETSc shell matrices at coarsened grids.
598: Level 0 is the finest grid for ML, but coarsest for PETSc! */
599: gridctx[fine_level].A = A;
600:
601: level = fine_level - 1;
602: if (size == 1){ /* convert ML P, R and A into seqaij format */
603: for (mllevel=1; mllevel<Nlevels; mllevel++){
604: mlmat = &(ml_object->Pmat[mllevel]);
605: MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);
606: mlmat = &(ml_object->Rmat[mllevel-1]);
607: MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);
608:
609: mlmat = &(ml_object->Amat[mllevel]);
610: MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);
611: level--;
612: }
613: } else { /* convert ML P and R into shell format, ML A into mpiaij format */
614: for (mllevel=1; mllevel<Nlevels; mllevel++){
615: mlmat = &(ml_object->Pmat[mllevel]);
616: MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);
617: mlmat = &(ml_object->Rmat[mllevel-1]);
618: MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);
620: mlmat = &(ml_object->Amat[mllevel]);
621: MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);
622: level--;
623: }
624: }
626: /* create vectors and ksp at all levels */
627: for (level=0; level<fine_level; level++){
628: level1 = level + 1;
629: VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);
630: VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);
631: VecSetType(gridctx[level].x,VECMPI);
632: PCMGSetX(pc,level,gridctx[level].x);
633:
634: VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);
635: VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);
636: VecSetType(gridctx[level].b,VECMPI);
637: PCMGSetRhs(pc,level,gridctx[level].b);
638:
639: VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);
640: VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);
641: VecSetType(gridctx[level1].r,VECMPI);
642: PCMGSetR(pc,level1,gridctx[level1].r);
644: if (level == 0){
645: PCMGGetCoarseSolve(pc,&gridctx[level].ksp);
646: } else {
647: PCMGGetSmoother(pc,level,&gridctx[level].ksp);
648: }
649: }
650: PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);
652: /* create coarse level and the interpolation between the levels */
653: for (level=0; level<fine_level; level++){
654: level1 = level + 1;
655: PCMGSetInterpolation(pc,level1,gridctx[level].P);
656: PCMGSetRestriction(pc,level1,gridctx[level].R);
657: if (level > 0){
658: PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);
659: }
660: KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);
661: }
662: PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);
663: KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);
665: /* setupcalled is set to 0 so that MG is set up from scratch */
666: pc->setupcalled = 0;
667: PCSetUp_MG(pc);
668: return(0);
669: }
671: /* -------------------------------------------------------------------------- */
672: /*
673: PCDestroy_ML - Destroys the private context for the ML preconditioner
674: that was created with PCCreate_ML().
676: Input Parameter:
677: . pc - the preconditioner context
679: Application Interface Routine: PCDestroy()
680: */
683: PetscErrorCode PCDestroy_ML(PC pc)
684: {
685: PetscErrorCode ierr;
686: PC_MG *mg = (PC_MG*)pc->data;
687: PC_ML *pc_ml= (PC_ML*)mg->innerctx;
690: PCDestroy_ML_Private(pc_ml);
691: PetscFree(pc_ml);
692: PCDestroy_MG(pc);
693: return(0);
694: }
698: PetscErrorCode PCSetFromOptions_ML(PC pc)
699: {
700: PetscErrorCode ierr;
701: PetscInt indx,PrintLevel;
702: const char *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
703: PC_MG *mg = (PC_MG*)pc->data;
704: PC_ML *pc_ml = (PC_ML*)mg->innerctx;
707: PetscOptionsHead("ML options");
708: PrintLevel = 0;
709: indx = 0;
710: PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);
711: ML_Set_PrintLevel(PrintLevel);
712: PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);
713: PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);
714: PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);
715: pc_ml->CoarsenScheme = indx;
716: PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);
717: PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);
718: PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);
719: PetscOptionsTail();
720: return(0);
721: }
723: /* -------------------------------------------------------------------------- */
724: /*
725: PCCreate_ML - Creates a ML preconditioner context, PC_ML,
726: and sets this as the private data within the generic preconditioning
727: context, PC, that was created within PCCreate().
729: Input Parameter:
730: . pc - the preconditioner context
732: Application Interface Routine: PCCreate()
733: */
735: /*MC
736: PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
737: the fine-grid discretization matrix. The coarser-grid matrices and restriction/interpolation
738: operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
739: and the restriction/interpolation operators wrapped as PETSc shell matrices.
741: Options Database Keys:
742: Multigrid options (inherited):
743: + -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
744: . -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
745: . -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
746: - -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
747:
748: ML options:
749: + -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
750: . -pc_ml_maxNlevels <10>: Maximum number of levels (None)
751: . -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
752: . -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
753: . -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
754: . -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
755: - -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
757: Level: intermediate
759: Concepts: multigrid
760:
761: .seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
762: PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
763: PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
764: PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
765: PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
766: M*/
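#if 0
/* Editorial usage sketch (not part of ml.c): selecting ML from application code
   and from the options database.  Assumes an existing KSP 'ksp' whose operator is
   an assembled (MPI)AIJ matrix; 'b' and 'x' are placeholder Vecs. */
PC pc;
KSPGetPC(ksp,&pc);
PCSetType(pc,PCML);        /* equivalent to the runtime option -pc_type ml */
KSPSetFromOptions(ksp);    /* picks up options such as -pc_ml_maxNlevels 6 -pc_ml_CoarsenScheme MIS */
KSPSolve(ksp,b,x);
#endif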
771: PetscErrorCode PCCreate_ML(PC pc)
772: {
773: PetscErrorCode ierr;
774: PC_ML *pc_ml;
775: PC_MG *mg;
778: /* PCML is derived from PCMG; initialize pc as a PCMG */
779: PetscObjectChangeTypeName((PetscObject)pc,PCML);
780: PCSetType(pc,PCMG); /* calls PCCreate_MG() and MGCreate_Private() */
782: /* create a supporting struct and attach it to pc */
783: PetscNewLog(pc,PC_ML,&pc_ml);
784: mg = (PC_MG*)pc->data;
785: mg->innerctx = pc_ml;
786:
787: pc_ml->ml_object = 0;
788: pc_ml->agg_object = 0;
789: pc_ml->gridctx = 0;
790: pc_ml->PetscMLdata = 0;
791: pc_ml->Nlevels = -1;
792: pc_ml->MaxNlevels = 10;
793: pc_ml->MaxCoarseSize = 1;
794: pc_ml->CoarsenScheme = 1;
795: pc_ml->Threshold = 0.0;
796: pc_ml->DampingFactor = 4.0/3.0;
797: pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
798: pc_ml->size = 0;
800: /* override the PCMG function pointers with the PCML versions */
801: pc->ops->setfromoptions = PCSetFromOptions_ML;
802: pc->ops->setup = PCSetUp_ML;
803: pc->ops->destroy = PCDestroy_ML;
804: return(0);
805: }