Actual source code: neprefine.c

slepc-3.7.3 2016-09-29
/*
   Newton refinement for NEP, simple version.

   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
   SLEPc - Scalable Library for Eigenvalue Problem Computations
   Copyright (c) 2002-2016, Universitat Politecnica de Valencia, Spain

   This file is part of SLEPc.

   SLEPc is free software: you can redistribute it and/or modify it under  the
   terms of version 3 of the GNU Lesser General Public License as published by
   the Free Software Foundation.

   SLEPc  is  distributed in the hope that it will be useful, but WITHOUT  ANY
   WARRANTY;  without even the implied warranty of MERCHANTABILITY or  FITNESS
   FOR  A  PARTICULAR PURPOSE. See the GNU Lesser General Public  License  for
   more details.

   You  should have received a copy of the GNU Lesser General  Public  License
   along with SLEPc. If not, see <http://www.gnu.org/licenses/>.
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*/

#include <slepc/private/nepimpl.h>
#include <slepcblaslapack.h>

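/* Default number of refinement iterations when the caller does not specify maxits */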
#define NREF_MAXIT 10

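/*
   Context for simple Newton refinement. With nep->npart>1 the communicator is
   split and A/fn hold redundant copies of the problem matrices and functions
   on each subcommunicator; the scatters move eigenvectors between the parent
   communicator and the subcommunicators
*/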
typedef struct {
  PetscSubcomm  subc;
  VecScatter    *scatter_id,nst;
  Mat           *A;
  Vec           nv,vg,v,w;
  FN            *fn;
} NEPSimpNRefctx;

typedef struct {
  Mat          M1;
  Vec          M2,M3;
  PetscScalar  M4,m3;
} FSubctx;

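/*
   Shell matrix-vector product used by the Schur refinement scheme:
   y = M1*x - (m3/M4)*(M3^H*x)*M2, i.e. a rank-one correction of M1
*/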
static PetscErrorCode MatFSMult(Mat M,Vec x,Vec y)
{
  FSubctx        *ctx;
  PetscScalar    t;

  MatShellGetContext(M,&ctx);
  VecDot(x,ctx->M3,&t);
  t *= ctx->m3/ctx->M4;
  MatMult(ctx->M1,x,y);
  VecAXPY(y,-t,ctx->M2);
  return(0);
}

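/*
   Allocate and populate the refinement context: with several partitions,
   replicate the matrices and functions on each subcommunicator and build the
   scatters used to move eigenvectors around; for the explicit scheme, also
   build the scatter that gathers a whole eigenvector on the last process,
   which owns the extra row/column of the bordered matrix
*/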
static PetscErrorCode NEPSimpleNRefSetUp(NEP nep,NEPSimpNRefctx **ctx_)
{
  PetscInt       i,si,j,n0,m0,nloc,*idx1,*idx2,ne;
  IS             is1,is2;
  NEPSimpNRefctx *ctx;
  Vec            v;
  PetscMPIInt    rank,size;

  PetscCalloc1(1,ctx_);
  ctx = *ctx_;
  ctx->subc = nep->refinesubc;
  if (nep->npart==1) {
    ctx->A  = nep->A;
    ctx->fn = nep->f;
  } else {
    PetscMalloc2(nep->nt,&ctx->A,nep->npart,&ctx->scatter_id);

    /* Duplicate matrices */
    for (i=0;i<nep->nt;i++) {
      MatCreateRedundantMatrix(nep->A[i],0,PetscSubcommChild(ctx->subc),MAT_INITIAL_MATRIX,&ctx->A[i]);
    }
    MatCreateVecs(ctx->A[0],&ctx->v,NULL);

    /* Duplicate FNs */
    PetscMalloc1(nep->nt,&ctx->fn);
    for (i=0;i<nep->nt;i++) {
      FNDuplicate(nep->f[i],PetscSubcommChild(ctx->subc),&ctx->fn[i]);
    }

    /* Create scatters for sending vectors to each subcommunicator */
    BVGetColumn(nep->V,0,&v);
    VecGetOwnershipRange(v,&n0,&m0);
    BVRestoreColumn(nep->V,0,&v);
    VecGetLocalSize(ctx->v,&nloc);
    PetscMalloc2(m0-n0,&idx1,m0-n0,&idx2);
    VecCreateMPI(PetscObjectComm((PetscObject)nep),nloc,PETSC_DECIDE,&ctx->vg);
    for (si=0;si<nep->npart;si++) {
      j = 0;
      for (i=n0;i<m0;i++) {
        idx1[j]   = i;
        idx2[j++] = i+nep->n*si;
      }
      ISCreateGeneral(PetscObjectComm((PetscObject)nep),(m0-n0),idx1,PETSC_COPY_VALUES,&is1);
      ISCreateGeneral(PetscObjectComm((PetscObject)nep),(m0-n0),idx2,PETSC_COPY_VALUES,&is2);
      BVGetColumn(nep->V,0,&v);
      VecScatterCreate(v,is1,ctx->vg,is2,&ctx->scatter_id[si]);
      BVRestoreColumn(nep->V,0,&v);
      ISDestroy(&is1);
      ISDestroy(&is2);
    }
    PetscFree2(idx1,idx2);
  }
  if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
    MPI_Comm_rank(PetscObjectComm((PetscObject)ctx->A[0]),&rank);
    MPI_Comm_size(PetscObjectComm((PetscObject)ctx->A[0]),&size);
    if (size>1) {
      if (nep->npart==1) {
        BVGetColumn(nep->V,0,&v);
      } else v = ctx->v;
      VecGetOwnershipRange(v,&n0,&m0);
      ne = (rank == size-1)?nep->n:0;
      VecCreateMPI(PetscObjectComm((PetscObject)ctx->A[0]),ne,PETSC_DECIDE,&ctx->nv);
      PetscMalloc1(m0-n0,&idx1);
      for (i=n0;i<m0;i++) {
        idx1[i-n0] = i;
      }
      ISCreateGeneral(PetscObjectComm((PetscObject)nep),(m0-n0),idx1,PETSC_COPY_VALUES,&is1);
      VecScatterCreate(v,is1,ctx->nv,is1,&ctx->nst);
      if (nep->npart==1) {
        BVRestoreColumn(nep->V,0,&v);
      }
      PetscFree(idx1);
      ISDestroy(&is1);
    }
  }
  return(0);
}

/*
  Gather eigenpair idx from the subcommunicator with color sc
*/
static PetscErrorCode NEPSimpleNRefGatherEigenpair(NEP nep,NEPSimpNRefctx *ctx,PetscInt sc,PetscInt idx,PetscInt *fail)
{
  PetscMPIInt    nproc,p;
  MPI_Comm       comm=((PetscObject)nep)->comm;
  Vec            v;
  PetscScalar    *array;

  if (nep->npart>1) {
    MPI_Comm_size(comm,&nproc);
    p = (nproc/nep->npart)*sc+PetscMin(sc,nproc%nep->npart);
    /* Communicate whether the refinement was successful */
    MPI_Bcast(fail,1,MPIU_INT,p,comm);
    if (!(*fail)) {
      /* Process 0 of subcommunicator sc broadcasts the eigenvalue */
      MPI_Bcast(&nep->eigr[idx],1,MPIU_SCALAR,p,comm);
      /* Gather nep->V[idx] from the subcommunicator sc */
      BVGetColumn(nep->V,idx,&v);
      if (ctx->subc->color==sc) {
        VecGetArray(ctx->v,&array);
        VecPlaceArray(ctx->vg,array);
      }
      VecScatterBegin(ctx->scatter_id[sc],ctx->vg,v,INSERT_VALUES,SCATTER_REVERSE);
      VecScatterEnd(ctx->scatter_id[sc],ctx->vg,v,INSERT_VALUES,SCATTER_REVERSE);
      if (ctx->subc->color==sc) {
        VecResetArray(ctx->vg);
        VecRestoreArray(ctx->v,&array);
      }
      BVRestoreColumn(nep->V,idx,&v);
    }
  } else {
    if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT && !(*fail)) {
      MPI_Comm_size(comm,&nproc);
      p = (nproc/nep->npart)*sc+PetscMin(sc,nproc%nep->npart);
      MPI_Bcast(&nep->eigr[idx],1,MPIU_SCALAR,p,comm);
    }
  }
  return(0);
}

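/*
  Scatter the approximate eigenvector idx from the parent communicator to the
  subcommunicator with color sc
*/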
static PetscErrorCode NEPSimpleNRefScatterEigenvector(NEP nep,NEPSimpNRefctx *ctx,PetscInt sc,PetscInt idx)
{
  Vec            v;
  PetscScalar    *array;

  if (nep->npart>1) {
    BVGetColumn(nep->V,idx,&v);
    if (ctx->subc->color==sc) {
      VecGetArray(ctx->v,&array);
      VecPlaceArray(ctx->vg,array);
    }
    VecScatterBegin(ctx->scatter_id[sc],v,ctx->vg,INSERT_VALUES,SCATTER_FORWARD);
    VecScatterEnd(ctx->scatter_id[sc],v,ctx->vg,INSERT_VALUES,SCATTER_FORWARD);
    if (ctx->subc->color==sc) {
      VecResetArray(ctx->vg);
      VecRestoreArray(ctx->v,&array);
    }
    BVRestoreColumn(nep->V,idx,&v);
  }
  return(0);
}

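/*
  Build the linear system for one Newton step on eigenpair idx, with
  T(lambda)=sum_i f_i(lambda)*A_i and w=T'(lambda)*v. The explicit scheme
  assembles the bordered matrix [T(lambda) w; v^H 0] of size n+1; the Schur
  scheme wraps the correction in a shell matrix (see MatFSMult) and builds P
  as T(lambda) plus a diagonal correction; MBE uses T(lambda) itself as both
  operator and preconditioner
*/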
static PetscErrorCode NEPSimpleNRefSetUpSystem(NEP nep,NEPSimpNRefctx *ctx,Mat *A,PetscInt idx,Mat *Mt,Mat *T,Mat *P,PetscBool ini,Vec t,Vec v)
{
  PetscInt          i,st,ml,m0,n0,m1,mg;
  PetscInt          *dnz,*onz,ncols,*cols2,*nnz,nt=nep->nt;
  PetscScalar       zero=0.0,*coeffs,*coeffs2;
  PetscMPIInt       rank,size;
  MPI_Comm          comm;
  const PetscInt    *cols;
  const PetscScalar *vals,*array;
  FSubctx           *fctx;
  Vec               w=ctx->w;
  Mat               M;

  PetscMalloc2(nt,&coeffs,nt,&coeffs2);
  switch (nep->scheme) {
  case NEP_REFINE_SCHEME_SCHUR:
    if (ini) {
      PetscCalloc1(1,&fctx);
      MatGetSize(A[0],&m0,&n0);
      MatCreateShell(PetscObjectComm((PetscObject)A[0]),PETSC_DECIDE,PETSC_DECIDE,m0,n0,fctx,T);
      MatShellSetOperation(*T,MATOP_MULT,(void(*)(void))MatFSMult);
    } else {
      MatShellGetContext(*T,&fctx);
    }
    M=fctx->M1;
    break;
  case NEP_REFINE_SCHEME_MBE:
    M=*T;
    break;
  case NEP_REFINE_SCHEME_EXPLICIT:
    M=*Mt;
    break;
  }
  if (ini) {
    MatDuplicate(A[0],MAT_COPY_VALUES,&M);
  } else {
    MatCopy(A[0],M,DIFFERENT_NONZERO_PATTERN);
  }
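  /* Form M = T(lambda) = sum_i f_i(lambda)*A_i */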
  for (i=0;i<nt;i++) {
    FNEvaluateFunction(ctx->fn[i],nep->eigr[idx],coeffs+i);
  }
  if (coeffs[0]!=1.0) {
    MatScale(M,coeffs[0]);
  }
  for (i=1;i<nt;i++) {
    MatAXPY(M,coeffs[i],A[i],(ini)?nep->mstr:SUBSET_NONZERO_PATTERN);
  }
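  /* Compute w = T'(lambda)*v, skipping leading terms with zero derivative */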
  for (i=0;i<nt;i++) {
    FNEvaluateDerivative(ctx->fn[i],nep->eigr[idx],coeffs2+i);
  }
  st = 0;
  for (i=0;i<nt && PetscAbsScalar(coeffs2[i])==0.0;i++) st++;
  MatMult(A[st],v,w);
  if (coeffs2[st]!=1.0) {
    VecScale(w,coeffs2[st]);
  }
  for (i=st+1;i<nt;i++) {
    MatMult(A[i],v,t);
    VecAXPY(w,coeffs2[i],t);
  }

  switch (nep->scheme) {
  case NEP_REFINE_SCHEME_EXPLICIT:
    comm = PetscObjectComm((PetscObject)A[0]);
    MPI_Comm_rank(comm,&rank);
    MPI_Comm_size(comm,&size);
    MatGetSize(M,&mg,NULL);
    MatGetOwnershipRange(M,&m0,&m1);
    if (ini) {
      MatCreate(comm,T);
      MatGetLocalSize(M,&ml,NULL);
      if (rank==size-1) ml++;
      MatSetSizes(*T,ml,ml,mg+1,mg+1);
      MatSetFromOptions(*T);
      MatSetUp(*T);
      /* Preallocate T */
      if (size>1) {
        MatPreallocateInitialize(comm,ml,ml,dnz,onz);
        for (i=m0;i<m1;i++) {
          MatGetRow(M,i,&ncols,&cols,NULL);
          MatPreallocateSet(i,ncols,cols,dnz,onz);
          MatPreallocateSet(i,1,&mg,dnz,onz);
          MatRestoreRow(M,i,&ncols,&cols,NULL);
        }
        if (rank==size-1) {
          PetscCalloc1(mg+1,&cols2);
          for (i=0;i<mg+1;i++) cols2[i]=i;
          MatPreallocateSet(m1,mg+1,cols2,dnz,onz);
          PetscFree(cols2);
        }
        MatMPIAIJSetPreallocation(*T,0,dnz,0,onz);
        MatPreallocateFinalize(dnz,onz);
      } else {
        PetscCalloc1(mg+1,&nnz);
        for (i=0;i<mg;i++) {
          MatGetRow(M,i,&ncols,NULL,NULL);
          nnz[i] = ncols+1;
          MatRestoreRow(M,i,&ncols,NULL,NULL);
        }
        nnz[mg] = mg+1;
        MatSeqAIJSetPreallocation(*T,0,nnz);
        PetscFree(nnz);
      }
      *Mt = M;
      *P  = *T;
    }

    /* Set values: copy M into the leading block and w into the last column */
    VecGetArrayRead(w,&array);
    for (i=m0;i<m1;i++) {
      MatGetRow(M,i,&ncols,&cols,&vals);
      MatSetValues(*T,1,&i,ncols,cols,vals,INSERT_VALUES);
      MatRestoreRow(M,i,&ncols,&cols,&vals);
      MatSetValues(*T,1,&i,1,&mg,array+i-m0,INSERT_VALUES);
    }
    VecRestoreArrayRead(w,&array);
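    /* The last row holds v^H, with a zero in the (mg,mg) corner */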
    VecConjugate(v);
    MPI_Comm_size(PetscObjectComm((PetscObject)A[0]),&size);
    MPI_Comm_rank(PetscObjectComm((PetscObject)A[0]),&rank);
    if (size>1) {
      if (rank==size-1) {
        PetscMalloc1(nep->n,&cols2);
        for (i=0;i<nep->n;i++) cols2[i]=i;
      }
      VecScatterBegin(ctx->nst,v,ctx->nv,INSERT_VALUES,SCATTER_FORWARD);
      VecScatterEnd(ctx->nst,v,ctx->nv,INSERT_VALUES,SCATTER_FORWARD);
      VecGetArrayRead(ctx->nv,&array);
      if (rank==size-1) {
        MatSetValues(*T,1,&mg,nep->n,cols2,array,INSERT_VALUES);
        MatSetValues(*T,1,&mg,1,&mg,&zero,INSERT_VALUES);
      }
      VecRestoreArrayRead(ctx->nv,&array);
    } else {
      PetscMalloc1(m1-m0,&cols2);
      for (i=0;i<m1-m0;i++) cols2[i]=m0+i;
      VecGetArrayRead(v,&array);
      MatSetValues(*T,1,&mg,m1-m0,cols2,array,INSERT_VALUES);
      MatSetValues(*T,1,&mg,1,&mg,&zero,INSERT_VALUES);
      VecRestoreArrayRead(v,&array);
    }
    VecConjugate(v);
    MatAssemblyBegin(*T,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(*T,MAT_FINAL_ASSEMBLY);
    PetscFree(cols2);
    break;
  case NEP_REFINE_SCHEME_SCHUR:
    fctx->M2 = ctx->w;
    fctx->M3 = v;
    fctx->m3 = 0.0;
    for (i=1;i<nt-1;i++) fctx->m3 += PetscConj(coeffs[i])*coeffs[i];
    fctx->M4 = 0.0;
    for (i=1;i<nt-1;i++) fctx->M4 += PetscConj(coeffs[i])*coeffs2[i];
    fctx->M1 = M;
    if (ini) {
      MatDuplicate(M,MAT_COPY_VALUES,P);
    } else {
      MatCopy(M,*P,SAME_NONZERO_PATTERN);
    }
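    /* P = M - (m3/M4)*diag(conj(v).*w): the diagonal part of the rank-one
       corrected operator applied by MatFSMult */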
    VecConjugate(v);
    VecPointwiseMult(t,v,w);
    VecConjugate(v);
    VecScale(t,-fctx->m3/fctx->M4);
    MatDiagonalSet(*P,t,ADD_VALUES);
    break;
  case NEP_REFINE_SCHEME_MBE:
    *T = M;
    *P = M;
    break;
  }
  PetscFree2(coeffs,coeffs2);
  return(0);
}

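/*
  Driver for simple Newton refinement: eigenpairs are dealt out to the
  subcommunicators, and each one iterates Newton steps on its current eigenpair
  until the relative error drops below tol, the iteration limit is reached, or
  the linear solve fails
*/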
PetscErrorCode NEPNewtonRefinementSimple(NEP nep,PetscInt *maxits,PetscReal tol,PetscInt k)
{
  PetscInt           i,n,its,idx=0,*idx_sc,*its_sc,color,*fail_sc;
  PetscMPIInt        rank,size;
  Mat                Mt=NULL,T=NULL,P=NULL;
  MPI_Comm           comm;
  Vec                r,v,dv,rr=NULL,dvv=NULL,t[2];
  const PetscScalar  *array;
  PetscScalar        *array2,deig=0.0,tt[2],ttt;
  PetscReal          norm,error;
  PetscBool          ini=PETSC_TRUE,sc_pend,solved=PETSC_FALSE;
  NEPSimpNRefctx     *ctx;
  FSubctx            *fctx=NULL;
  KSPConvergedReason reason;

  PetscLogEventBegin(NEP_Refine,nep,0,0,0);
  NEPSimpleNRefSetUp(nep,&ctx);
  its = (maxits)?*maxits:NREF_MAXIT;
  comm = (nep->npart==1)?PetscObjectComm((PetscObject)nep):PetscSubcommChild(ctx->subc);
  if (!nep->refineksp) { NEPRefineGetKSP(nep,&nep->refineksp); }
  if (nep->npart==1) {
    BVGetColumn(nep->V,0,&v);
  } else v = ctx->v;
  VecDuplicate(v,&ctx->w);
  VecDuplicate(v,&r);
  VecDuplicate(v,&dv);
  VecDuplicate(v,&t[0]);
  VecDuplicate(v,&t[1]);
  if (nep->npart==1) { BVRestoreColumn(nep->V,0,&v); }
  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);
  VecGetLocalSize(r,&n);
  PetscMalloc3(nep->npart,&idx_sc,nep->npart,&its_sc,nep->npart,&fail_sc);
  for (i=0;i<nep->npart;i++) fail_sc[i] = 0;
  for (i=0;i<nep->npart;i++) its_sc[i] = 0;
  color = (nep->npart==1)?0:ctx->subc->color;

  /* Loop performing iterative refinements */
  while (!solved) {
    for (i=0;i<nep->npart;i++) {
      sc_pend = PETSC_TRUE;
      if (its_sc[i]==0) {
        idx_sc[i] = idx++;
        if (idx_sc[i]>=k) {
          sc_pend = PETSC_FALSE;
        } else {
          NEPSimpleNRefScatterEigenvector(nep,ctx,i,idx_sc[i]);
        }
      } else { /* Gather eigenpair from subcommunicator i */
        NEPSimpleNRefGatherEigenpair(nep,ctx,i,idx_sc[i],&fail_sc[i]);
      }
      while (sc_pend) {
        if (!fail_sc[i]) {
          NEPComputeError(nep,idx_sc[i],NEP_ERROR_RELATIVE,&error);
        }
        if (error<=tol || its_sc[i]>=its || fail_sc[i]) {
          idx_sc[i] = idx++;
          its_sc[i] = 0;
          if (idx_sc[i]<k) { NEPSimpleNRefScatterEigenvector(nep,ctx,i,idx_sc[i]); }
        } else {
          sc_pend = PETSC_FALSE;
          its_sc[i]++;
        }
        if (idx_sc[i]>=k) sc_pend = PETSC_FALSE;
      }
    }
    solved = PETSC_TRUE;
    for (i=0;i<nep->npart&&solved;i++) solved = PetscNot(idx_sc[i]<k);
    if (idx_sc[color]<k) {
#if !defined(PETSC_USE_COMPLEX)
      if (nep->eigi[idx_sc[color]]!=0.0) SETERRQ(PetscObjectComm((PetscObject)nep),1,"Simple refinement not implemented in real scalars for complex eigenvalues");
#endif
      if (nep->npart==1) {
        BVGetColumn(nep->V,idx_sc[color],&v);
      } else v = ctx->v;
      NEPSimpleNRefSetUpSystem(nep,ctx,ctx->A,idx_sc[color],&Mt,&T,&P,ini,t[0],v);
      KSPSetOperators(nep->refineksp,T,P);
      if (ini) {
        KSPSetFromOptions(nep->refineksp);
        if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
          MatCreateVecs(T,&dvv,NULL);
          VecDuplicate(dvv,&rr);
        }
        ini = PETSC_FALSE;
      }
      switch (nep->scheme) {
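      /* Explicit scheme: solve the assembled bordered system
         [T(lambda) T'(lambda)*v; v^H 0]*[dv; deig] = [T(lambda)*v; 0]
         and update (v,lambda) <- (v-dv, lambda-deig) */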
      case NEP_REFINE_SCHEME_EXPLICIT:
        MatMult(Mt,v,r);
        VecGetArrayRead(r,&array);
        if (rank==size-1) {
          VecGetArray(rr,&array2);
          PetscMemcpy(array2,array,n*sizeof(PetscScalar));
          array2[n] = 0.0;
          VecRestoreArray(rr,&array2);
        } else {
          VecPlaceArray(rr,array);
        }
        KSPSolve(nep->refineksp,rr,dvv);
        KSPGetConvergedReason(nep->refineksp,&reason);
        if (reason>0) {
          if (rank != size-1) {
            VecResetArray(rr);
          }
          VecRestoreArrayRead(r,&array);
          VecGetArrayRead(dvv,&array);
          VecPlaceArray(dv,array);
          VecAXPY(v,-1.0,dv);
          VecNorm(v,NORM_2,&norm);
          VecScale(v,1.0/norm);
          VecResetArray(dv);
          if (rank==size-1) nep->eigr[idx_sc[color]] -= array[n];
          VecRestoreArrayRead(dvv,&array);
        } else fail_sc[color] = 1;
        break;
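      /* MBE scheme: solve the same bordered system without assembling it,
         using mixed block elimination (solves with T and its transpose) */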
      case NEP_REFINE_SCHEME_MBE:
        MatMult(T,v,r);
        /* Mixed block elimination */
        VecConjugate(v);
        KSPSolveTranspose(nep->refineksp,v,t[0]);
        KSPGetConvergedReason(nep->refineksp,&reason);
        if (reason>0) {
          VecConjugate(t[0]);
          VecDot(ctx->w,t[0],&tt[0]);
          KSPSolve(nep->refineksp,ctx->w,t[1]);
          KSPGetConvergedReason(nep->refineksp,&reason);
          if (reason>0) {
            VecDot(t[1],v,&tt[1]);
            VecDot(r,t[0],&ttt);
            tt[0] = ttt/tt[0];
            VecAXPY(r,-tt[0],ctx->w);
            KSPSolve(nep->refineksp,r,dv);
            KSPGetConvergedReason(nep->refineksp,&reason);
            if (reason>0) {
              VecDot(dv,v,&ttt);
              tt[1] = ttt/tt[1];
              VecAXPY(dv,-tt[1],t[1]);
              deig = tt[0]+tt[1];
            }
          }
          VecConjugate(v);
          VecAXPY(v,-1.0,dv);
          VecNorm(v,NORM_2,&norm);
          VecScale(v,1.0/norm);
          nep->eigr[idx_sc[color]] -= deig;
          fail_sc[color] = 0;
        } else {
          VecConjugate(v);
          fail_sc[color] = 1;
        }
        break;
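      /* Schur scheme: solve with the shell operator applied by MatFSMult and
         recover the eigenvalue correction as deig = -(m3/M4)*(v^H*dv) */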
      case NEP_REFINE_SCHEME_SCHUR:
        MatShellGetContext(T,&fctx);
        MatMult(fctx->M1,v,r);
        KSPSolve(nep->refineksp,r,dv);
        KSPGetConvergedReason(nep->refineksp,&reason);
        if (reason>0) {
          VecDot(dv,v,&deig);
          deig *= -fctx->m3/fctx->M4;
          VecAXPY(v,-1.0,dv);
          VecNorm(v,NORM_2,&norm);
          VecScale(v,1.0/norm);
          nep->eigr[idx_sc[color]] -= deig;
          fail_sc[color] = 0;
        } else fail_sc[color] = 1;
        break;
      }
      if (nep->npart==1) { BVRestoreColumn(nep->V,idx_sc[color],&v); }
    }
  }
  VecDestroy(&t[0]);
  VecDestroy(&t[1]);
  VecDestroy(&dv);
  VecDestroy(&ctx->w);
  VecDestroy(&r);
  PetscFree3(idx_sc,its_sc,fail_sc);
  VecScatterDestroy(&ctx->nst);
  if (nep->npart>1) {
    VecDestroy(&ctx->vg);
    VecDestroy(&ctx->v);
    for (i=0;i<nep->nt;i++) {
      MatDestroy(&ctx->A[i]);
    }
    for (i=0;i<nep->npart;i++) {
      VecScatterDestroy(&ctx->scatter_id[i]);
    }
    PetscFree2(ctx->A,ctx->scatter_id);
  }
  if (fctx && nep->scheme==NEP_REFINE_SCHEME_SCHUR) {
    MatDestroy(&P);
    MatDestroy(&fctx->M1);
    PetscFree(fctx);
  }
  if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
    MatDestroy(&Mt);
    VecDestroy(&dvv);
    VecDestroy(&rr);
    VecDestroy(&ctx->nv);
    if (nep->npart>1) {
      for (i=0;i<nep->nt;i++) { FNDestroy(&ctx->fn[i]); }
      PetscFree(ctx->fn);
    }
  }
  MatDestroy(&T);
  PetscFree(ctx);
  PetscLogEventEnd(NEP_Refine,nep,0,0,0);
  return(0);
}
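
For reference, this routine is not called directly by users; refinement is requested through the public interface before solving. A minimal usage sketch, assuming the SLEPc 3.7 signature of NEPSetRefine() and purely illustrative values for the tolerance and iteration count:

   /* Sketch: request simple Newton refinement with the explicit scheme,
      using a single partition, tol 1e-9 and at most 10 iterations */
   NEPSetRefine(nep,NEP_REFINE_SIMPLE,1,1e-9,10,NEP_REFINE_SCHEME_EXPLICIT);
   NEPSolve(nep);

The npart argument (1 here) selects the number of subcommunicators used for the refinement, matching nep->npart above. The same behavior can be requested from the command line with options of the form -nep_refine simple and -nep_refine_scheme explicit; see the NEPSetRefine manual page for the exact option names and semantics.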