Actual source code: adda.c
1: /*
3: Contributed by Arvid Bessen, Columbia University, June 2007
5: Extension of DA object to any number of dimensions.
7: */
8: #include "../src/dm/adda/addaimpl.h"
12: /*@C
13: ADDACreate - Creates an ADDA object that translates between coordinates
14: in a geometric grid of arbitrary dimension and data in a PETSc vector
15: distributed on several processors.
17: Collective on MPI_Comm
19: Input Parameters:
20: + comm - MPI communicator
21: . dim - the dimension of the grid
22: . nodes - array with dim entries that give the number of nodes in each dimension
23: . procs - array with dim entries that give the number of processors in each dimension
24: (or PETSC_NULL if they are to be determined automatically)
25: . dof - number of degrees of freedom per node
26: - periodic - array with dim entries; the i-th entry is PETSC_TRUE iff dimension i is periodic
28: Output Parameters:
29: . adda - pointer to ADDA data structure that is created
31: Level: intermediate
33: @*/
34: PetscErrorCode ADDACreate(MPI_Comm comm, PetscInt dim, PetscInt *nodes,PetscInt *procs,
35: PetscInt dof, PetscTruth *periodic,ADDA *adda_p)
36: {
38: ADDA adda;
39: PetscInt s=1; /* stencil width, fixed to 1 at the moment */
40: PetscMPIInt rank,size;
41: PetscInt i;
42: PetscInt nodes_total;
43: PetscInt nodesleft;
44: PetscInt procsleft;
45: PetscInt procsdimi;
46: PetscInt ranki;
47: PetscInt rpq;
52: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
53: DMInitializePackage(PETSC_NULL);
54: #endif
56: PetscHeaderCreate(*adda_p,_p_ADDA,struct _ADDAOps,ADDA_COOKIE,0,"ADDA",comm,ADDADestroy,0);
57: adda = *adda_p;
58: adda->ops->view = ADDAView;
59: adda->ops->createglobalvector = ADDACreateGlobalVector;
60: adda->ops->getcoloring = ADDAGetColoring;
61: adda->ops->getmatrix = ADDAGetMatrix;
62: adda->ops->getinterpolation = ADDAGetInterpolation;
63: adda->ops->refine = ADDARefine;
64: adda->ops->coarsen = ADDACoarsen;
65: adda->ops->getinjection = ADDAGetInjection;
66: adda->ops->getaggregates = ADDAGetAggregates;
67:
68: MPI_Comm_size(comm,&size);
69: MPI_Comm_rank(comm,&rank);
70:
71: adda->dim = dim;
72: adda->dof = dof;
74: /* nodes */
75: PetscMalloc(dim*sizeof(PetscInt), &(adda->nodes));
76: PetscMemcpy(adda->nodes, nodes, dim*sizeof(PetscInt));
77: /* total number of nodes */
78: nodes_total = 1;
79: for(i=0; i<dim; i++) nodes_total *= nodes[i];
81: /* procs */
82: PetscMalloc(dim*sizeof(PetscInt), &(adda->procs));
83: /* create distribution of nodes to processors */
84: if(procs == PETSC_NULL) {
85: procs = adda->procs;
86: nodesleft = nodes_total;
87: procsleft = size;
88: /* figure out a good way to split the array across the processors */
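/* Illustrative worked example (figures are made up, not from the original source):
   for a 64x64x64 grid on 8 processes the heuristic below computes
   procs[i] ~ nodes[i]*(procsleft/nodesleft)^(1/(dim-i)), rounded and then decremented
   until it divides procsleft.  This gives procs[0] = 64*(8/262144)^(1/3) = 2,
   procs[1] = 64*(4/4096)^(1/2) = 2, and the last dimension receives the remaining
   procsleft = 2, i.e. a 2x2x2 process grid. */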
89: for(i=0; i<dim; i++) {
90: if(i==dim-1) {
91: procs[i] = procsleft;
92: } else {
93: /* calculate best partition */
94: procs[i] = (PetscInt)(((double) nodes[i])*pow(((double) procsleft)/((double) nodesleft),1./((double)(dim-i)))+0.5);
95: if(procs[i]<1) procs[i]=1;
96: while( procs[i] > 0 ) {
97: if( procsleft % procs[i] )
98: procs[i]--;
99: else
100: break;
101: }
102: nodesleft /= nodes[i];
103: procsleft /= procs[i];
104: }
105: }
106: } else {
107: /* user provided the number of processors */
108: PetscMemcpy(adda->procs, procs, dim*sizeof(PetscInt));
109: }
110: /* check for validity */
111: procsleft = 1;
112: for(i=0; i<dim; i++) {
113: if (nodes[i] < procs[i]) {
114: SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Partition in direction %D is too fine! %D nodes, %D processors", i, nodes[i], procs[i]);
115: }
116: procsleft *= procs[i];
117: }
118: if(procsleft != size) {
119: SETERRQ(1, "Created or was provided with inconsistent distribution of processors");
120: }
122: /* periodicity */
123: adda->periodic = periodic;
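/* note: the caller's periodic array is stored directly (not copied, unlike nodes and procs above) */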
124:
125: /* find out local region */
126: PetscMalloc(dim*sizeof(PetscInt), &(adda->lcs));
127: PetscMalloc(dim*sizeof(PetscInt), &(adda->lce));
128: procsdimi=size;
129: ranki=rank;
130: for(i=0; i<dim; i++) {
131: /* number of processors for dimensions i+1, ..., dim-1 */
132: procsdimi /= procs[i];
133: /* index of this process's slab in dimension i */
134: rpq = ranki / procsdimi;
135: adda->lcs[i] = rpq * (nodes[i]/procs[i]);
136: if( rpq + 1 < procs[i] ) {
137: adda->lce[i] = (rpq + 1) * (nodes[i]/procs[i]);
138: } else {
139: /* last one gets all the rest */
140: adda->lce[i] = nodes[i];
141: }
142: ranki = ranki - rpq*procsdimi;
143: }
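/* Illustrative example (hypothetical figures, not from the original source): with
   nodes[i] = 10 and procs[i] = 4, the ranks along dimension i own the index ranges
   [0,2), [2,4), [4,6) and [6,10); the last rank absorbs the remainder. */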
144:
145: /* compute local size */
146: adda->lsize=1;
147: for(i=0; i<dim; i++) {
148: adda->lsize *= (adda->lce[i]-adda->lcs[i]);
149: }
150: adda->lsize *= dof;
152: /* find out ghost points */
153: PetscMalloc(dim*sizeof(PetscInt), &(adda->lgs));
154: PetscMalloc(dim*sizeof(PetscInt), &(adda->lge));
155: for(i=0; i<dim; i++) {
156: if( periodic[i] ) {
157: adda->lgs[i] = adda->lcs[i] - s;
158: adda->lge[i] = adda->lce[i] + s;
159: } else {
160: adda->lgs[i] = PetscMax(adda->lcs[i] - s, 0);
161: adda->lge[i] = PetscMin(adda->lce[i] + s, nodes[i]);
162: }
163: }
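/* Illustrative example (hypothetical figures): with stencil width s = 1, nodes[i] = 10 and
   local range [0,2), a periodic dimension gets the ghosted range [-1,3), whereas a
   non-periodic dimension is clipped to [0,3). */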
164:
165: /* compute local size with ghost points */
166: adda->lgsize=1;
167: for(i=0; i<dim; i++) {
168: adda->lgsize *= (adda->lge[i]-adda->lgs[i]);
169: }
170: adda->lgsize *= dof;
172: /* create global and local prototype vector */
173: VecCreateMPIWithArray(comm,adda->lsize,PETSC_DECIDE,0,&(adda->global));
174: VecSetBlockSize(adda->global,adda->dof);
175: #if ADDA_NEEDS_LOCAL_VECTOR
176: /* local includes ghost points */
177: VecCreateSeqWithArray(PETSC_COMM_SELF,adda->lgsize,0,&(adda->local));
178: VecSetBlockSize(adda->local,dof);
179: #endif
181: PetscMalloc(dim*sizeof(PetscInt), &(adda->refine));
182: for(i=0; i<dim; i++) adda->refine[i] = 3;
183: adda->dofrefine = 1;
185: return(0);
186: }
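/* A minimal usage sketch (illustrative only, not part of the original adda.c; the function
   name and the grid sizes below are made up).  It creates a 2d ADDA, obtains a global
   vector from it and cleans up again.  Error checking is omitted, as in the rest of
   this listing. */
static PetscErrorCode ADDAUsageSketch(MPI_Comm comm)
{
  ADDA       adda;
  Vec        g;
  PetscInt   nodes[2]    = {32, 48};
  PetscTruth periodic[2] = {PETSC_FALSE, PETSC_TRUE};

  /* let ADDACreate() choose the processor distribution (procs == PETSC_NULL) */
  ADDACreate(comm, 2, nodes, PETSC_NULL, 1, periodic, &adda);
  ADDACreateGlobalVector(adda, &g);
  /* ... fill and use the global vector ... */
  VecDestroy(g);
  ADDADestroy(adda);
  return 0;
}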
190: /*@
191: ADDADestroy - Destroys a distributed array.
193: Collective on ADDA
195: Input Parameter:
196: . adda - the distributed array to destroy
198: Level: beginner
200: .keywords: distributed array, destroy
202: .seealso: ADDACreate()
203: @*/
204: PetscErrorCode ADDADestroy(ADDA adda)
205: {
210: /* check reference count */
211: if(--((PetscObject)adda)->refct > 0) return(0);
213: /* destroy the allocated data */
214: PetscFree(adda->nodes);
215: PetscFree(adda->procs);
216: PetscFree(adda->lcs);
217: PetscFree(adda->lce);
218: PetscFree(adda->lgs);
219: PetscFree(adda->lge);
220: PetscFree(adda->refine);
222: VecDestroy(adda->global);
224: PetscHeaderDestroy(adda);
225: return(0);
226: }
230: /*@
231: ADDAView - Views a distributed array.
233: Collective on ADDA
235: Input Parameters:
236: + adda - the ADDA object to view
237: - v - the viewer
239: Level: developer
241: .keywords: distributed array, view
243: .seealso: DMView()
244: @*/
245: PetscErrorCode ADDAView(ADDA adda, PetscViewer v) {
247: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
248: return(0);
249: }
253: /*@
254: ADDACreateGlobalVector - Creates global vector for distributed array.
256: Collective on ADDA
258: Input Parameter:
259: . adda - the distributed array for which we create a global vector
261: Output Parameter:
262: . vec - the global vector
264: Level: beginner
266: .keywords: distributed array, vector
268: .seealso: DMCreateGlobalVector()
269: @*/
270: PetscErrorCode ADDACreateGlobalVector(ADDA adda, Vec *vec) {
275: VecDuplicate(adda->global, vec);
276: return(0);
277: }
281: /*@
282: ADDAGetColoring - Creates coloring for distributed array.
284: Collective on ADDA
286: Input Parameters:
287: + adda - the distributed array for which we create the coloring
288: - ctype - IS_COLORING_GHOSTED or IS_COLORING_LOCAL
290: Output Parameter:
291: . coloring - the coloring
293: Level: developer
295: .keywords: distributed array, coloring
297: .seealso: DMGetColoring()
298: @*/
299: PetscErrorCode ADDAGetColoring(ADDA adda, ISColoringType ctype,const MatType mtype,ISColoring *coloring) {
301: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
302: return(0);
303: }
307: /*@
308: ADDAGetMatrix - Creates matrix compatible with distributed array.
310: Collective on ADDA
312: Input Parameters:
313: + adda - the distributed array for which we create the matrix
314: - mtype - Supported types are MATSEQAIJ, MATMPIAIJ, MATSEQBAIJ, MATMPIBAIJ, or
315: any type which inherits from one of these (such as MATAIJ, MATLUSOL, etc.).
317: Output Parameter:
318: . mat - the empty Jacobian
320: Level: beginner
322: .keywords: distributed array, matrix
324: .seealso: DMGetMatrix()
325: @*/
326: PetscErrorCode ADDAGetMatrix(ADDA adda, const MatType mtype, Mat *mat) {
330: MatCreate(((PetscObject)adda)->comm, mat);
331: MatSetSizes(*mat, adda->lsize, adda->lsize, PETSC_DECIDE, PETSC_DECIDE);
332: MatSetType(*mat, mtype);
333: return(0);
334: }
338: /*@
339: ADDAGetMatrixNS - Creates a matrix compatible with two distributed arrays
341: Collective on ADDA
343: Input Parameters:
344: + addar - the distributed array that indexes the rows of the matrix
345: . addac - the distributed array that indexes the columns of the matrix
346: - mtype - Supported types are MATSEQAIJ, MATMPIAIJ, MATSEQBAIJ, MATMPIBAIJ, or
347: any type which inherits from one of these (such as MATAIJ, MATLUSOL, etc.).
349: Output Parameter:
350: . mat - the empty Jacobian
352: Level: beginner
354: .keywords: distributed array, matrix
356: .seealso: DMGetMatrix()
357: @*/
358: PetscErrorCode ADDAGetMatrixNS(ADDA addar, ADDA addac, const MatType mtype, Mat *mat) {
364: MatCreate(((PetscObject)addar)->comm, mat);
365: MatSetSizes(*mat, addar->lsize, addac->lsize, PETSC_DECIDE, PETSC_DECIDE);
366: MatSetType(*mat, mtype);
367: return(0);
368: }
372: /*@
373: ADDAGetInterpolation - Gets interpolation matrix between two ADDA objects
375: Collective on ADDA
377: Input Parameters:
378: + adda1 - the fine ADDA object
379: - adda2 - the second, coarser ADDA object
381: Output Parameter:
382: + mat - the interpolation matrix
383: - vec - the scaling (optional)
385: Level: developer
387: .keywords: distributed array, interpolation
389: .seealso: DMGetInterpolation()
390: @*/
391: PetscErrorCode ADDAGetInterpolation(ADDA adda1,ADDA adda2,Mat *mat,Vec *vec) {
393: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
394: return(0);
395: }
399: /*@
400: ADDARefine - Refines a distributed array.
402: Collective on ADDA
404: Input Parameters:
405: + adda - the distributed array to refine
406: - comm - the communicator to contain the new ADDA object (or PETSC_NULL)
408: Output Parameter:
409: . addaf - the refined ADDA
411: Level: developer
413: .keywords: distributed array, refine
415: .seealso: DMRefine()
416: @*/
417: PetscErrorCode ADDARefine(ADDA adda, MPI_Comm comm, ADDA *addaf) {
419: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
420: return(0);
421: }
425: /*@
426: ADDACoarsen - Coarsens a distributed array.
428: Collective on ADDA
430: Input Parameters:
431: + adda - the distributed array to coarsen
432: - comm - the communicator to contain the new ADDA object (or PETSC_NULL)
434: Output Parameter:
435: . addac - the coarsened ADDA
437: Level: developer
439: .keywords: distributed array, coarsen
441: .seealso: DMCoarsen()
442: @*/
443: PetscErrorCode ADDACoarsen(ADDA adda, MPI_Comm comm,ADDA *addac) {
445: PetscInt *nodesc;
446: PetscInt dofc;
447: PetscInt i;
451: PetscMalloc(adda->dim*sizeof(PetscInt), &nodesc);
452: for(i=0; i<adda->dim; i++) {
453: nodesc[i] = (adda->nodes[i] % adda->refine[i]) ? adda->nodes[i] / adda->refine[i] + 1 : adda->nodes[i] / adda->refine[i];
454: }
455: dofc = (adda->dof % adda->dofrefine) ? adda->dof / adda->dofrefine + 1 : adda->dof / adda->dofrefine;
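/* The ? : expressions above implement ceiling division, e.g. 100 nodes with a refinement
   factor of 3 coarsen to 34 nodes (illustrative figures, not from the original source). */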
456: ADDACreate(((PetscObject)adda)->comm, adda->dim, nodesc, adda->procs, dofc, adda->periodic, addac);
457: PetscFree(nodesc);
458: /* copy refinement factors */
459: ADDASetRefinement(*addac, adda->refine, adda->dofrefine);
460: return(0);
461: }
465: /*@
466: ADDAGetInjection - Gets injection between distributed arrays.
468: Collective on ADDA
470: Input Parameters:
471: + adda1 - the fine ADDA object
472: - adda2 - the second, coarser ADDA object
474: Output Parameter:
475: . ctx - the injection
477: Level: developer
479: .keywords: distributed array, injection
481: .seealso: DMGetInjection()
482: @*/
483: PetscErrorCode ADDAGetInjection(ADDA adda1, ADDA adda2, VecScatter *ctx) {
485: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
486: return(0);
487: }
489: /*@C
490: ADDAHCiterStartup - performs the first check for an iteration through a hypercube
491: lc, uc, idx all have to be valid arrays of size dim
492: This function sets idx to lc and then checks whether the lower corner (lc) is less
493: than the upper corner (uc). If lc "<=" uc in all coordinates, it returns PETSC_TRUE,
494: and PETSC_FALSE otherwise.
495:
496: Input Parameters:
497: + dim - the number of dimensions
498: . lc - the "lower" corner
499: - uc - the "upper" corner
501: Output Parameters:
502: . idx - the index that this function initializes to lc
504: Level: developer
505: @*/
506: PetscTruth ADDAHCiterStartup(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx) {
507: PetscErrorCode ierr;
508: PetscInt i;
510: ierr = PetscMemcpy(idx, lc, sizeof(PetscInt)*dim);
511: if(ierr) {
512: PetscError(__LINE__,__FUNCT__,__FILE__,__SDIR__,ierr,0," ");
513: return PETSC_FALSE;
514: }
515: for(i=0; i<dim; i++) {
516: if( lc[i] > uc[i] ) {
517: return PETSC_FALSE;
518: }
519: }
520: return PETSC_TRUE;
521: }
523: /*@C
524: ADDAHCiter - iterates through a hypercube
525: lc, uc, idx all have to be valid arrays of size dim
526: This function returns PETSC_FALSE if idx exceeds uc, and PETSC_TRUE otherwise.
527: There are no guarantees on what happens if idx is not in the hypercube
528: spanned by lc and uc; this should be checked with ADDAHCiterStartup().
529:
530: Use this code as follows:
531: if( ADDAHCiterStartup(dim, lc, uc, idx) ) {
532: do {
533: ...
534: } while( ADDAHCiter(dim, lc, uc, idx) );
535: }
536:
537: Input Parameters:
538: + dim - the number of dimensions
539: . lc - the "lower" corner
540: - uc - the "upper" corner
542: Output Parameters:
543: . idx - the index that this function increases
545: Level: developer
546: @*/
547: PetscTruth ADDAHCiter(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx) {
548: PetscInt i;
549: for(i=dim-1; i>=0; i--) {
550: idx[i] += 1;
551: if( uc[i] > idx[i] ) {
552: return PETSC_TRUE;
553: } else {
554: idx[i] -= uc[i] - lc[i];
555: }
556: }
557: return PETSC_FALSE;
558: }
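/* Illustrative iteration order (not from the original source): for dim = 2, lc = {0,0} and
   uc = {2,3}, the ADDAHCiterStartup()/ADDAHCiter() loop visits
   (0,0), (0,1), (0,2), (1,0), (1,1), (1,2) -- the last index varies fastest and uc is an
   exclusive upper bound. */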
562: /*@C
563: ADDAGetAggregates - Gets the aggregates that map between
564: grids associated with two ADDAs.
566: Collective on ADDA
568: Input Parameters:
569: + addac - the coarse grid ADDA
570: - addaf - the fine grid ADDA
572: Output Parameters:
573: . rest - the restriction matrix (transpose of the projection matrix)
575: Level: intermediate
577: .keywords: interpolation, restriction, multigrid
579: .seealso: ADDARefine(), ADDAGetInjection(), ADDAGetInterpolation()
580: @*/
581: PetscErrorCode ADDAGetAggregates(ADDA addac,ADDA addaf,Mat *rest)
582: {
583: PetscErrorCode ierr=0;
584: PetscInt i;
585: PetscInt dim;
586: PetscInt dofc, doff;
587: PetscInt *lcs_c, *lce_c;
588: PetscInt *lcs_f, *lce_f;
589: PetscInt *fgs, *fge;
590: PetscInt fgdofs, fgdofe;
591: ADDAIdx iter_c, iter_f;
592: PetscInt max_agg_size;
593: PetscMPIInt comm_size;
594: ADDAIdx *fine_nodes;
595: PetscInt fn_idx;
596: PetscScalar *one_vec;
602: if (addac->dim != addaf->dim) SETERRQ2(PETSC_ERR_ARG_INCOMP,"Dimensions of ADDA do not match %D %D", addac->dim, addaf->dim);
603: /* if (addac->dof != addaf->dof) SETERRQ2(PETSC_ERR_ARG_INCOMP,"DOF of ADDA do not match %D %D", addac->dof, addaf->dof); */
604: dim = addac->dim;
605: dofc = addac->dof;
606: doff = addaf->dof;
608: ADDAGetCorners(addac, &lcs_c, &lce_c);
609: ADDAGetCorners(addaf, &lcs_f, &lce_f);
610:
611: /* compute maximum size of aggregate */
612: max_agg_size = 1;
613: for(i=0; i<dim; i++) {
614: max_agg_size *= addaf->nodes[i] / addac->nodes[i] + 1;
615: }
616: max_agg_size *= doff / dofc + 1;
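/* Illustrative example (hypothetical figures): 10 fine nodes over 3 coarse nodes in one
   dimension gives at most 10/3 + 1 = 4 fine nodes per coarse node and direction, so
   max_agg_size = 2*4^dim when doff == dofc (the +1 terms keep the bound safe for
   non-divisible sizes). */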
618: /* create the matrix that will contain the restriction operator */
619: MPI_Comm_size(PETSC_COMM_WORLD,&comm_size);
621: /* construct matrix */
622: if( comm_size == 1 ) {
623: ADDAGetMatrixNS(addac, addaf, MATSEQAIJ, rest);
624: MatSeqAIJSetPreallocation(*rest, max_agg_size, PETSC_NULL);
625: } else {
626: ADDAGetMatrixNS(addac, addaf, MATMPIAIJ, rest);
627: MatMPIAIJSetPreallocation(*rest, max_agg_size, PETSC_NULL, max_agg_size, PETSC_NULL);
628: }
629: /* store nodes in the fine grid here */
630: PetscMalloc(sizeof(ADDAIdx)*max_agg_size, &fine_nodes);
631: /* these are the values to set to, a collection of 1's */
632: PetscMalloc(sizeof(PetscScalar)*max_agg_size, &one_vec);
633: /* initialize */
634: for(i=0; i<max_agg_size; i++) {
635: PetscMalloc(sizeof(PetscInt)*dim, &(fine_nodes[i].x));
636: one_vec[i] = 1.0;
637: }
639: /* get iterators */
640: PetscMalloc(sizeof(PetscInt)*dim, &(iter_c.x));
641: PetscMalloc(sizeof(PetscInt)*dim, &(iter_f.x));
643: /* the fine grid node corner for each coarse grid node */
644: PetscMalloc(sizeof(PetscInt)*dim, &fgs);
645: PetscMalloc(sizeof(PetscInt)*dim, &fge);
647: /* loop over all coarse nodes */
648: PetscMemcpy(iter_c.x, lcs_c, sizeof(PetscInt)*dim);
649: if( ADDAHCiterStartup(dim, lcs_c, lce_c, iter_c.x) ) {
650: do {
651: /* find corresponding fine grid nodes */
652: for(i=0; i<dim; i++) {
653: fgs[i] = iter_c.x[i]*addaf->nodes[i]/addac->nodes[i];
654: fge[i] = PetscMin((iter_c.x[i]+1)*addaf->nodes[i]/addac->nodes[i], addaf->nodes[i]);
655: }
656: /* treat all dof of the coarse grid */
657: for(iter_c.d=0; iter_c.d<dofc; iter_c.d++) {
658: /* find corresponding fine grid dof's */
659: fgdofs = iter_c.d*doff/dofc;
660: fgdofe = PetscMin((iter_c.d+1)*doff/dofc, doff);
661: /* we now know the "box" of all the fine grid nodes that are mapped to one coarse grid node */
662: fn_idx = 0;
663: /* loop over those corresponding fine grid nodes */
664: if( ADDAHCiterStartup(dim, fgs, fge, iter_f.x) ) {
665: do {
666: /* loop over all corresponding fine grid dof */
667: for(iter_f.d=fgdofs; iter_f.d<fgdofe; iter_f.d++) {
668: PetscMemcpy(fine_nodes[fn_idx].x, iter_f.x, sizeof(PetscInt)*dim);
669: fine_nodes[fn_idx].d = iter_f.d;
670: fn_idx++;
671: }
672: } while( ADDAHCiter(dim, fgs, fge, iter_f.x) );
673: }
674: /* add all these points to one aggregate */
675: ADDAMatSetValues(*rest, addac, 1, &iter_c, addaf, fn_idx, fine_nodes, one_vec, INSERT_VALUES);
676: }
677: } while( ADDAHCiter(dim, lcs_c, lce_c, iter_c.x) );
678: }
680: /* free memory */
681: PetscFree(fgs);
682: PetscFree(fge);
683: PetscFree(iter_c.x);
684: PetscFree(iter_f.x);
685: PetscFree(lcs_c);
686: PetscFree(lce_c);
687: PetscFree(lcs_f);
688: PetscFree(lce_f);
689: PetscFree(one_vec);
690: for(i=0; i<max_agg_size; i++) {
691: PetscFree(fine_nodes[i].x);
692: }
693: PetscFree(fine_nodes);
695: MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY);
696: MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY);
697: return(0);
698: }
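/* A usage sketch for the aggregation path (illustrative only; the function name is made up
   and error checking is omitted): coarsen a given ADDA, build the restriction matrix
   between the two grids and clean up. */
static PetscErrorCode ADDAAggregatesSketch(ADDA addaf)
{
  ADDA addac;
  Mat  rest;

  ADDACoarsen(addaf, PETSC_NULL, &addac);   /* the comm argument may be PETSC_NULL */
  ADDAGetAggregates(addac, addaf, &rest);
  /* ... use rest, e.g. as a multigrid restriction operator ... */
  MatDestroy(rest);
  ADDADestroy(addac);
  return 0;
}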
702: /*@
703: ADDASetRefinement - Sets the refinement factors of the distributed arrays.
705: Collective on ADDA
707: Input Parameters:
708: + adda - the ADDA object
709: . refine - array of refinement factors
710: - dofrefine - the refinement factor for the dof, usually just 1
712: Level: developer
714: .keywords: distributed array, refinement
715: @*/
716: PetscErrorCode ADDASetRefinement(ADDA adda, PetscInt *refine, PetscInt dofrefine) {
721: PetscMemcpy(adda->refine, refine, adda->dim*sizeof(PetscInt));
722: adda->dofrefine = dofrefine;
723: return(0);
724: }
728: /*@
729: ADDAGetCorners - Gets the corners of the local area
731: Collective on ADDA
733: Input Parameter:
734: . adda - the ADDA object
736: Output Parameter:
737: + lcorner - the "lower" corner
738: - ucorner - the "upper" corner
740: Both lcorner and ucorner are allocated by this procedure and will each point to an
741: array of size adda->dim.
743: Level: beginner
745: .keywords: distributed array, refinement
746: @*/
747: PetscErrorCode ADDAGetCorners(ADDA adda, PetscInt **lcorner, PetscInt **ucorner) {
753: PetscMalloc(adda->dim*sizeof(PetscInt), lcorner);
754: PetscMalloc(adda->dim*sizeof(PetscInt), ucorner);
755: PetscMemcpy(*lcorner, adda->lcs, adda->dim*sizeof(PetscInt));
756: PetscMemcpy(*ucorner, adda->lce, adda->dim*sizeof(PetscInt));
757: return(0);
758: }
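/* A sketch of iterating over all locally owned nodes (illustrative only; the function name
   is made up and error checking is omitted).  ADDAGetCorners() allocates the corner arrays,
   so they must be freed by the caller. */
static PetscErrorCode ADDALocalLoopSketch(ADDA adda)
{
  PetscInt *lc, *uc, *idx;

  ADDAGetCorners(adda, &lc, &uc);
  PetscMalloc(adda->dim*sizeof(PetscInt), &idx);
  if( ADDAHCiterStartup(adda->dim, lc, uc, idx) ) {
    do {
      /* idx now holds the coordinates of one locally owned node */
    } while( ADDAHCiter(adda->dim, lc, uc, idx) );
  }
  PetscFree(idx);
  PetscFree(lc);
  PetscFree(uc);
  return 0;
}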
762: /*@
763: ADDAGetGhostCorners - Gets the ghost corners of the local area
765: Collective on ADDA
767: Input Parameter:
768: . adda - the ADDA object
770: Output Parameter:
771: + lcorner - the "lower" corner of the ghosted area
772: - ucorner - the "upper" corner of the ghosted area
774: Both lcorner and ucorner are allocated by this procedure and will each point to an
775: array of size adda->dim.
777: Level: beginner
779: .keywords: distributed array, refinement
780: @*/
781: PetscErrorCode ADDAGetGhostCorners(ADDA adda, PetscInt **lcorner, PetscInt **ucorner) {
787: PetscMalloc(adda->dim*sizeof(PetscInt), lcorner);
788: PetscMalloc(adda->dim*sizeof(PetscInt), ucorner);
789: PetscMemcpy(*lcorner, adda->lgs, adda->dim*sizeof(PetscInt));
790: PetscMemcpy(*ucorner, adda->lge, adda->dim*sizeof(PetscInt));
791: return(0);
792: }
798: /*@C
799: ADDAMatSetValues - Inserts or adds a block of values into a matrix. The values
800: are indexed geometrically with the help of the ADDA data structure.
801: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
802: MUST be called after all calls to ADDAMatSetValues() have been completed.
804: Not Collective
806: Input Parameters:
807: + mat - the matrix
808: . addam - the ADDA geometry information for the rows
809: . m - the number of rows
810: . idxm - the row indices, each of them a proper ADDAIdx
811: . addan - the ADDA geometry information for the columns
812: . n - the number of columns
813: . idxn - the column indices, each of them a proper ADDAIdx
814: . v - a logically two-dimensional array of values of size m*n
815: - addv - either ADD_VALUES or INSERT_VALUES, where
816: ADD_VALUES adds values to any existing entries, and
817: INSERT_VALUES replaces existing entries with new values
819: Notes:
820: By default the values, v, are row-oriented and unsorted.
821: See MatSetOption() for other options.
823: Calls to ADDAMatSetValues() (and MatSetValues()) with the INSERT_VALUES and ADD_VALUES
824: options cannot be mixed without intervening calls to the assembly
825: routines.
827: Efficiency Alert:
828: The routine ADDAMatSetValuesBlocked() may offer much better efficiency
829: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
831: Level: beginner
833: Concepts: matrices^putting entries in
835: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), ADDAMatSetValuesBlocked(),
836: InsertMode, INSERT_VALUES, ADD_VALUES
837: @*/
838: PetscErrorCode ADDAMatSetValues(Mat mat, ADDA addam, PetscInt m, const ADDAIdx idxm[],
839: ADDA addan, PetscInt n, const ADDAIdx idxn[],
840: const PetscScalar v[], InsertMode addv) {
842: PetscInt *nodemult;
843: PetscInt i, j;
844: PetscInt *matidxm, *matidxn;
845: PetscInt *x, d;
846: PetscInt idx;
849: /* find correct multiplying factors */
850: PetscMalloc(addam->dim*sizeof(PetscInt), &nodemult);
851: nodemult[addam->dim-1] = 1;
852: for(j=addam->dim-2; j>=0; j--) {
853: nodemult[j] = nodemult[j+1]*(addam->nodes[j+1]);
854: }
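/* Illustrative example (hypothetical figures): for dim = 2, nodes = {4,5} and dof = 2 we get
   nodemult = {5,1}; the coordinate x = {2,3} with dof component d = 1 maps to the global
   row (2*5 + 3)*2 + 1 = 27. */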
855: /* convert each coordinate in idxm to the matrix row index */
856: PetscMalloc(m*sizeof(PetscInt), &matidxm);
857: for(i=0; i<m; i++) {
858: x = idxm[i].x; d = idxm[i].d;
859: idx = 0;
860: for(j=addam->dim-1; j>=0; j--) {
861: if( x[j] < 0 ) { /* "left", "below", etc. of boundary */
862: if( addam->periodic[j] ) { /* periodic wraps around */
863: x[j] += addam->nodes[j];
864: } else { /* non-periodic get discarded */
865: matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
866: goto endofloop_m;
867: }
868: }
869: if( x[j] >= addam->nodes[j] ) { /* "right", "above", etc. of boundary */
870: if( addam->periodic[j] ) { /* periodic wraps around */
871: x[j] -= addam->nodes[j];
872: } else { /* non-periodic get discarded */
873: matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
874: goto endofloop_m;
875: }
876: }
877: idx += x[j]*nodemult[j];
878: }
879: matidxm[i] = idx*(addam->dof) + d;
880: endofloop_m:
881: ;
882: }
883: PetscFree(nodemult);
885: /* find correct multiplying factors */
886: PetscMalloc(addan->dim*sizeof(PetscInt), &nodemult);
887: nodemult[addan->dim-1] = 1;
888: for(j=addan->dim-2; j>=0; j--) {
889: nodemult[j] = nodemult[j+1]*(addan->nodes[j+1]);
890: }
891: /* convert each coordinate in idxn to the matrix column index */
892: PetscMalloc(n*sizeof(PetscInt), &matidxn);
893: for(i=0; i<n; i++) {
894: x = idxn[i].x; d = idxn[i].d;
895: idx = 0;
896: for(j=addan->dim-1; j>=0; j--) {
897: if( x[j] < 0 ) { /* "left", "below", etc. of boundary */
898: if( addan->periodic[j] ) { /* periodic wraps around */
899: x[j] += addan->nodes[j];
900: } else { /* non-periodic get discarded */
901: matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
902: goto endofloop_n;
903: }
904: }
905: if( x[j] >= addan->nodes[j] ) { /* "right", "above", etc. of boundary */
906: if( addan->periodic[j] ) { /* periodic wraps around */
907: x[j] -= addan->nodes[j];
908: } else { /* non-periodic get discarded */
909: matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
910: goto endofloop_n;
911: }
912: }
913: idx += x[j]*nodemult[j];
914: }
915: matidxn[i] = idx*(addan->dof) + d;
916: endofloop_n:
917: ;
918: }
919: /* call original MatSetValues() */
920: MatSetValues(mat, m, matidxm, n, matidxn, v, addv);
921: /* clean up */
922: PetscFree(nodemult);
923: PetscFree(matidxm);
924: PetscFree(matidxn);
925: return(0);
926: }
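/* A usage sketch for geometric matrix assembly (illustrative only; the function name and the
   coordinates are made up, and error checking is omitted).  It couples one 2d grid node to a
   neighbour and assembles the matrix, as required after ADDAMatSetValues(). */
static PetscErrorCode ADDAMatSetValuesSketch(ADDA adda)
{
  Mat         A;
  ADDAIdx     row, col;
  PetscInt    rx[2] = {1, 2}, cx[2] = {1, 3};
  PetscScalar v     = -1.0;

  /* note: ADDAGetMatrix() does not preallocate, which is acceptable for a small illustration */
  ADDAGetMatrix(adda, MATMPIAIJ, &A);
  row.x = rx; row.d = 0;
  col.x = cx; col.d = 0;
  ADDAMatSetValues(A, adda, 1, &row, adda, 1, &col, &v, INSERT_VALUES);
  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
  MatDestroy(A);
  return 0;
}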