Actual source code: general.c

#define PETSCVEC_DLL
/*
     Provides the functions for index sets (IS) defined by a list of integers.
*/
#include "../src/vec/is/impls/general/general.h"
#include "petscvec.h"

PetscErrorCode ISDuplicate_General(IS is,IS *newIS)
{
  IS_General *sub = (IS_General *)is->data;

  ISCreateGeneral(((PetscObject)is)->comm,sub->n,sub->idx,newIS);
  return(0);
}

PetscErrorCode ISDestroy_General(IS is)
{
  IS_General *is_general = (IS_General*)is->data;

  if (is_general->allocated) {
    PetscFree(is_general->idx);
  }
  PetscFree(is_general);
  return(0);
}

PetscErrorCode ISIdentity_General(IS is,PetscTruth *ident)
{
  IS_General *is_general = (IS_General*)is->data;
  PetscInt   i,n = is_general->n,*idx = is_general->idx;

  is->isidentity = PETSC_TRUE;
  *ident         = PETSC_TRUE;
  for (i=0; i<n; i++) {
    if (idx[i] != i) {
      is->isidentity = PETSC_FALSE;
      *ident         = PETSC_FALSE;
      break;
    }
  }
  return(0);
}

static PetscErrorCode ISCopy_General(IS is,IS isy)
{
  IS_General *is_general = (IS_General*)is->data,*isy_general = (IS_General*)isy->data;

  if (is_general->n != isy_general->n || is_general->N != isy_general->N) SETERRQ(PETSC_ERR_ARG_INCOMP,"Index sets incompatible");
  isy_general->sorted = is_general->sorted;
  PetscMemcpy(isy_general->idx,is_general->idx,is_general->n*sizeof(PetscInt));
  return(0);
}

PetscErrorCode ISGetIndices_General(IS in,const PetscInt *idx[])
{
  IS_General *sub = (IS_General*)in->data;

  *idx = sub->idx;
  return(0);
}

PetscErrorCode ISRestoreIndices_General(IS in,const PetscInt *idx[])
{
  IS_General *sub = (IS_General*)in->data;

  if (*idx != sub->idx) {
    SETERRQ(PETSC_ERR_ARG_WRONG,"Must restore with value from ISGetIndices()");
  }
  return(0);
}

PetscErrorCode ISGetSize_General(IS is,PetscInt *size)
{
  IS_General *sub = (IS_General *)is->data;

  *size = sub->N;
  return(0);
}

PetscErrorCode ISGetLocalSize_General(IS is,PetscInt *size)
{
  IS_General *sub = (IS_General *)is->data;

  *size = sub->n;
  return(0);
}

PetscErrorCode ISInvertPermutation_General(IS is,PetscInt nlocal,IS *isout)
{
  IS_General     *sub = (IS_General *)is->data;
  PetscInt       i,*ii,n = sub->n,nstart;
  const PetscInt *idx = sub->idx;
  PetscMPIInt    size;
  IS             istmp,nistmp;

  MPI_Comm_size(((PetscObject)is)->comm,&size);
  if (size == 1) {
    PetscMalloc(n*sizeof(PetscInt),&ii);
    for (i=0; i<n; i++) {
      ii[idx[i]] = i;
    }
    ISCreateGeneral(PETSC_COMM_SELF,n,ii,isout);
    ISSetPermutation(*isout);
    PetscFree(ii);
  } else {
    /* crude, nonscalable: gather the entire IS onto each process */
    if (nlocal == PETSC_DECIDE) SETERRQ(PETSC_ERR_SUP,"Do not yet support nlocal of PETSC_DECIDE");
    ISAllGather(is,&istmp);
    ISSetPermutation(istmp);
    ISInvertPermutation(istmp,PETSC_DECIDE,&nistmp);
    ISDestroy(istmp);
    /* get the part we need */
    MPI_Scan(&nlocal,&nstart,1,MPIU_INT,MPI_SUM,((PetscObject)is)->comm);
#if defined(PETSC_USE_DEBUG)
    {
      PetscMPIInt rank;
      MPI_Comm_rank(((PetscObject)is)->comm,&rank);
      if (rank == size-1) {
        if (nstart != sub->N) SETERRQ2(PETSC_ERR_ARG_INCOMP,"Sum of nlocal lengths %d != total IS length %d",nstart,sub->N);
      }
    }
#endif
    nstart -= nlocal;
    ISGetIndices(nistmp,&idx);
    ISCreateGeneral(((PetscObject)is)->comm,nlocal,idx+nstart,isout);
    ISRestoreIndices(nistmp,&idx);
    ISDestroy(nistmp);
  }
  return(0);
}

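/*
   A small worked example of the uniprocessor branch above: for the
   permutation idx = {2,0,1} the loop ii[idx[i]] = i produces the inverse
   ii = {1,2,0}.
*/
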
PetscErrorCode ISView_General(IS is,PetscViewer viewer)
{
  IS_General *sub = (IS_General *)is->data;
  PetscInt   i,n = sub->n,*idx = sub->idx;
  PetscTruth iascii;

  PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
  if (iascii) {
    MPI_Comm    comm;
    PetscMPIInt rank,size;

    PetscObjectGetComm((PetscObject)viewer,&comm);
    MPI_Comm_rank(comm,&rank);
    MPI_Comm_size(comm,&size);

    if (size > 1) {
      if (is->isperm) {
        PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Index set is permutation\n",rank);
      }
      PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Number of indices in set %D\n",rank,n);
      for (i=0; i<n; i++) {
        PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %D %D\n",rank,i,idx[i]);
      }
    } else {
      if (is->isperm) {
        PetscViewerASCIISynchronizedPrintf(viewer,"Index set is permutation\n");
      }
      PetscViewerASCIISynchronizedPrintf(viewer,"Number of indices in set %D\n",n);
      for (i=0; i<n; i++) {
        PetscViewerASCIISynchronizedPrintf(viewer,"%D %D\n",i,idx[i]);
      }
    }
    PetscViewerFlush(viewer);
  } else {
    SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported for this object",((PetscObject)viewer)->type_name);
  }
  return(0);
}

PetscErrorCode ISSort_General(IS is)
{
  IS_General *sub = (IS_General *)is->data;

  if (sub->sorted) return(0);
  PetscSortInt(sub->n,sub->idx);
  sub->sorted = PETSC_TRUE;
  return(0);
}

PetscErrorCode ISSorted_General(IS is,PetscTruth *flg)
{
  IS_General *sub = (IS_General *)is->data;

  *flg = sub->sorted;
  return(0);
}

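/*
   Function table for the "general" IS implementation. The type-independent
   interface routines (ISGetSize(), ISSort(), ISView(), ...) dispatch through
   the pointers installed from this table by ISCreateGeneral_Private(), so this
   file only has to supply the twelve implementations listed below; the
   ordering must match struct _ISOps.
*/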
static struct _ISOps myops = { ISGetSize_General,
                               ISGetLocalSize_General,
                               ISGetIndices_General,
                               ISRestoreIndices_General,
                               ISInvertPermutation_General,
                               ISSort_General,
                               ISSorted_General,
                               ISDuplicate_General,
                               ISDestroy_General,
                               ISView_General,
                               ISIdentity_General,
                               ISCopy_General };

PetscErrorCode ISCreateGeneral_Private(MPI_Comm comm,IS *is)
{
  IS             Nindex = *is;
  IS_General     *sub = (IS_General*)Nindex->data;
  PetscInt       n = sub->n,i,min,max;
  const PetscInt *idx = sub->idx;
  PetscTruth     sorted = PETSC_TRUE;
  PetscTruth     flg = PETSC_FALSE;

  if (n < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"length < 0");
  *is = PETSC_NULL;
#ifndef PETSC_USE_DYNAMIC_LIBRARIES
  ISInitializePackage(PETSC_NULL);
#endif

  MPI_Allreduce(&n,&sub->N,1,MPIU_INT,MPI_SUM,comm);
  for (i=1; i<n; i++) {
    if (idx[i] < idx[i-1]) {sorted = PETSC_FALSE; break;}
  }
  if (n) {min = max = idx[0];} else {min = max = 0;}
  for (i=1; i<n; i++) {
    if (idx[i] < min) min = idx[i];
    if (idx[i] > max) max = idx[i];
  }
  sub->sorted        = sorted;
  Nindex->min        = min;
  Nindex->max        = max;
  PetscMemcpy(Nindex->ops,&myops,sizeof(myops));
  Nindex->isperm     = PETSC_FALSE;
  Nindex->isidentity = PETSC_FALSE;
  PetscOptionsGetTruth(PETSC_NULL,"-is_view",&flg,PETSC_NULL);
  if (flg) {
    PetscViewer viewer;
    PetscViewerASCIIGetStdout(((PetscObject)Nindex)->comm,&viewer);
    ISView(Nindex,viewer);
  }
  *is = Nindex;
  return(0);
}

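/*
   Because ISCreateGeneral_Private() checks the -is_view option above, any
   general IS can be printed at creation time by running the program with that
   option, for example

       mpiexec -n 2 ./yourprogram -is_view
*/
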
/*@
   ISCreateGeneral - Creates a data structure for an index set
   containing a list of integers.

   Collective on MPI_Comm

   Input Parameters:
+  comm - the MPI communicator
.  n - the length of the index set
-  idx - the list of integers

   Output Parameter:
.  is - the new index set

   Notes:
   The index array is copied to internally allocated storage, so the user may
   free the index array after the call. Use ISCreateGeneralNC() to have the IS
   keep the pointer that is passed in and NOT make a copy of the index array.

   When the communicator is not MPI_COMM_SELF, the operations on IS are NOT
   conceptually the same as MPI_Group operations. The IS are then
   distributed sets of indices and thus certain operations on them are
   collective.

   Level: beginner

   Concepts: index sets^creating
   Concepts: IS^creating

.seealso: ISCreateGeneralWithArray(), ISCreateStride(), ISCreateBlock(), ISAllGather(), ISCreateGeneralNC()
@*/
PetscErrorCode ISCreateGeneral(MPI_Comm comm,PetscInt n,const PetscInt idx[],IS *is)
{
  IS         Nindex;
  IS_General *sub;

  if (n < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"length < 0");
  *is = PETSC_NULL;
#ifndef PETSC_USE_DYNAMIC_LIBRARIES
  ISInitializePackage(PETSC_NULL);
#endif

  PetscHeaderCreate(Nindex,_p_IS,struct _ISOps,IS_COOKIE,IS_GENERAL,"IS",comm,ISDestroy,ISView);
  PetscNewLog(Nindex,IS_General,&sub);
  Nindex->data = (void*)sub;
  PetscMalloc(n*sizeof(PetscInt),&sub->idx);
  PetscLogObjectMemory(Nindex,n*sizeof(PetscInt));
  PetscMemcpy(sub->idx,idx,n*sizeof(PetscInt));
  sub->n         = n;
  sub->allocated = PETSC_TRUE;

  *is = Nindex;
  ISCreateGeneral_Private(comm,is);
  return(0);
}

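/*
   A minimal usage sketch (with illustrative values): because ISCreateGeneral()
   copies the indices, the caller's array may be modified or freed as soon as
   the call returns.

     PetscInt idx[4] = {0,3,5,7};
     IS       is;

     ISCreateGeneral(PETSC_COMM_SELF,4,idx,&is);
     ISDestroy(is);
*/
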
/*@C
   ISCreateGeneralNC - Creates a data structure for an index set
   containing a list of integers.

   Collective on MPI_Comm

   Input Parameters:
+  comm - the MPI communicator
.  n - the length of the index set
-  idx - the list of integers

   Output Parameter:
.  is - the new index set

   Notes: This routine does not copy the indices; it keeps the pointer to them.
   ISDestroy() will free the index array, so it must have been obtained with
   PetscMalloc() and it must not be freed nor modified elsewhere.
   Use ISCreateGeneral() if you wish to copy the indices passed into the routine.
   Use ISCreateGeneralWithArray() to NOT copy the indices and NOT free the space when
   ISDestroy() is called.

   When the communicator is not MPI_COMM_SELF, the operations on IS are NOT
   conceptually the same as MPI_Group operations. The IS are then
   distributed sets of indices and thus certain operations on them are
   collective.

   Level: beginner

   Concepts: index sets^creating
   Concepts: IS^creating

.seealso: ISCreateGeneral(), ISCreateGeneralWithArray(), ISCreateStride(), ISCreateBlock(), ISAllGather()
@*/
PetscErrorCode ISCreateGeneralNC(MPI_Comm comm,PetscInt n,const PetscInt idx[],IS *is)
{
  IS         Nindex;
  IS_General *sub;

  if (n < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"length < 0");
  *is = PETSC_NULL;
#ifndef PETSC_USE_DYNAMIC_LIBRARIES
  ISInitializePackage(PETSC_NULL);
#endif

  PetscHeaderCreate(Nindex,_p_IS,struct _ISOps,IS_COOKIE,IS_GENERAL,"IS",comm,ISDestroy,ISView);
  PetscNewLog(Nindex,IS_General,&sub);
  Nindex->data   = (void*)sub;
  sub->n         = n;
  sub->idx       = (PetscInt*)idx;
  sub->allocated = PETSC_TRUE;

  *is = Nindex;
  ISCreateGeneral_Private(comm,is);
  return(0);
}

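/*
   A minimal usage sketch (with illustrative values): the index array must be
   obtained with PetscMalloc() and is then owned by the IS; ISDestroy() frees
   it, so the caller must not free or modify it afterwards.

     PetscInt *idx;
     IS       is;

     PetscMalloc(4*sizeof(PetscInt),&idx);
     idx[0] = 0; idx[1] = 3; idx[2] = 5; idx[3] = 7;
     ISCreateGeneralNC(PETSC_COMM_SELF,4,idx,&is);
     ISDestroy(is);                                  the array idx is freed here
*/
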
/*@C
   ISCreateGeneralWithArray - Creates a data structure for an index set
   containing a list of integers.

   Collective on MPI_Comm

   Input Parameters:
+  comm - the MPI communicator
.  n - the length of the index set
-  idx - the list of integers

   Output Parameter:
.  is - the new index set

   Notes:
   Unlike with ISCreateGeneral(), the indices are not copied to internally
   allocated storage. The user array is not freed by ISDestroy().

   When the communicator is not MPI_COMM_SELF, the operations on IS are NOT
   conceptually the same as MPI_Group operations. The IS are then
   distributed sets of indices and thus certain operations on them are collective.

   Level: beginner

   Concepts: index sets^creating
   Concepts: IS^creating

.seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
@*/
PetscErrorCode ISCreateGeneralWithArray(MPI_Comm comm,PetscInt n,PetscInt idx[],IS *is)
{
  IS         Nindex;
  IS_General *sub;

  if (n < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"length < 0");
  *is = PETSC_NULL;
#ifndef PETSC_USE_DYNAMIC_LIBRARIES
  ISInitializePackage(PETSC_NULL);
#endif

  PetscHeaderCreate(Nindex,_p_IS,struct _ISOps,IS_COOKIE,IS_GENERAL,"IS",comm,ISDestroy,ISView);
  PetscNewLog(Nindex,IS_General,&sub);
  Nindex->data   = (void*)sub;
  sub->n         = n;
  sub->idx       = idx;
  sub->allocated = PETSC_FALSE;

  *is = Nindex;
  ISCreateGeneral_Private(comm,is);
  return(0);
}

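/*
   A minimal usage sketch (with illustrative values): the IS only references
   the caller's array, which must stay valid for the lifetime of the IS and is
   not freed by ISDestroy().

     PetscInt idx[3] = {2,0,1};
     IS       is;

     ISCreateGeneralWithArray(PETSC_COMM_SELF,3,idx,&is);
     ISDestroy(is);                                  idx is untouched and still owned by the caller
*/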