Actual source code: sbaijov.c
1: #define PETSCMAT_DLL
3: /*
4: Routines to compute overlapping regions of a parallel MPI matrix.
5:    Used for finding submatrices that are shared across processors.
6: */
7: #include "../src/mat/impls/sbaij/mpi/mpisbaij.h"
8: #include "petscbt.h"
10: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
11: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);
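/*
   Usage sketch (illustration only, not part of the original source): this file implements the
   MPISBAIJ back-end of the generic MatIncreaseOverlap() interface. Assuming idx[] holds nidx
   row indices of interest, a caller might do:

       IS is[1];
       ISCreateGeneral(PETSC_COMM_SELF,nidx,idx,&is[0]);
       MatIncreaseOverlap(C,1,is,2);      (grow is[0] by two levels of overlap)
       ISDestroy(is[0]);
*/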
15: PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
16: {
18: PetscInt i,N=C->cmap->N, bs=C->rmap->bs;
19: IS *is_new;
22: PetscMalloc(is_max*sizeof(IS),&is_new);
23: /* Convert the indices into block format */
24: ISCompressIndicesGeneral(N,bs,is_max,is,is_new);
25: if (ov < 0) { SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");}
26: for (i=0; i<ov; ++i) {
27: MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);
28: }
29: for (i=0; i<is_max; i++) {ISDestroy(is[i]);}
30: ISExpandIndicesGeneral(N,bs,is_max,is_new,is);
31: for (i=0; i<is_max; i++) {ISDestroy(is_new[i]);}
32: PetscFree(is_new);
33: return(0);
34: }
36: typedef enum {MINE,OTHER} WhoseOwner;
37: /* data1, odata1 and odata2 are packed in the format (for communication):
38: data[0] = is_max, no. of index sets is[]
39: data[1] = size of is[0]
40: ...
41: data[is_max] = size of is[is_max-1]
42: data[is_max + 1] = data(is[0])
43: ...
44: data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
45: ...
46: data2 is packed in the format (for creating output is[]):
47: data[0] = is_max, no. of index sets is[]
48: data[1] = size of is[0]
49: ...
50: data[is_max] = size of is[is_max-1]
51: data[is_max + 1] = data(is[0])
52: ...
53: data[is_max + 1 + Mbs*i] = data(is[i])
54: ...
55: */
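/*
   Worked example of the packing above (illustration only): with is_max = 2, is[0] = {3,7} and
   is[1] = {5}, the data1/odata1/odata2 layout is
       [ 2,   2, 1,   3, 7,   5 ]
   i.e. the number of index sets, their two sizes, then the indices of is[0] followed by those
   of is[1]. In data2 the indices of is[i] instead start at the fixed offset is_max + 1 + Mbs*i,
   leaving room for up to Mbs entries per set.
*/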
58: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
59: {
60: Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
62: PetscMPIInt size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len;
63: const PetscInt *idx_i;
64: PetscInt idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
65: Mbs,i,j,k,*odata1,*odata2,
66: proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
67: PetscInt proc_end=0,*iwork,len_unused,nodata2;
68: PetscInt ois_max; /* max no. of is[] over all processors */
69: char *t_p;
70: MPI_Comm comm;
71: MPI_Request *s_waits1,*s_waits2,r_req;
72: MPI_Status *s_status,r_status;
73: PetscBT *table; /* mark indices of this processor's is[] */
74: PetscBT table_i;
75: PetscBT otable; /* mark indices of other processors' is[] */
76: PetscInt bs=C->rmap->bs,Bn = c->B->cmap->n,Bnbs = Bn/bs,*Bowners;
77: IS garray_local,garray_gl;
80: comm = ((PetscObject)C)->comm;
81: size = c->size;
82: rank = c->rank;
83: Mbs = c->Mbs;
85: PetscObjectGetNewTag((PetscObject)C,&tag1);
86: PetscObjectGetNewTag((PetscObject)C,&tag2);
88: /* create tables used in
89: step 1: table[i] - mark c->garray of proc [i]
90: step 3: table[i] - mark indices of is[i] when whose=MINE
91: table[0] - mark indices of is[] when whose=OTHER */
92: len = PetscMax(is_max, size);
93: PetscMalloc2(len,PetscBT,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,char,&t_p);
94: for (i=0; i<len; i++) {
95: table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
96: }
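/* Note on the PetscBT bit arrays used throughout: PetscBTSet(bt,idx) marks a bit,
   PetscBTLookup(bt,idx) tests it, and PetscBTLookupSet(bt,idx) returns the previous value and
   then sets the bit, so "if (!PetscBTLookupSet(t,col)) ..." appends col to an output list only
   the first time col is seen. */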
98: MPI_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);
99:
100: /* 1. Send this processor's is[] to other processors */
101: /*---------------------------------------------------*/
102: /* allocate spaces */
103: PetscMalloc(is_max*sizeof(PetscInt),&n);
104: len = 0;
105: for (i=0; i<is_max; i++) {
106: ISGetLocalSize(is[i],&n[i]);
107: len += n[i];
108: }
109: if (!len) {
110: is_max = 0;
111: } else {
112: len += 1 + is_max; /* max length of data1 for one processor */
113: }
115:
116: PetscMalloc((size*len+1)*sizeof(PetscInt),&data1);
117: PetscMalloc(size*sizeof(PetscInt*),&data1_start);
118: for (i=0; i<size; i++) data1_start[i] = data1 + i*len;
120: PetscMalloc4(size,PetscInt,&len_s,size,PetscInt,&btable,size,PetscInt,&iwork,size+1,PetscInt,&Bowners);
122: /* gather c->garray from all processors */
123: ISCreateGeneral(comm,Bnbs,c->garray,&garray_local);
124: ISAllGather(garray_local, &garray_gl);
125: ISDestroy(garray_local);
126: MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);
127: Bowners[0] = 0;
128: for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
129:
130: if (is_max){
131: /* hash table ctable which maps c->row to proc_id */
132: PetscMalloc(Mbs*sizeof(PetscInt),&ctable);
133: for (proc_id=0,j=0; proc_id<size; proc_id++) {
134: for (; j<C->rmap->range[proc_id+1]/bs; j++) {
135: ctable[j] = proc_id;
136: }
137: }
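/* e.g. with size = 2 and 4 block rows owned by each process, ctable = {0,0,0,0,1,1,1,1},
   so ctable[idx] gives the rank owning block row idx (illustration only) */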
139: /* hash tables marking c->garray */
140: ISGetIndices(garray_gl,&idx_i);
141: for (i=0; i<size; i++){
142: table_i = table[i];
143: PetscBTMemzero(Mbs,table_i);
144: for (j = Bowners[i]; j<Bowners[i+1]; j++){ /* go through B cols of proc[i]*/
145: PetscBTSet(table_i,idx_i[j]);
146: }
147: }
148: ISRestoreIndices(garray_gl,&idx_i);
149: } /* if (is_max) */
150: ISDestroy(garray_gl);
152: /* evaluate communication - message to whom, length, and buffer space */
153: for (i=0; i<size; i++) len_s[i] = 0;
154:
155: /* header of data1 */
156: for (proc_id=0; proc_id<size; proc_id++){
157: iwork[proc_id] = 0;
158: *data1_start[proc_id] = is_max;
159: data1_start[proc_id]++;
160: for (j=0; j<is_max; j++) {
161: if (proc_id == rank){
162: *data1_start[proc_id] = n[j];
163: } else {
164: *data1_start[proc_id] = 0;
165: }
166: data1_start[proc_id]++;
167: }
168: }
169:
170: for (i=0; i<is_max; i++) {
171: ISGetIndices(is[i],&idx_i);
172: for (j=0; j<n[i]; j++){
173: idx = idx_i[j];
174: *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
175: proc_end = ctable[idx];
176: for (proc_id=0; proc_id<=proc_end; proc_id++){ /* for others to process */
177: if (proc_id == rank ) continue; /* done before this loop */
178: if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx))
179: continue; /* no need for sending idx to [proc_id] */
180: *data1_start[proc_id] = idx; data1_start[proc_id]++;
181: len_s[proc_id]++;
182: }
183: }
184: /* update header data */
185: for (proc_id=0; proc_id<size; proc_id++){
186: if (proc_id== rank) continue;
187: *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
188: iwork[proc_id] = len_s[proc_id] ;
189: }
190: ISRestoreIndices(is[i],&idx_i);
191: }
193: nrqs = 0; nrqr = 0;
194: for (i=0; i<size; i++){
195: data1_start[i] = data1 + i*len;
196: if (len_s[i]){
197: nrqs++;
198: len_s[i] += 1 + is_max; /* add no. of header msg */
199: }
200: }
202: for (i=0; i<is_max; i++) {
203: ISDestroy(is[i]);
204: }
205: PetscFree(n);
206: PetscFree(ctable);
208: /* Determine the number of messages to expect, their lengths, and from-ids */
209: PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);
210: PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);
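/* The two utilities above form the usual PETSc handshake: from the per-destination lengths in
   len_s[], PetscGatherNumberOfMessages() computes how many messages this process will receive
   (nrqr), and PetscGatherMessageLengths() returns the sending ranks (id_r1) and lengths (len_r1)
   of those incoming messages. */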
211:
212: /* Now post the sends */
213: PetscMalloc2(size,MPI_Request,&s_waits1,size,MPI_Request,&s_waits2);
214: k = 0;
215: for (proc_id=0; proc_id<size; proc_id++){ /* send data1 to processor [proc_id] */
216: if (len_s[proc_id]){
217: MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);
218: k++;
219: }
220: }
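/* the sends are nonblocking; they are only waited on (MPI_Waitall with s_waits1) after the
   local work and merging below, so communication overlaps computation */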
222: /* 2. Receive other's is[] and process. Then send back */
223: /*-----------------------------------------------------*/
224: len = 0;
225: for (i=0; i<nrqr; i++){
226: if (len_r1[i] > len) len = len_r1[i];
227: }
228: PetscFree(len_r1);
229: PetscFree(id_r1);
231: for (proc_id=0; proc_id<size; proc_id++)
232: len_s[proc_id] = iwork[proc_id] = 0;
233:
234: PetscMalloc((len+1)*sizeof(PetscInt),&odata1);
235: PetscMalloc(size*sizeof(PetscInt**),&odata2_ptr);
236: PetscBTCreate(Mbs,otable);
238: len_max = ois_max*(Mbs+1); /* max space storing all is[] for each receive */
239: len_est = 2*len_max; /* estimated space of storing is[] for all receiving messages */
240: PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
241: nodata2 = 0; /* nodata2+1 = number of chunks malloc'd and stored in odata2_ptr[] */
242: odata2_ptr[nodata2] = odata2;
243: len_unused = len_est; /* unused space in the array odata2_ptr[nodata2]; must stay >= len_max */
244:
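/* odata2 is grown in chunks: a new chunk of len_est entries is allocated whenever the remaining
   space could not hold a worst-case reply (len_max), and every chunk is recorded in odata2_ptr[]
   so it can be freed after the new is[] are created in step 5 */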
245: k = 0;
246: while (k < nrqr){
247: /* Receive messages */
248: MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);
249: if (flag){
250: MPI_Get_count(&r_status,MPIU_INT,&len);
251: proc_id = r_status.MPI_SOURCE;
252: MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
253: MPI_Wait(&r_req,&r_status);
255: /* Process messages */
256: /* make sure there is enough unused space in odata2 array */
257: if (len_unused < len_max){ /* allocate more space for odata2 */
258: PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
259: odata2_ptr[++nodata2] = odata2;
260: len_unused = len_est;
261: }
263: MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);
264: len = 1 + odata2[0];
265: for (i=0; i<odata2[0]; i++){
266: len += odata2[1 + i];
267: }
269: /* Send messages back */
270: MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);
271: k++;
272: odata2 += len;
273: len_unused -= len;
274: len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
275: }
276: }
277: PetscFree(odata1);
278: PetscBTDestroy(otable);
280: /* 3. Do local work on this processor's is[] */
281: /*-------------------------------------------*/
282: /* make sure there is enough unused space in odata2(=data) array */
283: len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
284: if (len_unused < len_max){ /* allocate more space for odata2 */
285: PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);
286: odata2_ptr[++nodata2] = odata2;
287: len_unused = len_est;
288: }
290: data = odata2;
291: MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);
292: PetscFree(data1_start);
294: /* 4. Receive work done on other processors, then merge */
295: /*------------------------------------------------------*/
296: /* get max length of the messages that this processor expects to recv */
297: MPI_Allreduce(len_s,iwork,size,MPIU_INT,MPI_MAX,comm);
298: PetscMalloc((iwork[rank]+1)*sizeof(PetscInt),&data2);
299: PetscFree4(len_s,btable,iwork,Bowners);
301: k = 0;
302: while (k < nrqs){
303: /* Receive messages */
304: MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);
305: if (flag){
306: MPI_Get_count(&r_status,MPIU_INT,&len);
307: proc_id = r_status.MPI_SOURCE;
308: MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
309: MPI_Wait(&r_req,&r_status);
310: if (len > 1+is_max){ /* Add data2 into data */
311: data2_i = data2 + 1 + is_max;
312: for (i=0; i<is_max; i++){
313: table_i = table[i];
314: data_i = data + 1 + is_max + Mbs*i;
315: isz = data[1+i];
316: for (j=0; j<data2[1+i]; j++){
317: col = data2_i[j];
318: if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
319: }
320: data[1+i] = isz;
321: if (i < is_max - 1) data2_i += data2[1+i];
322: }
323: }
324: k++;
325: }
326: }
327: PetscFree(data2);
328: PetscFree2(table,t_p);
330: /* complete phase 1 sends before freeing data1 */
331: PetscMalloc(size*sizeof(MPI_Status),&s_status);
332: if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status);}
333: PetscFree(data1);
334:
335: /* complete phase 2 sends */
336: if (nrqr){MPI_Waitall(nrqr,s_waits2,s_status);}
337: PetscFree2(s_waits1,s_waits2);
338: PetscFree(s_status);
340: /* 5. Create new is[] */
341: /*--------------------*/
342: for (i=0; i<is_max; i++) {
343: data_i = data + 1 + is_max + Mbs*i;
344: ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,is+i);
345: }
346: for (k=0; k<=nodata2; k++){
347: PetscFree(odata2_ptr[k]);
348: }
349: PetscFree(odata2_ptr);
351: return(0);
352: }
356: /*
357: MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap_MPISBAIJ_Once() to do
358: the work on the local processor.
360: Inputs:
361: C - MAT_MPISBAIJ;
362: data - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
363: whose - whose is[] to be processed,
364: MINE: this processor's is[]
365: OTHER: other processor's is[]
366: Output:
367: nidx - whose = MINE:
368: holds input and newly found indices in the same format as data
369: whose = OTHER:
370: only holds the newly found indices
371: table - table[i]: marks the indices of is[i], i=0,...,is_max-1. Used only in the case 'whose=MINE'.
372: */
373: /* Would computation be reduced by swapping the loop 'for each is' and 'for each row'? */
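/*
   Summary of the algorithm below (descriptive note): the input indices of is[i] are first marked
   in the bit tables table_i and table0. Each local block row is then visited once:
   - if the row itself belongs to is[i] ("row search"), all of its stored columns from both the
     diagonal block A and the off-diagonal block B (mapped through garray) are added to nidx_i;
   - otherwise ("col search") the row is added as soon as one of its stored columns is marked in
     table0.
   Because SBAIJ stores only the upper triangle, the col search is what picks up rows that have a
   nonzero in a column belonging to is[i].
*/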
374: static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
375: {
376: Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
377: Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(c->A)->data;
378: Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(c->B)->data;
380: PetscInt row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
381: PetscInt a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
382: PetscBT table0; /* mark the indices of input is[] for look up */
383: PetscBT table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */
384:
386: Mbs = c->Mbs; mbs = a->mbs;
387: ai = a->i; aj = a->j;
388: bi = b->i; bj = b->j;
389: garray = c->garray;
390: rstart = c->rstartbs;
391: is_max = data[0];
393: PetscBTCreate(Mbs,table0);
394:
395: nidx[0] = is_max;
396: idx_i = data + is_max + 1; /* ptr to input is[0] array */
397: nidx_i = nidx + is_max + 1; /* ptr to output is[0] array */
398: for (i=0; i<is_max; i++) { /* for each is */
399: isz = 0;
400: n = data[1+i]; /* size of input is[i] */
402: /* initialize and set table_i(mark idx and nidx) and table0(only mark idx) */
403: if (whose == MINE){ /* process this processor's is[] */
404: table_i = table[i];
405: nidx_i = nidx + 1+ is_max + Mbs*i;
406: } else { /* process other processor's is[] - only use one temp table */
407: table_i = table[0];
408: }
409: PetscBTMemzero(Mbs,table_i);
410: PetscBTMemzero(Mbs,table0);
411: if (n==0) {
412: nidx[1+i] = 0; /* size of new is[i] */
413: continue;
414: }
416: isz0 = 0; col_max = 0;
417: for (j=0; j<n; j++){
418: col = idx_i[j];
419: if (col >= Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
420: if (!PetscBTLookupSet(table_i,col)) {
421: PetscBTSet(table0,col);
422: if (whose == MINE) {nidx_i[isz0] = col;}
423: if (col_max < col) col_max = col;
424: isz0++;
425: }
426: }
427:
428: if (whose == MINE) {isz = isz0;}
429: k = 0; /* no. of indices from input is[i] that have been examined */
430: for (row=0; row<mbs; row++){
431: a_start = ai[row]; a_end = ai[row+1];
432: b_start = bi[row]; b_end = bi[row+1];
433: if (PetscBTLookup(table0,row+rstart)){ /* row is on input is[i]:
434: do row search: collect all col in this row */
435: for (l = a_start; l<a_end ; l++){ /* Amat */
436: col = aj[l] + rstart;
437: if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
438: }
439: for (l = b_start; l<b_end ; l++){ /* Bmat */
440: col = garray[bj[l]];
441: if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
442: }
443: k++;
444: if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
445: } else { /* row is not on input is[i]:
446: do col search: add row onto nidx_i if one of its cols is in input is[i] */
447: for (l = a_start; l<a_end ; l++){ /* Amat */
448: col = aj[l] + rstart;
449: if (col > col_max) break;
450: if (PetscBTLookup(table0,col)){
451: if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
452: break; /* for l = start; l<end ; l++) */
453: }
454: }
455: for (l = b_start; l<b_end ; l++){ /* Bmat */
456: col = garray[bj[l]];
457: if (col > col_max) break;
458: if (PetscBTLookup(table0,col)){
459: if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
460: break; /* for l = start; l<end ; l++) */
461: }
462: }
463: }
464: }
465:
466: if (i < is_max - 1){
467: idx_i += n; /* ptr to input is[i+1] array */
468: nidx_i += isz; /* ptr to output is[i+1] array */
469: }
470: nidx[1+i] = isz; /* size of new is[i] */
471: } /* for each is */
472: PetscBTDestroy(table0);
473:
474: return(0);
475: }