29 #include "../Core/util/NonMPL2.h"
31 #ifndef EIGEN_SPARSE_AMD_H
32 #define EIGEN_SPARSE_AMD_H
// Encode index i as the strictly negative value -i-2.  The mapping is an
// involution (applying it twice restores i), which lets the AMD code tag
// workspace entries in place and later recover the original value.
template<typename T>
inline T amd_flip(const T& i)
{
  return -i - 2;
}
// Recover the original value from a possibly amd_flip()-encoded entry:
// flipped (negative) values are decoded, non-negative values pass through.
template<typename T>
inline T amd_unflip(const T& i)
{
  if (i < 0)
    return amd_flip(i);
  return i;
}
// Return true when workspace entry w[j] has been tagged: flipped entries
// are strictly negative, untouched entries are non-negative.
template<typename T0, typename T1>
inline bool amd_marked(const T0* w, const T1& j)
{
  return !(w[j] >= 0);
}
// Tag workspace entry w[j] in place by storing its amd_flip() encoding
// (a strictly negative value); amd_marked()/amd_unflip() undo the test.
//
// Fixes two latent errors in the original, which only compiled because the
// template was never instantiated: it took `const T0* w` yet assigned
// through it, and it `return`ed the assignment result from a void function.
template<typename T0, typename T1>
inline void amd_mark(T0* w, const T1& j)
{
  w[j] = amd_flip(w[j]);
}
// --- cs_wclear (port of CSparse's cs_wclear) -----------------------------
// Ages/clears the node-marking workspace w[0..n-1] and returns a safe new
// value of `mark`.  NOTE(review): the body is only partially visible here
// (interior lines were elided by extraction) -- the reset of w[] and the
// returned value are missing, so comments describe only what is shown.
44 template<
typename StorageIndex>
45 static StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n)
// If `mark` is too small, or mark+lemax would wrap negative (overflow),
// existing marks in w[] can no longer be distinguished and every entry
// must be re-initialized.
48 if(mark < 2 || (mark + lemax < 0))
// Presumably resets each w[k] in this loop -- body elided; TODO confirm.
50 for(k = 0; k < n; k++)
// --- cs_tdfs (port of CSparse's cs_tdfs) ---------------------------------
// Depth-first traversal used to post-order a tree/forest rooted at node j,
// appending visited nodes to post[] starting at position k -- presumably
// the assembly tree, TODO confirm: the traversal body was elided by
// extraction, only the signature and the null-pointer guard remain.
59 template<
typename StorageIndex>
60 StorageIndex cs_tdfs(StorageIndex j, StorageIndex k, StorageIndex *head,
const StorageIndex *next, StorageIndex *post, StorageIndex *stack)
62 StorageIndex i, p, top = 0;
// Defensive guard: signal failure with -1 if any workspace pointer is null.
63 if(!head || !next || !post || !stack)
return (-1);
// --- minimum_degree_ordering ---------------------------------------------
// Approximate-minimum-degree (AMD) ordering of the symmetric pattern of C,
// writing the resulting permutation into `perm`.  Port of CSparse's cs_amd.
// NOTE(review): a large fraction of the body was elided by extraction --
// braces and many statements are missing -- so the comments below annotate
// only the surviving lines; everything else is hedged.
90 template<
typename Scalar,
typename StorageIndex>
91 void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,StorageIndex>& C, PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm)
// Scalar work variables (CSparse naming kept).
95 StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
96 k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
97 ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h;
// n = number of columns; the ordering works on the pattern of C in place.
99 StorageIndex n = StorageIndex(C.cols());
// Rows with more than `dense` entries are treated as dense and deferred
// (threshold max(16, 10*sqrt(n)), capped at n-2).
100 dense = std::max<StorageIndex> (16, StorageIndex(10 * sqrt(
double(n))));
101 dense = (std::min)(n-2, dense);
103 StorageIndex cnz = StorageIndex(C.nonZeros());
// Elbow room estimate for in-place element construction.
105 t = cnz + cnz/5 + 2*n;
// One aligned stack allocation, partitioned below into 8 integer arrays
// of size n+1 (len, nv, next, head, elen, degree, w, hhead).
109 ei_declare_aligned_stack_constructed_variable(StorageIndex,W,8*(n+1),0);
110 StorageIndex* len = W;
111 StorageIndex* nv = W + (n+1);
112 StorageIndex* next = W + 2*(n+1);
113 StorageIndex* head = W + 3*(n+1);
114 StorageIndex* elen = W + 4*(n+1);
115 StorageIndex* degree = W + 5*(n+1);
116 StorageIndex* w = W + 6*(n+1);
117 StorageIndex* hhead = W + 7*(n+1);
// last[] aliases the output permutation's storage to save a scratch array.
118 StorageIndex* last = perm.indices().data();
// The matrix' own outer/inner index arrays are reused as the quotient-graph
// data structure (destroyed in the process).
121 StorageIndex* Cp = C.outerIndexPtr();
122 StorageIndex* Ci = C.innerIndexPtr();
// Initial column lengths.
123 for(k = 0; k < n; k++)
124 len[k] = Cp[k+1] - Cp[k];
// Initialization of per-node state (interior lines elided).
128 for(i = 0; i <= n; i++)
// Clear the mark workspace before building the degree lists.
139 mark = internal::cs_wclear<StorageIndex>(0, 0, w, n);
// Build initial degree lists; dense or diagonal-free rows are handled
// specially (most of this loop's body was elided).
142 for(i = 0; i < n; i++)
144 bool has_diag =
false;
145 for(p = Cp[i]; p<Cp[i+1]; ++p)
153 if(d == 1 && has_diag)
160 else if(d > dense || !has_diag)
// Dense/empty rows are eliminated immediately: flip-tag their column ptr.
165 Cp[i] = amd_flip (n);
170 if(head[d] != -1) last[head[d]] = i;
// Select node k of minimum degree (the enclosing elimination while-loop
// was elided by extraction).
183 for(k = -1; mindeg < n && (k = head[mindeg]) == -1; mindeg++) {}
184 if(next[k] != -1) last[next[k]] = -1;
185 head[mindeg] = next[k];
// Garbage collection: compress Ci when element k may not fit.
191 if(elenk > 0 && cnz + mindeg >= nzmax)
193 for(j = 0; j < n; j++)
198 Ci[p] = amd_flip (j);
201 for(q = 0, p = 0; p < cnz; )
203 if((j = amd_flip (Ci[p++])) >= 0)
207 for(k3 = 0; k3 < len[j]-1; k3++) Ci[q++] = Ci[p++];
// Construct new element k: in place when elenk == 0, else at the end of Ci.
217 pk1 = (elenk == 0) ? p : cnz;
219 for(k1 = 1; k1 <= elenk + 1; k1++)
233 for(k2 = 1; k2 <= ln; k2++)
236 if((nvi = nv[i]) <= 0)
continue;
// Unlink node i from its degree list.
240 if(next[i] != -1) last[next[i]] = last[i];
243 next[last[i]] = next[i];
247 head[degree[i]] = next[i];
// Absorbed element e now points (flip-tagged) at the new element k.
252 Cp[e] = amd_flip (k);
256 if(elenk != 0) cnz = pk2;
// Compute set differences |Le \ Lk| for all elements e adjacent to Lk.
263 mark = internal::cs_wclear<StorageIndex>(mark, lemax, w, n);
264 for(pk = pk1; pk < pk2; pk++)
267 if((eln = elen[i]) <= 0)
continue;
270 for(p = Cp[i]; p <= Cp[i] + eln - 1; p++)
279 w[e] = degree[e] + wnvi;
// Approximate degree update for each node i in the new element Lk.
285 for(pk = pk1; pk < pk2; pk++)
289 p2 = p1 + elen[i] - 1;
// h accumulates a hash of i's adjacency, used below for supernode detection.
291 for(h = 0, d = 0, p = p1; p <= p2; p++)
305 Cp[e] = amd_flip (k);
310 elen[i] = pn - p1 + 1;
313 for(p = p2 + 1; p < p4; p++)
316 if((nvj = nv[j]) <= 0)
continue;
// Node i is "dead": it has become indistinguishable from / absorbed into k.
323 Cp[i] = amd_flip (k);
333 degree[i] = std::min<StorageIndex> (degree[i], d);
337 len[i] = pn - p1 + 1;
// Supernode detection: hash buckets group candidate-indistinguishable nodes.
345 lemax = std::max<StorageIndex>(lemax, dk);
346 mark = internal::cs_wclear<StorageIndex>(mark+lemax, lemax, w, n);
349 for(pk = pk1; pk < pk2; pk++)
352 if(nv[i] >= 0)
continue;
356 for(; i != -1 && next[i] != -1; i = next[i], mark++)
// Scatter i's pattern into w so candidates j can be compared in O(|Li|).
360 for(p = Cp[i]+1; p <= Cp[i] + ln-1; p++) w[Ci[p]] = mark;
362 for(j = next[i]; j != -1; )
364 ok = (len[j] == ln) && (elen[j] == eln);
365 for(p = Cp[j] + 1; ok && p <= Cp[j] + ln - 1; p++)
367 if(w[Ci[p]] != mark) ok = 0;
// j proved identical to i: absorb j into i (flip-tagged parent link).
371 Cp[j] = amd_flip (i);
// Finalize element k: compact Lk and put surviving nodes back in the
// degree lists, clamping d to the number of remaining live nodes.
388 for(p = pk1, pk = pk1; pk < pk2; pk++)
391 if((nvi = -nv[i]) <= 0)
continue;
393 d = degree[i] + dk - nvi;
394 d = std::min<StorageIndex> (d, n - nel - nvi);
395 if(head[d] != -1) last[head[d]] = i;
399 mindeg = std::min<StorageIndex> (mindeg, d);
404 if((len[k] = p-pk1) == 0)
409 if(elenk != 0) cnz = p;
// --- Postordering of the assembly tree ----------------------------------
// Cp[] now stores flip-tagged parent links; decode them, bucket children,
// then run a depth-first post-order from every root.
413 for(i = 0; i < n; i++) Cp[i] = amd_flip (Cp[i]);
414 for(j = 0; j <= n; j++) head[j] = -1;
415 for(j = n; j >= 0; j--)
417 if(nv[j] > 0)
continue;
418 next[j] = head[Cp[j]];
421 for(e = n; e >= 0; e--)
423 if(nv[e] <= 0)
continue;
426 next[e] = head[Cp[e]];
430 for(k = 0, i = 0; i <= n; i++)
432 if(Cp[i] == -1) k = internal::cs_tdfs<StorageIndex>(i, k, head, next, perm.indices().data(), w);
// Trim the output permutation back to n entries.
435 perm.indices().conservativeResize(n);
442 #endif // EIGEN_SPARSE_AMD_H
// Definition: Eigen_Colamd.h:54 (stray doxygen cross-reference artifact)