matrix_sparse.hpp
Annotated source listing of this file (see the accompanying documentation page).
/*!
 * \brief Sparse matrix class
 *
 * \author O. Krause
 * \date 2013
 *
 *
 * \par Copyright 1995-2015 Shark Development Team
 *
 * <BR><HR>
 * This file is part of Shark.
 * <http://image.diku.dk/shark/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
28 #ifndef SHARK_LINALG_BLAS_MATRIX_SPARSE_HPP
29 #define SHARK_LINALG_BLAS_MATRIX_SPARSE_HPP
30 
#include <algorithm> // std::lower_bound, std::copy, std::copy_backward, std::swap_ranges, std::fill, std::min, std::max
#include <cstddef>   // std::size_t, std::ptrdiff_t
#include <vector>    // backing storage for indices and values

#include "matrix_proxy.hpp"
33 namespace shark {
34 namespace blas {
35 
36 template<class T, class I=std::size_t>
37 class compressed_matrix:public matrix_container<compressed_matrix<T, I> > {
39 public:
40  typedef std::size_t size_type;
41  typedef std::ptrdiff_t difference_type;
42  typedef T value_type;
43  typedef value_type scalar_type;
44  typedef T const* const_pointer;
45  typedef T* pointer;
46 
47  typedef I index_type;
48  typedef index_type const* const_index_pointer;
49  typedef index_type* index_pointer;
50 
51  typedef T const &const_reference;
52  class reference {
53  private:
54  const_reference value()const {
55  return const_cast<self_type const&>(m_matrix)(m_i,m_j);
56  }
57  value_type& ref() const {
58  //get array bounds
59  index_type const *start = m_matrix.inner_indices() + m_matrix.m_rowStart[m_i];
60  index_type const *end = m_matrix.inner_indices() + m_matrix.m_rowEnd[m_i];
61  //find position of the index in the array
62  index_type const *pos = std::lower_bound(start,end,m_j);
63 
64  if (pos != end && *pos == m_j)
65  return m_matrix.m_values[(pos-start)+m_matrix.m_rowStart[m_i]];
66  else {
67  //create iterator to the insertion position and insert new element
68  row_iterator posIter(
69  m_matrix.values(),
70  m_matrix.inner_indices(),
71  pos-start + m_matrix.m_rowStart[m_i]
72  ,m_i
73  );
74  return *m_matrix.set_element(posIter, m_j, m_matrix.m_zero);
75  }
76  }
77 
78  public:
79  // Construction and destruction
80  reference(compressed_matrix &m, size_type i, size_type j):
81  m_matrix(m), m_i(i), m_j(j) {}
82 
83  // Assignment
84  value_type& operator = (value_type d)const {
85  return ref() = d;
86  }
87  value_type& operator=(reference const & other){
88  return ref() = other.value();
89  }
90  value_type& operator += (value_type d)const {
91  return ref()+=d;
92  }
93  value_type& operator -= (value_type d)const {
94  return ref()-=d;
95  }
96  value_type& operator *= (value_type d)const {
97  return ref()*=d;
98  }
99  value_type& operator /= (value_type d)const {
100  return ref()/=d;
101  }
102 
103  operator const_reference() const {
104  return value();
105  }
106  private:
107  compressed_matrix& m_matrix;
108  size_type m_i;
109  size_type m_j;
110  };
111 
114 
115  typedef sparse_tag storage_category;
116  typedef elementwise_tag evaluation_category;
117  typedef row_major orientation;
118 
119  // Construction and destruction
121  : m_size1(0), m_size2(0), m_nnz(0)
122  , m_rowStart(1,0), m_indices(0), m_values(0), m_zero(0) {}
123 
124  compressed_matrix(size_type size1, size_type size2, size_type non_zeros = 0)
125  : m_size1(size1), m_size2(size2), m_nnz(0)
126  , m_rowStart(size1 + 1,0)
127  , m_rowEnd(size1,0)
128  , m_indices(non_zeros), m_values(non_zeros), m_zero(0) {}
129 
130  template<class E>
131  compressed_matrix(const matrix_expression<E> &e, size_type non_zeros = 0)
132  : m_size1(e().size1()), m_size2(e().size2()), m_nnz(0)
133  , m_rowStart(e().size1() + 1, 0)
134  , m_rowEnd(e().size1(), 0)
135  , m_indices(non_zeros), m_values(non_zeros), m_zero(0) {
136  assign(*this, e);
137  }
138 
139  // Accessors
140  size_type size1() const {
141  return m_size1;
142  }
143  size_type size2() const {
144  return m_size2;
145  }
146 
147  size_type nnz_capacity() const {
148  return m_values.size();
149  }
150  size_type row_capacity(std::size_t row)const {
151  RANGE_CHECK(row < size1());
152  return m_rowStart[row+1] - m_rowStart[row];
153  }
154  size_type nnz() const {
155  return m_nnz;
156  }
157  size_type inner_nnz(std::size_t row) const {
158  return m_rowEnd[row] - m_rowStart[row];
159  }
160 
161  index_type const *outer_indices() const {
162  if(m_rowStart.empty())
163  return 0;
164  return &m_rowStart[0];
165  }
166  index_type const *outer_indices_end() const {
167  if(m_rowEnd.empty())
168  return 0;
169  return &m_rowEnd[0];
170  }
171  index_type const *inner_indices() const {
172  if(nnz_capacity() == 0)
173  return 0;
174  return &m_indices[0];
175  }
176  value_type const *values() const {
177  if(nnz_capacity() == 0)
178  return 0;
179  return &m_values[0];
180  }
181 
182  index_type *outer_indices() {
183  if(m_rowStart.empty())
184  return 0;
185  return &m_rowStart[0];
186  }
187  index_type *outer_indices_end() {
188  if(m_rowEnd.empty())
189  return 0;
190  return &m_rowEnd[0];
191  }
192  index_type *inner_indices() {
193  if(nnz_capacity() == 0)
194  return 0;
195  return &m_indices[0];
196  }
197  value_type *values() {
198  if(nnz_capacity() == 0)
199  return 0;
200  return &m_values[0];
201  }
202 
203  void set_filled(size_type non_zeros) {
204  m_nnz = non_zeros;
205  }
206 
207  void set_row_filled(size_type i,size_type non_zeros) {
208  SIZE_CHECK(i < size1());
209  SIZE_CHECK(non_zeros <=row_capacity(i));
210 
211  m_rowEnd[i] = m_rowStart[i]+non_zeros;
212  //correct end pointers
213  if(i == size1()-1)
214  m_rowStart[size1()] = m_rowEnd[i];
215  }
216 
217  void resize(size_type size1, size_type size2) {
218  m_size1 = size1;
219  m_size2 = size2;
220  m_nnz = 0;
221  //clear row data
222  m_rowStart.resize(m_size1 + 1);
223  m_rowEnd.resize(m_size1);
224  std::fill(m_rowStart.begin(),m_rowStart.end(),0);
225  std::fill(m_rowEnd.begin(),m_rowEnd.end(),0);
226  }
227  void reserve(size_type non_zeros) {
228  if (non_zeros < nnz_capacity()) return;
229  //non_zeros = std::min(m_size2*m_size1,non_zeros);//this can lead to totally strange errors.
230  m_indices.resize(non_zeros);
231  m_values.resize(non_zeros);
232  }
233 
234  void reserve_row(std::size_t row, std::size_t non_zeros) {
235  RANGE_CHECK(row < size1());
236  non_zeros = std::min(m_size2,non_zeros);
237  if (non_zeros <= row_capacity(row)) return;
238  std::size_t spaceDifference = non_zeros-row_capacity(row);
239 
240  //check if there is place in the end of the container to store the elements
241  if (spaceDifference > nnz_capacity()-m_rowStart.back()) {
242  reserve(nnz_capacity()+std::max<std::size_t>(nnz_capacity(),2*spaceDifference));
243  }
244  //move the elements of the next rows to make room for the reserved space
245  for (std::size_t i = size1()-1; i != row; --i) {
246  value_type *values = this->values() + m_rowStart[i];
247  value_type *valueRowEnd = this->values() + m_rowEnd[i];
248  index_type *indices = inner_indices() + m_rowStart[i];
249  index_type *indicesEnd = inner_indices() + m_rowEnd[i];
250  std::copy_backward(values,valueRowEnd, valueRowEnd+spaceDifference);
251  std::copy_backward(indices,indicesEnd, indicesEnd+spaceDifference);
252  m_rowStart[i]+=spaceDifference;
253  m_rowEnd[i]+=spaceDifference;
254  }
255  m_rowStart.back() +=spaceDifference;
256  SIZE_CHECK(row_capacity(row) == non_zeros);
257  }
258 
259  void clear() {
260  m_nnz = 0;
261  m_rowStart [0] = 0;
262  }
263 
264  // Element access
265  const_reference operator()(size_type i, size_type j) const {
266  SIZE_CHECK(i < size1());
267  SIZE_CHECK(j < size2());
268  //get array bounds
269  index_type const *start = inner_indices() + m_rowStart[i];
270  index_type const *end = inner_indices() + m_rowEnd[i];
271  //find position of the index in the array
272  index_type const *pos = std::lower_bound(start,end,j);
273 
274  if (pos != end && *pos == j)
275  return m_values[(pos-start)+m_rowStart[i]];
276  else
277  return m_zero;
278  }
279 
280  reference operator()(size_type i, size_type j) {
281  SIZE_CHECK(i < size1());
282  SIZE_CHECK(j < size2());
283  return reference(*this,i,j);
284  }
285 
286  // Assignment
287  template<class C> // Container assignment without temporary
289  resize(m().size1(), m().size2());
290  assign(*this, m);
291  return *this;
292  }
293  template<class E>
295  self_type temporary(e, nnz_capacity());
296  swap(temporary);
297  return *this;
298  }
299 
300  // Swapping
302  std::swap(m_size1, m.m_size1);
303  std::swap(m_size2, m.m_size2);
304  std::swap(m_nnz, m.m_nnz);
305  m_rowStart.swap(m.m_rowStart);
306  m_rowEnd.swap(m.m_rowEnd);
307  m_indices.swap(m.m_indices);
308  m_values.swap(m.m_values);
309  }
310 
311  friend void swap(compressed_matrix &m1, compressed_matrix &m2) {
312  m1.swap(m2);
313  }
314 
315  friend void swap_rows(compressed_matrix& a, size_type i, compressed_matrix& b, size_type j) {
316  SIZE_CHECK(i < a.size1());
317  SIZE_CHECK(j < b.size1());
318  SIZE_CHECK(a.size2() == b.size2());
319 
320  //rearrange (i,j) such that i has equal or more elements than j
321  if(a.inner_nnz(i) < b.inner_nnz(j)){
322  swap_rows(b,j,a,i);
323  return;
324  }
325 
326  std::size_t nnzi = a.inner_nnz(i);
327  std::size_t nnzj = b.inner_nnz(j);
328 
329  //reserve enough space for swapping
330  b.reserve_row(j,nnzi);
331  SIZE_CHECK(b.row_capacity(j) >= nnzi);
332  SIZE_CHECK(a.row_capacity(i) >= nnzj);
333 
334  index_type* indicesi = a.inner_indices() + a.m_rowStart[i];
335  index_type* indicesj = b.inner_indices() + b.m_rowStart[j];
336  value_type* valuesi = a.values() + a.m_rowStart[i];
337  value_type* valuesj = b.values() + b.m_rowStart[j];
338 
339  //swap all elements of j with the elements in i, don't care about unitialized elements in j
340  std::swap_ranges(indicesi,indicesi+nnzi,indicesj);
341  std::swap_ranges(valuesi, valuesi+nnzi,valuesj);
342 
343  //if both rows had the same number of elements, we are done.
344  if(nnzi == nnzj)
345  return;
346 
347  //otherwise correct end pointers
348  a.set_row_filled(i,nnzj);
349  b.set_row_filled(j,nnzi);
350  }
351 
352  friend void swap_rows(compressed_matrix& a, size_type i, size_type j) {
353  if(i == j) return;
354  swap_rows(a,i,a,j);
355  }
356 
357  typedef compressed_storage_iterator<value_type const, index_type const> const_row_iterator;
358  typedef compressed_storage_iterator<value_type, index_type const> row_iterator;
359 
360  const_row_iterator row_begin(std::size_t i) const {
361  SIZE_CHECK(i < size1());
362  RANGE_CHECK(m_rowStart[i] <= m_rowEnd[i]);//internal check
363  return const_row_iterator(values(), inner_indices(), m_rowStart[i],i);
364  }
365 
366  const_row_iterator row_end(std::size_t i) const {
367  SIZE_CHECK(i < size1());
368  RANGE_CHECK(m_rowStart[i] <= m_rowEnd[i]);//internal check
369  return const_row_iterator(values(), inner_indices(), m_rowEnd[i],i);
370  }
371 
372  row_iterator row_begin(std::size_t i) {
373  SIZE_CHECK(i < size1());
374  RANGE_CHECK(m_rowStart[i] <= m_rowEnd[i]);//internal check
375  return row_iterator(values(), inner_indices(), m_rowStart[i],i);
376  }
377 
378  row_iterator row_end(std::size_t i) {
379  SIZE_CHECK(i < size1());
380  RANGE_CHECK(m_rowStart[i] <= m_rowEnd[i]);//internal check
381  return row_iterator(values(), inner_indices(), m_rowEnd[i],i);
382  }
383 
384  typedef compressed_storage_iterator<value_type const, index_type const> const_column_iterator;
385  typedef compressed_storage_iterator<value_type, index_type const> column_iterator;
386 
387  row_iterator set_element(row_iterator pos, size_type index, value_type value) {
388  std::size_t row = pos.row();
389  RANGE_CHECK(row < size1());
390  RANGE_CHECK(size_type(row_end(row) - pos) <= row_capacity(row));
391  //todo: check in debug, that iterator position is valid
392 
393  //shortcut: element already exists.
394  if (pos != row_end(row) && pos.index() == index) {
395  *pos = value;
396  return pos;
397  }
398 
399  //get position of the element in the array.
400  difference_type arrayPos = pos - row_begin(row) + m_rowStart[row];
401 
402  //check that there is enough space in the row. this invalidates pos.
403  if (row_capacity(row) == inner_nnz(row))
404  reserve_row(row,std::max<std::size_t>(2*row_capacity(row),1));
405 
406  //copy the remaining elements further to make room for the new element
407  std::copy_backward(
408  m_values.begin()+arrayPos, m_values.begin() + m_rowEnd[row],
409  m_values.begin() + m_rowEnd[row] + 1
410  );
411  std::copy_backward(
412  m_indices.begin()+arrayPos, m_indices.begin() + m_rowEnd[row],
413  m_indices.begin() + m_rowEnd[row] + 1
414  );
415  //insert new element
416  m_values[arrayPos] = value;
417  m_indices[arrayPos] = index;
418  ++m_rowEnd[row];
419  ++m_nnz;
420 
421  //return new iterator to the inserted element.
422  return row_iterator(values(), inner_indices(), arrayPos,row);
423 
424  }
425 
426  row_iterator clear_range(row_iterator start, row_iterator end) {
427  std::size_t row = start.row();
428  RANGE_CHECK(row == end.row());
429  //get position of the elements in the array.
430  size_type rowEndPos = m_rowEnd[row];
431  size_type rowStartPos = m_rowStart[row];
432  difference_type rangeStartPos = start - row_begin(row)+rowStartPos;
433  difference_type rangeEndPos = end - row_begin(row)+rowStartPos;
434  difference_type rangeSize = end - start;
435 
436  //remove the elements in the range
437  std::copy(
438  m_values.begin()+rangeEndPos,m_values.begin() + rowEndPos, m_values.begin() + rangeStartPos
439  );
440  std::copy(
441  m_indices.begin()+rangeEndPos,m_indices.begin() + rowEndPos , m_indices.begin() + rangeStartPos
442  );
443  m_rowEnd[row] -= rangeSize;
444  m_nnz -= rangeSize;
445  //return new iterator to the next element
446  return row_iterator(values(), inner_indices(), rangeStartPos,row);
447  }
448 
449  row_iterator clear_element(row_iterator elem) {
450  RANGE_CHECK(elem != row_end());
451  row_iterator next = elem;
452  ++next;
453  clear_range(elem,next);
454  }
455 
456  // Serialization
457  template<class Archive>
458  void serialize(Archive &ar, const unsigned int /* file_version */) {
459  ar &boost::serialization::make_nvp("outer_indices", m_rowStart);
460  ar &boost::serialization::make_nvp("outer_indices_end", m_rowEnd);
461  ar &boost::serialization::make_nvp("inner_indices", m_indices);
462  ar &boost::serialization::make_nvp("values", m_values);
463  }
464 
465 private:
466  size_type m_size1;
467  size_type m_size2;
468  size_type m_nnz;
469  std::vector<index_type> m_rowStart;
470  std::vector<index_type> m_rowEnd;
471  std::vector<index_type> m_indices;
472  std::vector<value_type> m_values;
473  value_type m_zero;
474 };
475 
476 template<class T>
477 struct matrix_temporary_type<T,row_major,sparse_bidirectional_iterator_tag> {
478  typedef compressed_matrix<T> type;
479 };
480 
481 template<class T>
482 struct matrix_temporary_type<T,unknown_orientation,sparse_bidirectional_iterator_tag> {
483  typedef compressed_matrix<T> type;
484 };
485 
486 template<class T, class I>
487 struct const_expression<compressed_matrix<T,I> >{
488  typedef compressed_matrix<T,I> const type;
489 };
490 template<class T, class I>
491 struct const_expression<compressed_matrix<T,I> const>{
492  typedef compressed_matrix<T,I> const type;
493 };
494 
495 }
496 }
497 
498 #endif