ExampleModifiedKernelMatrix.h
//===========================================================================
/*!
 *
 *
 * \brief Kernel matrix which supports kernel evaluations on data with missing features.
 *
 *
 * \par
 *
 *
 *
 * \author T. Glasmachers
 * \date 2007-2012
 *
 *
 * \par Copyright 1995-2015 Shark Development Team
 *
 * <BR><HR>
 * This file is part of Shark.
 * <http://image.diku.dk/shark/>
 *
 * Shark is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Shark is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
 *
 */
//===========================================================================


#ifndef SHARK_LINALG_EXAMPLEMODIFIEDKERNELMATRIX_H
#define SHARK_LINALG_EXAMPLEMODIFIEDKERNELMATRIX_H

#include <shark/Data/Dataset.h>
#include <shark/LinAlg/Base.h>

#include <vector>
#include <cmath>


namespace shark {


/// Kernel matrix which supports kernel evaluations on data with missing features. In addition, each entry of the
/// Gram matrix between examples i and j is scaled by the reciprocals of two scaling coefficients corresponding to
/// the examples i and j, respectively. To this end, this class holds a vector of as many scaling coefficients
/// as there are examples in the dataset.
/// @note: most of the code in this class is borrowed from KernelMatrix by copy/paste, which is obviously terribly ugly.
/// The classes in this file should be refactored as soon as possible.
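///
/// \par Example usage
/// A minimal usage sketch (illustrative only: it assumes a Gaussian RBF kernel, which requires
/// including GaussianRbfKernel.h, and toy data without actual missing features):
/// \code
/// std::vector<RealVector> points(10, RealVector(3, 0.0));        // toy inputs
/// Data<RealVector> data = createDataFromRange(points);
/// DenseRbfKernel kernel(0.5);                                     // any AbstractKernelFunction
/// ExampleModifiedKernelMatrix<RealVector, float> K(kernel, data);
/// RealVector s(K.size(), 1.0);                                    // one coefficient per example
/// K.setScalingCoefficients(s);                                    // must be set before evaluating entries
/// float k01 = K(0, 1);                                            // = kernel(x_0, x_1) / (s_0 * s_1)
/// \endcode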
template <typename InputType, typename CacheType>
class ExampleModifiedKernelMatrix
{
public:
    typedef CacheType QpFloatType;

    /// Constructor
    /// \param kernelfunction kernel function defining the Gram matrix
    /// \param data           data to evaluate the kernel function
    ExampleModifiedKernelMatrix(
        AbstractKernelFunction<InputType> const& kernelfunction,
        Data<InputType> const& data)
    : kernel(kernelfunction)
    , m_accessCounter( 0 )
    {
        std::size_t elements = data.numberOfElements();
        x.resize(elements);
        // fill x with iterators pointing to the individual elements of the data set
        boost::iota(x, data.elements().begin());
    }

    /// return a single matrix entry
    QpFloatType operator () (std::size_t i, std::size_t j) const
    { return entry(i, j); }

    /// swap two variables
    void flipColumnsAndRows(std::size_t i, std::size_t j)
    { std::swap(x[i], x[j]); }

    /// return the size of the quadratic matrix
    std::size_t size() const
    { return x.size(); }

    /// query the kernel access counter
    unsigned long long getAccessCount() const
    { return m_accessCounter; }

    /// reset the kernel access counter
    void resetAccessCount()
    { m_accessCounter = 0; }

    /// return a single matrix entry
    /// Override the Base::entry(...)
    /// formula: \f$ K\left(x_i, x_j\right)\frac{1}{s_i}\frac{1}{s_j} \f$
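    /// (for instance, a raw kernel value of 0.8 with coefficients \f$ s_i = 2 \f$ and \f$ s_j = 4 \f$
    /// gives the entry \f$ 0.8 \cdot \tfrac{1}{2} \cdot \tfrac{1}{4} = 0.1 \f$; these numbers are purely illustrative)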
    QpFloatType entry(std::size_t i, std::size_t j) const
    {
        // typedef typename InputType::value_type InputValueType;
        INCREMENT_KERNEL_COUNTER( m_accessCounter );
        SIZE_CHECK(i < size());
        SIZE_CHECK(j < size());

        return (QpFloatType)evalSkipMissingFeatures(
            kernel,
            *x[i],
            *x[j]) * (1.0 / m_scalingCoefficients[i]) * (1.0 / m_scalingCoefficients[j]);
    }

    /// \brief Computes the i-th row of the kernel matrix.
    ///
    /// The entries start,...,end of the i-th row are computed and stored in storage.
    /// There must be enough room for this operation preallocated.
    void row(std::size_t i, std::size_t start, std::size_t end, QpFloatType* storage) const{
        for(std::size_t j = start; j < end; j++){
            storage[j-start] = entry(i, j);
        }
    }

    /// \brief Computes the kernel-matrix
    template<class M>
    void matrix(
        M& storage
    ) const{
        for(std::size_t i = 0; i != size(); ++i){
            for(std::size_t j = 0; j != size(); ++j){
                storage(i,j) = entry(i,j);
            }
        }
    }

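    /// Set the per-example scaling coefficients.
    /// \param scalingCoefficients one coefficient per example; the entries are used as
    ///        divisors in entry(), so each of them must be non-zero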
    void setScalingCoefficients(const RealVector& scalingCoefficients)
    {
        SIZE_CHECK(scalingCoefficients.size() == size());
        m_scalingCoefficients = scalingCoefficients;
    }

protected:

    /// Kernel function defining the kernel Gram matrix
    const AbstractKernelFunction<InputType>& kernel;

    typedef typename Data<InputType>::const_element_range::iterator PointerType;
    /// Array of data pointers for kernel evaluations
    std::vector<PointerType> x;
    /// counter for the kernel accesses
    mutable unsigned long long m_accessCounter;

private:

    /// The scaling coefficients
    RealVector m_scalingCoefficients;
};

}
#endif