// SvmProblems.h
// (listing extracted from the generated documentation of this file)
1 /*!
2  *
3  *
4  * \brief -
5  *
6  * \author T. Glasmachers, O.Krause
7  * \date 2013
8  *
9  *
10  * \par Copyright 1995-2015 Shark Development Team
11  *
12  * <BR><HR>
13  * This file is part of Shark.
14  * <http://image.diku.dk/shark/>
15  *
16  * Shark is free software: you can redistribute it and/or modify
17  * it under the terms of the GNU Lesser General Public License as published
18  * by the Free Software Foundation, either version 3 of the License, or
19  * (at your option) any later version.
20  *
21  * Shark is distributed in the hope that it will be useful,
22  * but WITHOUT ANY WARRANTY; without even the implied warranty of
23  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24  * GNU Lesser General Public License for more details.
25  *
26  * You should have received a copy of the GNU Lesser General Public License
27  * along with Shark. If not, see <http://www.gnu.org/licenses/>.
28  *
29  */
30 #ifndef SHARK_ALGORITHMS_QP_SVMPROBLEMS_H
31 #define SHARK_ALGORITHMS_QP_SVMPROBLEMS_H
32 
34 
35 namespace shark{
36 
37 /// Working set selection criteria are used as follows:
38 /// Criterion crit;
39 /// value = crit(problem, i, j);
41  /// \brief Select the most violatig pair (MVP)
42  ///
43  /// \return maximal KKT vioation
44  /// \param problem the svm problem to select the working set for
45  /// \param i first working set component
46  /// \param j second working set component
47  template<class Problem>
48  double operator()(Problem& problem, std::size_t& i, std::size_t& j)
49  {
50  double largestUp = -1e100;
51  double smallestDown = 1e100;
52 
53  for (std::size_t a=0; a < problem.active(); a++)
54  {
55  double ga = problem.gradient(a);
56  if (!problem.isUpperBound(a))
57  {
58  if (ga > largestUp)
59  {
60  largestUp = ga;
61  i = a;
62  }
63  }
64  if (!problem.isLowerBound(a))
65  {
66  if (ga < smallestDown)
67  {
68  smallestDown = ga;
69  j = a;
70  }
71  }
72  }
73 
74  // MVP stopping condition
75  return largestUp - smallestDown;
76  }
77 
78  void reset(){}
79 };
80 
81 
83 
84  /// \brief Select a working set according to the second order algorithm of LIBSVM 2.8
85  ///
86  /// \return maximal KKT vioation
87  /// \param problem the svm problem to select the working set for
88  /// \param i first working set component
89  /// \param j second working set component
90  template<class Problem>
91  double operator()(Problem& problem, std::size_t& i, std::size_t& j)
92  {
93  i = 0;
94  j = 1;
95 
96  double smallestDown = 1e100;
97  double largestUp = -1e100;
98 
99  for (std::size_t a=0; a < problem.active(); a++)
100  {
101  double ga = problem.gradient(a);
102  //if (aa < problem.boxMax(a))
103  if (!problem.isUpperBound(a))
104  {
105  if (ga > largestUp)
106  {
107  largestUp = ga;
108  i = a;
109  }
110  }
111  }
112  if (largestUp == -1e100) return 0.0;
113 
114  // find the second index using second order information
115  typename Problem::QpFloatType* q = problem.quadratic().row(i, 0, problem.active());
116  double best = 0.0;
117  for (std::size_t a = 0; a < problem.active(); a++){
118  double ga = problem.gradient(a);
119  if (!problem.isLowerBound(a))
120  {
121  smallestDown=std::min(smallestDown,ga);
122 
123  double gain = detail::maximumGainQuadratic2DOnLine(
124  problem.diagonal(i),
125  problem.diagonal(a),
126  q[a],
127  largestUp,ga
128  );
129  if (gain> best)
130  {
131  best = gain;
132  j = a;
133  }
134  }
135  }
136 
137  if (best == 0.0) return 0.0; // numerical accuracy reached :(
138 
139  // MVP stopping condition
140  return largestUp - smallestDown;
141  }
142 
143  void reset(){}
144 };
145 
147 public:
 /// \brief Start in LibSVM mode; HMG needs a previously updated pair to seed from.
148  HMGSelectionCriterion():useLibSVM(true),smallProblem(false){}
149 
150  /// \brief Select a working set according to the hybrid maximum gain (HMG) algorithm
151  ///
152  /// \return maximal KKT violation
153  /// \param problem the svm problem to select the working set for
154  /// \param i first working set component
155  /// \param j second working set component
156  template<class Problem>
157  double operator()(Problem& problem, std::size_t& i, std::size_t& j)
158  {
 // Delegate to the second order LibSVM criterion on the first call,
 // for small problems, and whenever the previous pair sits in a corner
 // of its box (HMG has no useful seed pair in these cases).
159  if (smallProblem || useLibSVM || isInCorner(problem))
160  {
161  useLibSVM = false;
 // NOTE(review): presumably the problem counts as "small" once the whole
 // active kernel matrix fits into the cache -- confirm the semantics of
 // getMaxCacheSize().
162  if(!smallProblem && sqr(problem.active()) < problem.quadratic().getMaxCacheSize())
163  smallProblem = true;
164  LibSVMSelectionCriterion libSVMSelection;
165  double value = libSVMSelection(problem,i, j);
166  last_i = i;
167  last_j = j;
168  return value;
169  }
170  //old HMG
 // Pair each of the two previously updated variables with its best
 // partner and keep the combination promising the larger gain.
171  MGStep besti = selectMGVariable(problem,last_i);
172  if(besti.violation == 0.0)
173  return 0;
174  MGStep bestj = selectMGVariable(problem,last_j);
175 
176  if(bestj.gain > besti.gain){
177  i = last_j;
178  j = bestj.index;
179  }else{
180  i = last_i;
181  j = besti.index;
182  }
 // keep the convention that i carries the larger gradient
183  if (problem.gradient(i) < problem.gradient(j))
184  std::swap(i, j);
185  last_i = i;
186  last_j = j;
 // besti.violation and bestj.violation are computed over the same active
 // set, so returning besti's value suffices for the stopping condition.
187  return besti.violation;
188  }
189 
 /// \brief Reset to the initial state: the next call uses the LibSVM criterion again.
190  void reset(){
191  useLibSVM = true;
192  smallProblem = false;
193  }
194 
195 private:
 /// \brief True iff both previously selected variables lie (within a
 /// relative tolerance of 1e-8 of the box width) on a box boundary.
196  template<class Problem>
197  bool isInCorner(Problem const& problem)const{
198  double Li = problem.boxMin(last_i);
199  double Ui = problem.boxMax(last_i);
200  double Lj = problem.boxMin(last_j);
201  double Uj = problem.boxMax(last_j);
202  double eps_i = 1e-8 * (Ui - Li);
203  double eps_j = 1e-8 * (Uj - Lj);
204 
205  if ((problem.alpha(last_i) <= Li + eps_i || problem.alpha(last_i) >= Ui - eps_i)
206  && ((problem.alpha(last_j) <= Lj + eps_j || problem.alpha(last_j) >= Uj - eps_j)))
207  {
208  return true;
209  }
210  return false;
211  }
 /// \brief Result of a single maximum gain search: candidate partner index,
 /// current KKT violation and the achievable gain.
212  struct MGStep{
213  std::size_t index;//index of variable
214  double violation;//computed gradientValue
215  double gain;
216  };
 /// \brief Find the best partner for the fixed variable i by maximizing
 /// the (box-constrained) second order gain over all active variables.
 ///
 /// Also records the maximum KKT violation seen during the scan, so the
 /// caller can use it as a stopping criterion.
217  template<class Problem>
218  MGStep selectMGVariable(Problem& problem,std::size_t i) const{
219 
220  //best variable pair found so far
221  std::size_t bestIndex = 0;//index of variable
222  double bestGain = 0;
223 
224  double largestUp = -1e100;
225  double smallestDown = 1e100;
226 
227  // try combinations with b = old_i
228  typename Problem::QpFloatType* q = problem.quadratic().row(i, 0, problem.active());
229  double ab = problem.alpha(i);
230  double db = problem.diagonal(i);
231  double Lb = problem.boxMin(i);
232  double Ub = problem.boxMax(i);
233  double gb = problem.gradient(i);
234  for (std::size_t a = 0; a < problem.active(); a++)
235  {
236  double ga = problem.gradient(a);
237 
 // track the KKT violation as a by-product of the scan
238  if (!problem.isUpperBound(a))
239  largestUp = std::max(largestUp,ga);
240  if (!problem.isLowerBound(a))
241  smallestDown = std::min(smallestDown,ga);
242 
243  if (a == i) continue;
244  //get maximum unconstrained step length
 // NOTE(review): the denominator d(a) + d(b) - 2 q[a] is not guarded
 // against zero (unlike updateSMO which clamps it to 1e-12) -- confirm
 // this cannot divide by zero for the kernels used here.
245  double denominator = (problem.diagonal(a) + db - 2.0 * q[a]);
246  double mu_max = (ga - gb) / denominator;
247 
248  //check whether a step > 0 is possible at all
249  //~ if( mu_max > 0 && ( problem.isUpperBound(a) || problem.isLowerBound(b)))continue;
250  //~ if( mu_max < 0 && ( problem.isLowerBound(a) || problem.isUpperBound(b)))continue;
251 
252  //constraint step to box
253  double aa = problem.alpha(a);
254  double La = problem.boxMin(a);
255  double Ua = problem.boxMax(a);
256  double mu_star = mu_max;
257  if (aa + mu_star < La) mu_star = La - aa;
258  else if (mu_star + aa > Ua) mu_star = Ua - aa;
259  if (ab - mu_star < Lb) mu_star = ab - Lb;
260  else if (ab - mu_star > Ub) mu_star = ab - Ub;
261 
 // gain of the clipped step of the 1D quadratic along the line
262  double gain = mu_star * (2.0 * mu_max - mu_star) * denominator;
263 
264  // select the largest gain
265  if (gain > bestGain)
266  {
267  bestGain = gain;
268  bestIndex = a;
269  }
270  }
271  MGStep step;
272  step.violation= largestUp-smallestDown;
273  step.index = bestIndex;
274  step.gain=bestGain;
275  return step;
276 
277  }
278 
 // previously selected working set pair (seed for the next HMG search)
279  std::size_t last_i;
280  std::size_t last_j;
281 
 // true while the LibSVM fallback criterion must be used
282  bool useLibSVM;
 // true once the problem is small enough to always use the LibSVM criterion
283  bool smallProblem;
284 };
285 
286 
287 template<class Problem>
289 public:
290  typedef typename Problem::QpFloatType QpFloatType;
291  typedef typename Problem::MatrixType MatrixType;
293 
 /// \brief Wrap the given problem and initialize the gradient
 /// g(a) = linear(a) - sum_i Q(a,i)*alpha(i) as well as the
 /// bound-status flags of all variables.
294  SvmProblem(Problem& problem)
295  : m_problem(problem)
296  , m_gradient(problem.linear)
297  , m_active(problem.dimensions())
298  , m_alphaStatus(problem.dimensions(),AlphaFree){
299  //compute the gradient if alpha != 0
300  for (std::size_t i=0; i != dimensions(); i++){
301  double v = alpha(i);
302  if (v != 0.0){
303  QpFloatType* q = quadratic().row(i, 0, dimensions());
304  for (std::size_t a=0; a < dimensions(); a++)
305  m_gradient(a) -= q[a] * v;
306  }
307  updateAlphaStatus(i);
308  }
309  }
 /// \brief Total number of variables of the problem.
310  std::size_t dimensions()const{
311  return m_problem.dimensions();
312  }
313 
 /// \brief Number of variables currently in the active set.
314  std::size_t active()const{
315  return m_active;
316  }
317 
 /// \brief Lower box bound; a deactivated variable is pinned to its current alpha.
318  double boxMin(std::size_t i)const{
319  return m_alphaStatus[i]==AlphaDeactivated? alpha(i): m_problem.boxMin(i);
320  }
 /// \brief Upper box bound; a deactivated variable is pinned to its current alpha.
321  double boxMax(std::size_t i)const{
322  return m_alphaStatus[i]==AlphaDeactivated? alpha(i): m_problem.boxMax(i);
323  }
 /// \brief Whether alpha(i) currently sits on its lower bound.
324  bool isLowerBound(std::size_t i)const{
325  return m_alphaStatus[i] & AlphaLowerBound;
326  }
 /// \brief Whether alpha(i) currently sits on its upper bound.
327  bool isUpperBound(std::size_t i)const{
328  return m_alphaStatus[i] & AlphaUpperBound;
329  }
330 
331  /// representation of the quadratic part of the objective function
332  MatrixType& quadratic(){
333  return m_problem.quadratic;
334  }
335 
 /// \brief Linear part of the objective for variable i.
336  double linear(std::size_t i)const{
337  return m_problem.linear(i);
338  }
339 
 /// \brief Current value of variable i.
340  double alpha(std::size_t i)const{
341  return m_problem.alpha(i);
342  }
343 
 /// \brief Diagonal entry Q(i,i) of the quadratic matrix.
344  double diagonal(std::size_t i)const{
345  return m_problem.diagonal(i);
346  }
347 
 /// \brief Gradient of the objective with respect to variable i.
348  double gradient(std::size_t i)const{
349  return m_gradient(i);
350  }
351 
 /// \brief Original (pre-permutation) index of variable i.
352  std::size_t permutation(std::size_t i)const{
353  return m_problem.permutation[i];
354  }
355 
 /// \brief Solution vector in the original (unpermuted) variable order.
356  RealVector getUnpermutedAlpha()const{
357  RealVector alpha(dimensions());
358  for (std::size_t i=0; i<dimensions(); i++)
359  alpha(m_problem.permutation[i]) = m_problem.alpha(i);
360  return alpha;
361  }
362 
363  ///\brief Does an update of SMO given a working set with indices i and j.
 ///
 /// Solves the two-variable sub-problem analytically (clamping the
 /// denominator from below for numerical safety) and applies the step.
364  void updateSMO(std::size_t i, std::size_t j){
365  SIZE_CHECK(i < active());
366  SIZE_CHECK(j < active());
367  // get the matrix row corresponding to the first variable of the working set
368  QpFloatType* qi = quadratic().row(i, 0, active());
369 
370  // solve the sub-problem defined by i and j
371  double numerator = gradient(i) - gradient(j);
372  double denominator = diagonal(i) + diagonal(j) - 2.0 * qi[j];
373  denominator = std::max(denominator,1.e-12);
374  double mu = numerator/denominator;
375 
376  //update alpha in a numerically stable way
377  applyStep(i,j, mu);
378  }
379 
380  ///\brief Returns the current function value of the problem.
381  double functionValue()const{
382  return 0.5*inner_prod(m_gradient+m_problem.linear,m_problem.alpha);
383  }
384 
 // shrinking is a no-op in this (unshrinking) problem formulation
385  bool shrink(double){return false;}
386  void reshrink(){}
387  void unshrink(){}
388 
389  ///\brief Remove the i-th example from the problem while taking the equality constraint into account.
390  ///
391  /// The i-th variable is driven towards zero while other (unspecified) variables
392  /// are corrected so that the constraint stays fulfilled.
393  /// After the call boxMin(i) and boxMax(i) both return the final alpha(i).
394  void deactivateVariable(std::size_t i){
395  SIZE_CHECK(i < dimensions());
396  //we need to correct for the equality constraint
397  //that means we have to move enough variables to satisfy the constraint again.
398  for (std::size_t j=0; j<dimensions(); j++){
399  if(alpha(i) == 0.0) break;
400  if (j == i || m_alphaStatus[j] == AlphaDeactivated) continue;
401  //propose the maximum step possible and let applyStep cut it down.
 // NOTE(review): for alpha(i) > 0 the proposed step is negative, and
 // applyStep does not clip negative steps against the box -- confirm
 // this is always feasible here.
402  applyStep(i,j, -alpha(i));
403 
404  }
405  m_alphaStatus[i] = AlphaDeactivated;
406  }
407  ///\brief Reactivate an previously deactivated variable.
408  void activateVariable(std::size_t i){
409  SIZE_CHECK(i < dimensions());
410  m_alphaStatus[i] = AlphaFree;
411  updateAlphaStatus(i);
412  }
413 
414  /// exchange two variables via the permutation
415  void flipCoordinates(std::size_t i, std::size_t j)
416  {
417  SIZE_CHECK(i < dimensions());
418  SIZE_CHECK(j < dimensions());
419  if (i == j) return;
420 
 // the wrapped problem swaps its own data; mirror the swap in our caches
421  m_problem.flipCoordinates(i, j);
422  std::swap( m_gradient[i], m_gradient[j]);
423  std::swap( m_alphaStatus[i], m_alphaStatus[j]);
424  }
425 
426  /// \brief Scales all box constraints by a constant factor and adapts the solution using a separate scaling
427  void scaleBoxConstraints(double factor, double variableScalingFactor){
428  m_problem.scaleBoxConstraints(factor,variableScalingFactor);
429  for(std::size_t i = 0; i != this->dimensions(); ++i){
430  //don't change deactivated variables
431  if(m_alphaStatus[i] == AlphaDeactivated) continue;
 // rescale only the quadratic contribution of the gradient:
 // g = linear + (g - linear) * variableScalingFactor
432  m_gradient(i) -= linear(i);
433  m_gradient(i) *= variableScalingFactor;
434  m_gradient(i) += linear(i);
435  updateAlphaStatus(i);
436  }
437  }
438 
439  /// \brief adapts the linear part of the problem and updates the internal data structures accordingly.
440  virtual void setLinear(std::size_t i, double newValue){
 // the gradient contains the linear term additively, so exchange it
441  m_gradient(i) -= linear(i);
442  m_gradient(i) += newValue;
443  m_problem.linear(i) = newValue;
444  }
445 
446  double checkKKT()const{
447  double largestUp = -1e100;
448  double smallestDown = 1e100;
449  for (std::size_t a = 0; a < active(); a++){
450  if (!isLowerBound(a))
451  smallestDown = std::min(smallestDown,gradient(a));
452  if (!isUpperBound(a))
453  largestUp = std::max(largestUp,gradient(a));
454  }
455  return largestUp - smallestDown;
456  }
457 
458 protected:
 /// the wrapped problem instance (stored by value)
459  Problem m_problem;
460 
461  /// gradient of the objective function at the current alpha
462  RealVector m_gradient;
463 
 /// number of variables in the active set (the first m_active variables)
464  std::size_t m_active;
465 
466  /// \brief Stores the status, whether alpha is on the lower or upper bound, or whether it is free.
467  std::vector<char> m_alphaStatus;
468 
469  ///\brief Update the problem by a proposed step i taking the box constraints into account.
470  ///
471  /// A step length 0<=lambda<=1 is found so that
472  /// boxMin(i) <= alpha(i)+lambda*step <= boxMax(i)
473  /// and
474  /// boxMin(j) <= alpha(j)-lambda*step <= boxMax(j)
475  /// the update is performed in a numerically stable way and the internal data structures
476  /// are also updated.
 ///
 /// NOTE(review): only positive steps are clipped against the box here;
 /// a negative step is applied unclipped -- confirm that all callers only
 /// pass steps for which this is feasible.
477  virtual void applyStep(std::size_t i, std::size_t j, double step){
478  SIZE_CHECK(i < active());
479  SIZE_CHECK(j < active());
480  // do the update of the alpha values carefully - avoid numerical problems
481  double Ui = boxMax(i);
482  double Lj = boxMin(j);
483  double aiOld = m_problem.alpha(i);
484  double ajOld = m_problem.alpha(j);
485  double& ai = m_problem.alpha(i);
486  double& aj = m_problem.alpha(j);
 // step hits a bound: set the bounded variable exactly to its bound
 // instead of accumulating rounding error
487  if (step >= std::min(Ui - ai, aj - Lj))
488  {
489  if (Ui - ai > aj - Lj)
490  {
491  step = aj - Lj;
492  ai += step;
493  aj = Lj;
494  }
495  else if (Ui - ai < aj - Lj)
496  {
497  step = Ui - ai;
498  ai = Ui;
499  aj -= step;
500  }
501  else
502  {
503  step = Ui - ai;
504  ai = Ui;
505  aj = Lj;
506  }
507  }
508  else
509  {
510  ai += step;
511  aj -= step;
512  }
513 
 // nothing changed: gradient and status flags are still valid
514  if(ai == aiOld && aj == ajOld)return;
515 
516  //Update internal data structures (gradient and alpha status)
517  QpFloatType* qi = quadratic().row(i, 0, active());
518  QpFloatType* qj = quadratic().row(j, 0, active());
519  for (std::size_t a = 0; a < active(); a++)
520  m_gradient(a) -= step * qi[a] - step * qj[a];
521 
522  //update boundary status
523  updateAlphaStatus(i);
524  updateAlphaStatus(j);
525  }
526 
 /// \brief Recompute the bound-status flags of variable i from its current alpha.
527  void updateAlphaStatus(std::size_t i){
528  SIZE_CHECK(i < dimensions());
529  m_alphaStatus[i] = AlphaFree;
530  if(m_problem.alpha(i) == boxMax(i))
531  m_alphaStatus[i] |= AlphaUpperBound;
532  if(m_problem.alpha(i) == boxMin(i))
533  m_alphaStatus[i] |= AlphaLowerBound;
534  }
535 };
536 
537 template<class Problem>
538 struct SvmShrinkingProblem : public SvmProblem<Problem>{
539 private:
 // NOTE(review): the base_type typedef (presumably
 // typedef SvmProblem<Problem> base_type;) is elided in this listing.
541 public:
545 
 /// \brief Construct from a problem; shrinking can be disabled via the flag.
546  SvmShrinkingProblem(Problem& problem, bool shrink=true)
547  : base_type(problem)
548  , m_isUnshrinked(false)
549  , m_shrink(shrink)
550  , m_gradientEdge(problem.linear){}
551 
552  using base_type::alpha;
553  using base_type::gradient;
554  using base_type::linear;
555  using base_type::active;
556  using base_type::dimensions;
557  using base_type::quadratic;
558  using base_type::isLowerBound;
559  using base_type::isUpperBound;
560  using base_type::boxMin;
561  using base_type::boxMax;
562 
 /// \brief Shrink variables that cannot contribute to further progress.
 ///
 /// \param epsilon current target accuracy; unshrinking is triggered once
 /// the active-set KKT violation comes close to it.
 /// \return false iff shrinking is disabled.
563  bool shrink(double epsilon){
564  if(!m_shrink) return false;
565 
566  double largestUp;
567  double smallestDown;
568  getMaxKKTViolations(largestUp,smallestDown,active());
569 
570  // check whether unshrinking is necessary at this accuracy level
571  if (!m_isUnshrinked && (largestUp - smallestDown < 10.0 * epsilon))
572  {
573  unshrink();
574  //recalculate maximum KKT violation for immediate re-shrinking
575  getMaxKKTViolations(largestUp,smallestDown,dimensions());
576  }
577  //shrink
 // iterate backwards so that swapping a shrunk variable to the end of
 // the active set does not skip elements
578  for (std::size_t a = this->active(); a > 0; --a){
579  if(testShrinkVariable(a-1,largestUp,smallestDown))
580  this->shrinkVariable(a-1);
581  }
582  return true;
583  }
584 
585  ///\brief Unshrink the problem
586  void unshrink(){
587  if (active() == dimensions()) return;
588  m_isUnshrinked = true;
589 
590  // recompute the gradient of the whole problem.
591  // we assume here that all shrunk variables are on the border of the problem.
592  // the gradient of the active components is already correct and
593  // we store the gradient of the subset of variables which are on the
594  // borders of the box for the whole set.
595  // Thus we only have to recompute the part of the gradient which is
596  // based on variables in the active set which are not on the border.
597  for (std::size_t a = active(); a < dimensions(); a++)
598  this->m_gradient(a) = m_gradientEdge(a);
599 
600  for (std::size_t i = 0; i < active(); i++)
601  {
602  //check whether alpha value is already stored in gradientEdge
603  if (isUpperBound(i) || isLowerBound(i)) continue;
604 
 // subtract the contribution of the free variable i from the gradient
 // of all previously shrunk variables
605  QpFloatType* q = quadratic().row(i, 0, dimensions());
606  for (std::size_t a = active(); a < dimensions(); a++)
607  this->m_gradient(a) -= alpha(i) * q[a] ;
608  }
609 
610  this->m_active = dimensions();
611  }
612 
 /// \brief Enable or disable shrinking; disabling restores the full active set.
613  void setShrinking(bool shrinking){
614  m_shrink = shrinking;
615  if(!shrinking)
616  unshrink();
617  }
618 
619  /// \brief Scales all box constraints by a constant factor and adapts the solution by scaling it by the same factor.
620  void scaleBoxConstraints(double factor, double variableScalingFactor){
621  base_type::scaleBoxConstraints(factor,variableScalingFactor);
622  if(factor != variableScalingFactor){
 // the cached edge gradient is no longer valid: reset it to the linear term
623  for(std::size_t i = 0; i != dimensions(); ++i){
624  m_gradientEdge(i) = linear(i);
625  }
626  }
627  else{
 // rescale only the quadratic contribution of the cached edge gradient
628  for(std::size_t i = 0; i != dimensions(); ++i){
629  m_gradientEdge(i) -= linear(i);
630  m_gradientEdge(i) *= factor;
631  m_gradientEdge(i) += linear(i);
632  }
633  }
634  }
635 
636  /// \brief adapts the linear part of the problem and updates the internal data structures accordingly.
637  virtual void setLinear(std::size_t i, double newValue){
 // the edge gradient contains the linear term additively as well
638  m_gradientEdge(i) -= linear(i);
639  m_gradientEdge(i) += newValue;
640  base_type::setLinear(i,newValue);
641  }
642 protected:
643  ///\brief Update the problem by a proposed step i taking the box constraints into account.
644  ///
645  /// A step length 0<=lambda<=1 is found so that
646  /// boxMin(i) <= alpha(i)+lambda*step <= boxMax(i)
647  /// and
648  /// boxMin(j) <= alpha(j)-lambda*step <= boxMax(j)
649  /// the update is performed in a numerically stable way and the internal data structures
650  /// are also updated.
651  virtual void applyStep(std::size_t i, std::size_t j, double step){
652  SIZE_CHECK(i < active());
653  SIZE_CHECK(j < active());
654  double aiOld = alpha(i);
655  double ajOld = alpha(j);
656  //call base class to do the step
657  base_type::applyStep(i,j,step);
658  double ai = alpha(i);
659  double aj = alpha(j);
 // if ai did not move, the step was empty (then aj did not move either)
660  if(!m_shrink || ai == aiOld) return;
661  // there existed a feasible step and we are shrinking,
662  // so update the gradient edge data structure to keep up with changes
663  updateGradientEdge(i,aiOld,ai);
664  updateGradientEdge(j,ajOld,aj);
665 
666  }
667 private:
 /// \brief Maintain m_gradientEdge after variable i moved from oldAlpha to newAlpha.
 ///
 /// m_gradientEdge caches the gradient contribution of all variables sitting
 /// on the box border; it only needs an update when a variable enters or
 /// leaves a border (or jumps between borders).
668  void updateGradientEdge(std::size_t i, double oldAlpha, double newAlpha){
669  SIZE_CHECK(i < active());
670  bool isInsideOld = oldAlpha > boxMin(i) && oldAlpha < boxMax(i);
671  bool isInsideNew = newAlpha > boxMin(i) && newAlpha < boxMax(i);
672  //check if variable is relevant at all, that means that old and new alpha value are inside
673  //or old alpha is 0 and new alpha inside
674  if( (oldAlpha == 0 || isInsideOld) && isInsideNew )
675  return;
676 
677  //compute change to the gradient
678  double diff = 0;
679  if(!isInsideOld)//the value was on a border, so remove its old influence on the gradient
680  diff -=oldAlpha;
681  if(!isInsideNew){//variable entered boundary or changed from one boundary to another
682  diff += newAlpha;
683  }
684 
685  QpFloatType* q = quadratic().row(i, 0, dimensions());
686  for(std::size_t a = 0; a != dimensions(); ++a){
687  m_gradientEdge(a) -= diff*q[a];
688  }
689  }
690  ///\brief Shrink the variable from the Problem.
 ///
 /// Swaps variable i to the end of the active set and shortens the set by one.
691  void shrinkVariable(std::size_t i){
692  SIZE_CHECK(i < active());
693  base_type::flipCoordinates(i,active()-1);
694  std::swap( m_gradientEdge[i], m_gradientEdge[active()-1]);
695  --this->m_active;
696  }
697 
698 
699  bool testShrinkVariable(std::size_t a, double largestUp, double smallestDown)const{
700  if (
701  ( isLowerBound(a) && gradient(a) < smallestDown)
702  || ( isUpperBound(a) && gradient(a) >largestUp)
703  ){
704  // In this moment no feasible step including this variable
705  // can improve the objective. Thus deactivate the variable.
706  return true;
707  }
708  return false;
709  }
710 
711  void getMaxKKTViolations(double& largestUp, double& smallestDown, std::size_t maxIndex){
712  largestUp = -1e100;
713  smallestDown = 1e100;
714  for (std::size_t a = 0; a < maxIndex; a++){
715  if (!isLowerBound(a))
716  smallestDown = std::min(smallestDown,gradient(a));
717  if (!isUpperBound(a))
718  largestUp = std::max(largestUp,gradient(a));
719  }
720  }
721 
 ///\brief true once the full problem has been unshrunk near the target accuracy.
722  bool m_isUnshrinked;
723 
724  ///\brief true if shrinking is to be used.
725  bool m_shrink;
726 
727  ///\brief Stores the gradient of the alpha dimensions which are either 0 or C
728  RealVector m_gradientEdge;
729 };
730 
731 }
732 #endif