Shark machine learning library
About Shark
News!
Contribute
Credits and copyright
Downloads
Getting Started
Installation
Using the docs
Documentation
Tutorials
Quick references
Class list
Global functions
FAQ
Showroom
include
shark
Algorithms
StoppingCriteria
TrainingError.h
Go to the documentation of this file.
1
/*!
2
*
3
*
4
* \brief Stopping Criterion which stops, when the training error seems to converge
5
*
6
*
7
*
8
* \author O. Krause
9
* \date 2010
10
*
11
*
12
* \par Copyright 1995-2015 Shark Development Team
13
*
14
* <BR><HR>
15
* This file is part of Shark.
16
* <http://image.diku.dk/shark/>
17
*
18
* Shark is free software: you can redistribute it and/or modify
19
* it under the terms of the GNU Lesser General Public License as published
20
* by the Free Software Foundation, either version 3 of the License, or
21
* (at your option) any later version.
22
*
23
* Shark is distributed in the hope that it will be useful,
24
* but WITHOUT ANY WARRANTY; without even the implied warranty of
25
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26
* GNU Lesser General Public License for more details.
27
*
28
* You should have received a copy of the GNU Lesser General Public License
29
* along with Shark. If not, see <http://www.gnu.org/licenses/>.
30
*
31
*/
32
33
#ifndef SHARK_TRAINERS_STOPPINGCRITERA_TRAININGERROR_H
34
#define SHARK_TRAINERS_STOPPINGCRITERA_TRAININGERROR_H
35
36
#include "AbstractStoppingCriterion.h"
#include <shark/Core/ResultSets.h>

#include <limits>
#include <numeric>
#include <queue>
40
namespace
shark
{
41
42
/// \brief This stopping criterion tracks the improvement of the error function of the training error over an interval of iterations.
43
///
44
/// If at one point, the difference between the error values of the beginning and the end of the interval are smaller
45
/// than a certain value, this stopping criterion assumes convergence and stops.
46
/// Of course, this may be misleading, when the algorithm temporarily gets stuck at a saddle point of the error surface.
47
/// The functions assumes that the algorithm is minimizing. For details, see:
48
///
49
/// Lutz Prechelt. Early Stopping - but when? In Genevieve B. Orr and
50
/// Klaus-Robert Müller: Neural Networks: Tricks of the Trade, volume
51
/// 1524 of LNCS, Springer, 1997.
52
///
53
template
<
class
Po
int
Type = RealVector>
54
class
TrainingError
:
public
AbstractStoppingCriterion
< SingleObjectiveResultSet<PointType> >{
55
public
:
56
/// constructs the TrainingError generalization loss
57
/// @param intervalSize size of the interval over which the progress is monitored
58
/// @param minDifference minimum difference between start and end of the interval allowed before training stops
59
TrainingError
(
size_t
intervalSize,
double
minDifference){
60
m_minDifference
= minDifference;
61
m_intervalSize
= intervalSize;
62
reset
();
63
}
64
/// returns true if training should stop
65
bool
stop
(
const
SingleObjectiveResultSet<PointType>
&
set
){
66
67
m_interval
.pop();
68
m_interval
.push(
set
.value);
69
return
(
m_interval
.front()-
set
.value) >= 0
70
&& (
m_interval
.front()-
set
.value) <
m_minDifference
;
71
}
72
/// resets the internal state
73
void
reset
(){
74
m_interval
= std::queue<double>();
75
for
(
size_t
i = 0; i !=
m_intervalSize
;++i) {
76
m_interval
.push(
std::numeric_limits<double>::max
());
77
}
78
}
79
protected
:
80
/// monitored training interval
81
std::queue<double>
m_interval
;
82
/// minmum difference allowed
83
double
m_minDifference
;
84
/// size of the interval
85
size_t
m_intervalSize
;
86
};
87
}
88
89
90
#endif