DeepNetworkTrainingRBM.cpp
//deep network training with RBM-based unsupervised pre-training
#include <shark/Models/FFNet.h> // neural network for supervised training
#include <shark/Unsupervised/RBM/BinaryRBM.h> // model for unsupervised pre-training

//training the model
#include <shark/ObjectiveFunctions/ErrorFunction.h> // error function for supervised training
#include <shark/ObjectiveFunctions/Loss/CrossEntropy.h> // loss used for supervised training
#include <shark/ObjectiveFunctions/Loss/ZeroOneLoss.h> // loss used for evaluation of performance
#include <shark/ObjectiveFunctions/Regularizer.h> // L1 and L2 regularisation
#include <shark/Algorithms/GradientDescent/SteepestDescent.h> // optimizer: simple gradient descent
#include <shark/Algorithms/GradientDescent/Rprop.h> // optimizer for supervised fine-tuning
using namespace std;
using namespace shark;

//our artificial problem
LabeledData<RealVector,unsigned int> createProblem(){
	std::vector<RealVector> data(320,RealVector(16));
	std::vector<unsigned int> label(320);
	RealVector line(4);
	for(std::size_t k = 0; k != 10; ++k){
		for(size_t x=0; x != 16; x++) {
			for(size_t j=0; j != 4; j++) {
				bool val = (x & (1<<j)) > 0;
				line(j) = val;
				if(Rng::coinToss(0.3))
					line(j) = !val;
			}

			for(int i=0; i != 4; i++) {
				subrange(data[x+k*16],i*4 ,i*4 + 4) = line;
			}
			for(int i=0; i != 4; i++) {
				for(int l=0; l<4; l++) {
					data[x+k*16+160](l*4 + i) = line(l);
				}
			}
			label[x+k*16] = 1;
			label[x+k*16+160] = 0;
		}
	}
	return createLabeledDataFromRange(data,label);
}
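// Note (added commentary): each example is a 4x4 binary image stored as a
// 16-dimensional vector. A noisy 4-bit pattern (each bit flipped with
// probability 0.3) is copied across the four rows for class 1 and across the
// four columns for class 0, giving 160 examples per class.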

//training of an RBM
BinaryRBM trainRBM(
	UnlabeledData<RealVector> const& data,//the data to train with
	std::size_t numHidden,//number of hidden features of the RBM
	std::size_t iterations, //number of iterations to optimize
	double regularisation,//strength of the regularisation
	double learningRate // learning rate of steepest descent
){
	//create rbm with simple binary units using the global random number generator
	std::size_t inputs = dataDimension(data);
	BinaryRBM rbm(Rng::globalRng);
	rbm.setStructure(inputs,numHidden);
	initRandomUniform(rbm,-0.1*std::sqrt(1.0/inputs),0.1*std::sqrt(1.0/inputs));//initialize weights uniformly

	//create derivative to optimize the rbm
	//we want simple vanilla CD-1
	BinaryCD estimator(&rbm);
	TwoNormRegularizer regularizer;
	//regularisation strength; 0.0 means no regularisation, so choose >= 0.0
	estimator.setRegularizer(regularisation,&regularizer);
	estimator.setK(1);//number of sampling steps
	estimator.setData(data);//the data used for optimization

	//create and configure optimizer
	SteepestDescent optimizer;
	optimizer.setLearningRate(learningRate);//learning rate of the algorithm

	//now we train the rbm and return it with the optimized parameters
	optimizer.init(estimator);
	for(std::size_t iteration = 0; iteration != iterations; ++iteration) {
		optimizer.step(estimator);
	}
	rbm.setParameterVector(optimizer.solution().point);
	return rbm;
}
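// Usage sketch (added; not part of the original tutorial): trainRBM can also
// be called on its own; the hyperparameter values below are illustrative only:
//   BinaryRBM rbm = trainRBM(createProblem().inputs(), 8, 1000, 0.001, 0.1);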

typedef FFNet<LogisticNeuron,LinearNeuron> Network;//final supervised trained structure

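// Note (added commentary): the hidden units are logistic sigmoids, matching
// the binary RBM features; the outputs stay linear because Shark's
// CrossEntropy applies the softmax normalisation to the linear activations
// internally.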
//unsupervised pre-training of a network with two hidden layers
Network unsupervisedPreTraining(
	UnlabeledData<RealVector> const& data,
	std::size_t numHidden1,std::size_t numHidden2, std::size_t numOutputs,
	double regularisation, std::size_t iterations, double learningRate
){
	//train the first hidden layer
	std::cout<<"training first layer"<<std::endl;
	BinaryRBM layer = trainRBM(
		data,numHidden1,
		iterations,regularisation, learningRate
	);

	//compute the mapping onto the features of the first hidden layer
	layer.evaluationType(true,true);//map visible->hidden and use the mean feature activations instead of sampled states
	UnlabeledData<RealVector> intermediateData=layer(data);

	//train the next layer on the features of the first
	std::cout<<"training second layer"<<std::endl;
	BinaryRBM layer2 = trainRBM(
		intermediateData,numHidden2,
		iterations,regularisation, learningRate
	);
	//create the final network
	Network network;
	network.setStructure(dataDimension(data),numHidden1,numHidden2, numOutputs);
	initRandomNormal(network,0.1);
	network.setLayer(0,layer.weightMatrix(),layer.hiddenNeurons().bias());
	network.setLayer(1,layer2.weightMatrix(),layer2.hiddenNeurons().bias());

	return network;
}
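// Note (added commentary): this is greedy layer-wise pre-training. The weights
// and hidden biases of the two RBMs initialise the first two layers of the
// feed-forward network; only the output layer keeps its random initialisation
// and is learned from scratch during supervised fine-tuning.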

int main()
{
	//model parameters
	std::size_t numHidden1 = 8;
	std::size_t numHidden2 = 8;
	//unsupervised hyperparameters
	double unsupRegularisation = 0.001;
	double unsupLearningRate = 0.1;
	std::size_t unsupIterations = 10000;
	//supervised hyperparameters
	double regularisation = 0.0001;
	std::size_t iterations = 200;
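	//(added commentary) these values are the tutorial defaults; in practice
	//they would be tuned, e.g. by cross-validation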

	//load data and split into training and test
	LabeledData<RealVector,unsigned int> data = createProblem();
	data.shuffle();
	LabeledData<RealVector,unsigned int> test = splitAtElement(data,static_cast<std::size_t>(0.5*data.numberOfElements()));
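	//(added commentary) splitAtElement removes the second half from data in
	//place and returns it, so data keeps the training set and test gets the rest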

	//unsupervised pre-training
	Network network = unsupervisedPreTraining(
		data.inputs(),numHidden1, numHidden2,numberOfClasses(data),
		unsupRegularisation, unsupIterations, unsupLearningRate
	);
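	//(added commentary) numberOfClasses(data) is 2 for this problem, so the
	//network ends in two linear output neurons, one per class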

	//create the supervised problem. Cross Entropy loss with one norm regularisation
	CrossEntropy loss;
	ErrorFunction error(data, &network, &loss);
	OneNormRegularizer regularizer(error.numberOfVariables());
	error.setRegularizer(regularisation,&regularizer);
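	//(added commentary) the one-norm (L1) penalty pushes many weights to
	//exactly zero, acting as a sparsity prior during fine-tuning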

	//optimize the model
	std::cout<<"training supervised model"<<std::endl;
	IRpropPlusFull optimizer;
	optimizer.init(error);
	for(std::size_t i = 0; i != iterations; ++i){
		optimizer.step(error);
		std::cout<<i<<" "<<optimizer.solution().value<<std::endl;
	}
	network.setParameterVector(optimizer.solution().point);
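	//(added commentary) Rprop adapts one step size per weight from the sign of
	//the gradient only, so no learning rate has to be chosen for this phase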

	//evaluation
	ZeroOneLoss<unsigned int, RealVector> loss01;
	Data<RealVector> predictionTrain = network(data.inputs());
	cout << "classification error,train: " << loss01.eval(data.labels(), predictionTrain) << endl;

	Data<RealVector> prediction = network(test.inputs());
	cout << "classification error,test: " << loss01.eval(test.labels(), prediction) << endl;

}
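// Build sketch (added; assumes a standard Shark installation, paths may vary):
//   g++ -O2 DeepNetworkTrainingRBM.cpp -o DeepNetworkTrainingRBM \
//       -lshark -lboost_serialization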