Skip to content

Commit

Permalink
Add files via upload
Browse files Browse the repository at this point in the history
  • Loading branch information
lesept777 authored Sep 9, 2020
1 parent e4697f3 commit 6adf4f5
Show file tree
Hide file tree
Showing 3 changed files with 58 additions and 31 deletions.
73 changes: 47 additions & 26 deletions MLP.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -406,9 +406,10 @@ void MLP::displayHeuristics () {
}

// Enable or disable the random-weight-initialization heuristic.
void MLP::setHeurInitialize (bool val)
{
  _initialize = val;
}
void MLP::setHeurChangeWeights (bool val, float range = 1.0f) {
_changeWeights = val;
_range = range;
// Heuristic: allow re-drawing random weights during training.
// val   : enable / disable the heuristic
// range : half-width of the uniform draw interval, weights in [-range, range]
// proba : probability of forcing a freshly drawn weight to 0 (sparsity);
//         consumed by randomWeights() via _probaZeroWeight
// NOTE(review): default arguments on an out-of-class member definition are
// non-idiomatic — they conventionally belong on the declaration in MLP.h;
// confirm no caller relies on defaults being visible only here.
void MLP::setHeurChangeWeights (bool val, float range = 1.0f, float proba = 0.0f) {
_changeWeights = val;
_range = range;
_probaZeroWeight = proba;
}
void MLP::setHeurMutateWeights (bool val, float proba = 0.05f, float percent = 0.15f) {
_mutateWeights = val;
Expand Down Expand Up @@ -808,6 +809,7 @@ void MLP::testNet(DATASET* dataset, bool disp)
// Count the prediction errors on the training set and on the test set
void MLP::evaluateNet(DATASET* dataset, float threshold)
{
// _predict = true;
float *Out;
if (OutputLayer->Activation == SOFTMAX) Out = new float [OutputLayer->Units + 1];
else Out = new float [_units[_numLayers - 1]];
Expand Down Expand Up @@ -856,6 +858,7 @@ void MLP::evaluateNet(DATASET* dataset, float threshold)
if (_verbose > 0) Serial.printf("Verifying on %d test data : %2d errors (%.2f%%)\n", _nTest, nError, 100.0 * nError / _nTest);
_eval = false;
delete Out;
// _predict = false;
}

/* Provide the estimated time of the complete training in ms
Expand Down Expand Up @@ -969,6 +972,8 @@ void MLP::randomWeights(float x)
for (int i = 1; i <= Layer[l]->Units; i++){
for (int j = 0; j <= Layer[l - 1]->Units; j++){
Layer[l]->Weight[i][j] = randomFloat(-x, x);
// Randomly set zero weight
if ((float) esp_random() / UINT32_MAX < _probaZeroWeight) Layer[l]->Weight[i][j] = 0;
}
}
}
Expand Down Expand Up @@ -1144,13 +1149,36 @@ void MLP::weightMutation (float proba, float percent)
}
}

void MLP::predict (float* Input, float *Output)
float MLP::predict (float* Input)
{
_predict = true;
process(Input, Output, Output, 1);
if (OutputLayer->Activation != SOFTMAX) Output[0] = Output[0] * _outDelta + _outMinVal;
_predict = false;
_predict = true;
if (OutputLayer->Activation != SOFTMAX) {
float Output;
process(Input, &Output, 0, 1);
Output = Output * _outDelta + _outMinVal;
return Output;
} else {
float Output[_units[_numLayers - 1]];
process(Input, Output, 0, 1);
int indexMax = 0;
float valMax = 0;
for (int j = 0; j < _units[_numLayers - 1]; j++) {
if (Output[j] > valMax) {
valMax = Output[j];
indexMax = j;
}
}
return (float)indexMax;
}
// _predict = !_predict;
}
// void MLP::predict (float* Input, float *Output)
// {
// _predict = true;
// process(Input, Output, Output, 1);
// if (OutputLayer->Activation != SOFTMAX) Output[0] = Output[0] * _outDelta + _outMinVal;
// _predict = false;
// }

// Shift the input & output values of the dataset to the interval [0, 1]
void MLP::processDataset (DATASET* dataset)
Expand Down Expand Up @@ -1244,11 +1272,13 @@ void MLP::process (float* Input, float* Output, float* Target, int batch) {

// Set input
for (int i = 1; i <= InputLayer->Units; i++) {
yield();
// InputLayer->Output[i] = Input[i - 1 + _units[0] * iBatch];
InputLayer->Output[i] = (float) (Input[i - 1 + _units[0] * iBatch] - _inMinVal[i-1]) / _inDelta[i-1];
}

// Forward propagation
// if (_predict) Serial.println(" P3");
for (int l = 0; l < _numLayers - 1; l++) {
// Softmax case
if (l == _numLayers - 2 && OutputLayer->Activation == SOFTMAX) {
Expand Down Expand Up @@ -1338,40 +1368,31 @@ void MLP::process (float* Input, float* Output, float* Target, int batch) {
// Weights regularization L1: sum of |w| over every weight (biases included,
// j == 0 is the bias column). Used as a penalty term scaled by lambda.
float MLP::regulL1Weights()
{
  float sum = 0;
  for (int l = 1; l < _numLayers; l++)
    for (int i = 1; i <= Layer[l]->Units; i++)
      for (int j = 0; j <= Layer[l - 1]->Units; j++)
        // BUG FIX: use the float absolute value. Plain abs() resolves to the
        // integer overload in standard C++ and would truncate each weight to
        // an int; it only worked before because Arduino defines abs as a macro.
        sum += fabsf(Layer[l]->Weight[i][j]);
  return sum;
}

// Weights regularization L2
float MLP::regulL2Weights()
{
float sum = 0;
for (int l = 1; l < _numLayers; l++) {
for (int i = 1; i <= Layer[l]->Units; i++){
for (int j = 0; j <= Layer[l - 1]->Units; j++){
for (int l = 1; l < _numLayers; l++)
for (int i = 1; i <= Layer[l]->Units; i++)
for (int j = 0; j <= Layer[l - 1]->Units; j++)
sum += Layer[l]->Weight[i][j] * Layer[l]->Weight[i][j];
}
}
}
return sum / 2.0;
}

// Returns the total number of weights in the network
// (one bias per unit is counted, hence the +1 on the previous layer size).
int MLP::numberOfWeights()
{
  int N = 0;
  // Layer l holds Units x (previous Units + bias) weights, so the total is
  // available in closed form — no need to visit every individual weight.
  for (int l = 1; l < _numLayers; l++)
    N += Layer[l]->Units * (Layer[l - 1]->Units + 1);
  return N;
}
14 changes: 10 additions & 4 deletions MLP.h
Original file line number Diff line number Diff line change
Expand Up @@ -204,19 +204,23 @@ class MLP
/*
Methods to set the options one by one
bool : true / false to allow or disable
float, float: set a range (minimum and maximum values)
float, float: set a range (minimum and maximum values) or probability
*/
void setHeurInitialize (bool);
void setHeurChangeWeights (bool, float);
void setHeurZeroWeights (bool, float);
// first float is the weight range, second is the probability to set weight to 0 (for sparsity)
void setHeurChangeWeights (bool, float, float);
// first float is the mutation probability, second is the percent of change
void setHeurMutateWeights (bool, float, float);
void setHeurChangeBatch (bool);
// in the following methods, the float arguments are min and max values of the range
void setHeurChangeEta (bool, float, float);
void setHeurChangeGain (bool, float, float);
void setHeurChangeAlpha (bool, float, float);
void setHeurShuffleDataset (bool);
void setHeurZeroWeights (bool, float);
void setHeurTotalError (bool);
void setHeurSelectWeights (bool);
// in the 2 following methods, the float argument is the value of lambda (regul parameter)
void setHeurRegulL1 (bool, float);
void setHeurRegulL2 (bool, float);
// Display the summary of the heuristics options
Expand Down Expand Up @@ -259,7 +263,8 @@ class MLP
input: a pointer to the array of input data (in the format of the dataset)
output: a pointer to the array of output result
*/
void predict (float*, float*);
// void predict (float*, float*);
float predict (float*);

/*
Various useful functions
Expand Down Expand Up @@ -345,6 +350,7 @@ class MLP
float _zeroThreshold = 0.1f;
int _totalEpochs;
bool _eval = false, _predict = false;
float _probaZeroWeight = 0.0f;

// Private methods
void simulateNet(float*, float*, float*, bool);
Expand Down
2 changes: 1 addition & 1 deletion keywords.txt
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ changeAlpha KEYWORD2
changeBatchSize KEYWORD2
selectWeights KEYWORD2
simulateNet KEYWORD2
trainNet KEYWORD2
trainNetSGD KEYWORD2
testNet KEYWORD2
evaluateNet KEYWORD2
evaluateNet KEYWORD2
Expand Down

0 comments on commit 6adf4f5

Please sign in to comment.