Neural Network Documentation
Setting Up Your Neural Network in NinjaTrader
Code from YouTube example: https://youtu.be/CfYWEU64Z4c
//This namespace holds Strategies in this folder and is required. Do not change it.
namespace NinjaTrader.NinjaScript.Strategies
{
    public class TestNeuralNetwork : Strategy
    {
        protected override void OnStateChange()
        {
            if (State == State.SetDefaults)
            {
                Description = @"";
                Name = "TestNeuralNetwork";
                Calculate = Calculate.OnBarClose;
                EntriesPerDirection = 1;
                EntryHandling = EntryHandling.AllEntries;
                IsExitOnSessionCloseStrategy = true;
                ExitOnSessionCloseSeconds = 30;
                IsFillLimitOnTouch = false;
                MaximumBarsLookBack = MaximumBarsLookBack.TwoHundredFiftySix;
                OrderFillResolution = OrderFillResolution.Standard;
                Slippage = 0;
                StartBehavior = StartBehavior.WaitUntilFlat;
                TimeInForce = TimeInForce.Gtc;
                TraceOrders = false;
                RealtimeErrorHandling = RealtimeErrorHandling.StopCancelClose;
                StopTargetHandling = StopTargetHandling.PerEntryExecution;
                BarsRequiredToTrade = 20;
                // Disable this property for performance gains in Strategy Analyzer optimizations
                // See the Help Guide for additional information
                IsInstantiatedOnEachOptimizationIteration = true;
            }
            else if (State == State.Configure)
            {
                ClearOutputWindow();
            }
            else if (State == State.DataLoaded)
            {
                // Define the network: 2 inputs, two hidden ReLU layers, 3-class softmax output
                Model.Layers = new TTNN.Layer[]
                {
                    new TTNN.Layer(2),
                    new TTNN.Layer(64, "relu"),
                    new TTNN.Layer(64, "relu"),
                    new TTNN.Layer(3, "softmax")
                };
                Model.Init(
                    optimizerFunction: new TTNN.OptimizerFunction.Adaptive_Momentum(learningRate: 0.05),
                    accuracyFunction: new TTNN.AccuracyFunction.True_Class_Mean(),
                    lossFunction: new TTNN.LossFunction.Categorical_Crossentropy()
                );
                Model.Train(
                    NetworkData.inputValues, NetworkData.outputValues, Test.inputValues, Test.outputValues,
                    epochs: 10, batchSize: 128, printEveryEpoch: 1
                );
                // Save the trained network to disk, load it back, and continue training the copy
                TTNNSerialize.WriteXML(Model, "TTNN.xml");
                NewModel = TTNNSerialize.ReadXML("TTNN.xml");
                Print("New Model Training");
                NewModel.Train(
                    NetworkData.inputValues, NetworkData.outputValues, Test.inputValues, Test.outputValues,
                    epochs: 10, batchSize: 128, printEveryEpoch: 1
                );
            }
        }

        TTNN Model = new TTNN();
        TTNN NewModel;

        protected override void OnBarUpdate()
        {
            // Run a single sample through the reloaded network and pick the class
            // with the highest predicted probability
            double[,] input = new double[,] { { 6.65802578E-01, 2.70672355E-01 } };
            double[,] output = NewModel.Predict(input);
            int maxClass = 0;
            for (int i = 0; i < output.Length; i++)
                if (output[0, i] > output[0, maxClass])
                    maxClass = i;

            if (maxClass == 0)
            {
                Print("Buy");
            }
            else if (maxClass == 1)
            {
                Print("Hold");
            }
            else
            {
                Print("Sell");
            }
        }
    }
}
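The strategy above references NetworkData and Test, which are not defined in the snippet (they come from the video example). A minimal sketch of what such containers might look like, assuming inputs are shaped [samples, features] and targets are one-hot [samples, classes]; the class names match the strategy, but the values are placeholders, not real training data:

public static class NetworkData
{
    // Two features per sample (placeholder values)
    public static double[,] inputValues = new double[,]
    {
        { 0.67, 0.27 },
        { 0.12, 0.88 },
        { 0.45, 0.51 }
    };

    // One-hot targets for three classes: Buy, Hold, Sell
    public static double[,] outputValues = new double[,]
    {
        { 1, 0, 0 },
        { 0, 1, 0 },
        { 0, 0, 1 }
    };
}

public static class Test
{
    // Held-out evaluation data in the same shape as the training arrays
    public static double[,] inputValues = new double[,] { { 0.33, 0.72 } };
    public static double[,] outputValues = new double[,] { { 0, 1, 0 } };
}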
Setting Up Your Neural Network in C#
1) Create a new instance of the NeuralNetwork class
NeuralNetwork Model = new NeuralNetwork();
2) Set the layer structure of your neural network
Model.Layers = new NeuralNetwork.Layer[]
{
new NeuralNetwork.Layer(2), // input layer
new NeuralNetwork.Layer(64, "relu"),
new NeuralNetwork.Layer(64, "relu"),
new NeuralNetwork.Layer(3, "softmax")
};
/*
* Layer Hyperparameters:
*
* neurons: The number of neurons that make up the layer
* activationFunction: The activation function the layer outputs are passed through
* weightRegulizer_L1: Linear (L1) weight regularizer
* biasRegulizer_L1: Linear (L1) bias regularizer
* weightRegulizer_L2: Exponential (L2) weight regularizer
* biasRegulizer_L2: Exponential (L2) bias regularizer
* dropoutRate: The percentage of neurons that are deactivated on each pass through the layer
*/
new NeuralNetwork.Layer(neurons: 64, activationFunction: "relu", weightRegulizer_L1: 5E-4, biasRegulizer_L1: 5E-4, weightRegulizer_L2: 5E-4, biasRegulizer_L2: 5E-4, dropoutRate: 0.05)
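A layer built with the full constructor can be used directly in the Layers array in place of a plain hidden layer. A sketch, reusing the regularizer and dropout values from the example above (they are illustrative, not recommended settings):

Model.Layers = new NeuralNetwork.Layer[]
{
    new NeuralNetwork.Layer(2), // input layer
    new NeuralNetwork.Layer(neurons: 64, activationFunction: "relu", weightRegulizer_L1: 5E-4, biasRegulizer_L1: 5E-4, weightRegulizer_L2: 5E-4, biasRegulizer_L2: 5E-4, dropoutRate: 0.05),
    new NeuralNetwork.Layer(64, "relu"),
    new NeuralNetwork.Layer(3, "softmax")
};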
3) Initialize the neural network
Model.Init();
/*
* Init Hyperparameters:
*
* optimizerFunction: The optimizer used by the neural network when adjusting weights and biases. At least one
*                    hyperparameter must be passed so the appropriate constructor is called
* lossFunction: The loss function used by the neural network
* accuracyFunction: The accuracy function used by the neural network
* randWeights: Toggle setting random weights on first creation of the network
* randBiases: Toggle setting random biases on first creation of the network
* constant: The constant each random value of weights and biases is multiplied by
*/
Model.Init(
optimizerFunction: new NeuralNetwork.OptimizerFunction.Adaptive_Momentum(learningRate:0.001), // must include at least 1 hyperparameter
lossFunction: new NeuralNetwork.LossFunction.Categorical_Crossentropy(),
accuracyFunction: new NeuralNetwork.AccuracyFunction.True_Class_Mean(),
randWeights: true, randBiases: false, constant: 0.01
);
4) Train the neural network
Model.Train(
input: Data.inputValues, target: Data.outputValues, testInput: Test.inputValues, testTarget: Test.outputValues
);
/*
* Train Hyperparameters:
*
* input: The input data used for training
* target: The target data used for training
* testInput: The unseen input data used for evaluating the network
* testTarget: The unseen target data used for evaluating the network
* epochs: The number of times the entire training data set is run through
* batchSize: The size of each batch of inputs passed through the network for training
* printEveryEpoch: Display results in the console every nth epoch
* printEveryIteration: Display results in the console every nth batch passed through the network
*/
Model.Train(
input: Data.inputValues, target: Data.outputValues, testInput: Test.inputValues, testTarget: Test.outputValues,
epochs: 10, batchSize: 128, printEveryEpoch: 1, printEveryIteration: 5
);
5) Predict unknown input values
double[,] predictions = Model.Predict(Test.inputValues);
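Predict returns one row of outputs per input row. For a classification network like the one above (softmax output), the predicted class is the column with the largest value. A sketch of reading the predictions that way:

double[,] predictions = Model.Predict(Test.inputValues);
for (int row = 0; row < predictions.GetLength(0); row++)
{
    // find the column with the highest predicted probability in this row
    int maxClass = 0;
    for (int col = 1; col < predictions.GetLength(1); col++)
        if (predictions[row, col] > predictions[row, maxClass])
            maxClass = col;
    Console.WriteLine("Sample " + row + " predicted class: " + maxClass);
}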
All Usable Activation Functions
new NeuralNetwork.Layer(64, "linear")
new NeuralNetwork.Layer(64, "relu")
new NeuralNetwork.Layer(64, "leakyrelu")
new NeuralNetwork.Layer(64, "parametricrelu")
new NeuralNetwork.Layer(64, "softmax")
new NeuralNetwork.Layer(64, "sigmoid")
new NeuralNetwork.Layer(64, "tanh")
new NeuralNetwork.Layer(64, "elu")
new NeuralNetwork.Layer(64, "gelu")
All Usable Optimizers
new NeuralNetwork.OptimizerFunction.Stochastic_Gradient_Descent(learningRate: 1, decay: 0, momentum: 0)
new NeuralNetwork.OptimizerFunction.Adaptive_Gradient(learningRate: 1, decay: 0, epsilon: 1E-7)
new NeuralNetwork.OptimizerFunction.Root_Mean_Square_Propegation(learningRate: 0.001, decay: 0, epsilon: 1E-7, rho: 0.9)
new NeuralNetwork.OptimizerFunction.Adaptive_Momentum(learningRate: 0.001, decay: 0, epsilon: 1E-7, beta1: 0.9, beta2: 0.999)
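Whichever optimizer is chosen, it is passed to Model.Init as the optimizerFunction. For example, with plain stochastic gradient descent (a sketch; the learning rate, decay, and momentum values are illustrative):

Model.Init(
    optimizerFunction: new NeuralNetwork.OptimizerFunction.Stochastic_Gradient_Descent(learningRate: 0.1, decay: 1E-3, momentum: 0.9),
    lossFunction: new NeuralNetwork.LossFunction.Categorical_Crossentropy(),
    accuracyFunction: new NeuralNetwork.AccuracyFunction.True_Class_Mean()
);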
All Usable Loss Functions
new NeuralNetwork.LossFunction.Squared_Error() // non-sequential targets
new NeuralNetwork.LossFunction.Categorical_Crossentropy() // targets must be one-hot encoded
new NeuralNetwork.LossFunction.Binary_Categorical_Crossentropy()
new NeuralNetwork.LossFunction.Mean_Squared_Error()
new NeuralNetwork.LossFunction.Mean_Absolute_Error()
All Usable Accuracy Functions
new NeuralNetwork.AccuracyFunction.True_Class_Mean()
Saving Neural Network for later use
Save neural network to file
NeuralNetwork.Serialization.WriteObject(Model, "SavedNeuralNetwork.xml");
Load in saved neural network
NeuralNetwork NewModel = NeuralNetwork.Serialization.ReadObject("SavedNeuralNetwork.xml");
Save layer weights or biases to file
NeuralNetwork.Serialization.WriteObject(Model.Layers[1].GetWeights, "FirstLayerSaveWeights.xml");
Load in saved layer weights or biases
Model.Layers[1].SetWeights(NeuralNetwork.Serialization.ReadObject("FirstLayerSaveWeights.xml"));
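Putting the pieces together, a save-and-reload round trip might look like this (a sketch mirroring the NinjaTrader example above; the file name is illustrative):

// train, then persist the whole network
Model.Train(
    input: Data.inputValues, target: Data.outputValues, testInput: Test.inputValues, testTarget: Test.outputValues,
    epochs: 10, batchSize: 128, printEveryEpoch: 1
);
NeuralNetwork.Serialization.WriteObject(Model, "SavedNeuralNetwork.xml");

// later: load the saved network and use it for predictions or further training
NeuralNetwork NewModel = NeuralNetwork.Serialization.ReadObject("SavedNeuralNetwork.xml");
double[,] predictions = NewModel.Predict(Test.inputValues);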
Other Methods
// get a string that displays a summary of the neural network
string properties = Model.GetProperties();
// get the weights and biases of an individual layer
double[,] firstLayerWeights = Model.Layers[1].GetWeights;
double[,] firstLayerBiases = Model.Layers[1].GetBiases;
// set each layer's optimizer function separately
Model.Layers[1].SetOptimizer(new NeuralNetwork.OptimizerFunction.Stochastic_Gradient_Descent());
// set weights and biases
Model.Layers[1].SetWeights(newWeights);
Model.Layers[1].SetBiases(newBiases);
// Convert a nested array, nested list, or jagged array of input data to a 2D array
double[,] newDataStructure = SetDataStructure(oldDataStructure);
// Convert a list or 1D array of sequential target classes to a one-hot encoded target structure
double[,] oneHotEncodedTargets = OneHotEncode(sequenceTargetClasses);
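For example, jagged inputs and plain class labels could be converted into the 2D structures Train expects (a sketch; the element type of the label array, and whether these helpers are called on the model or statically, are assumptions, since the documentation shows them as bare calls):

// jagged array of samples, two features each (placeholder values)
double[][] rawInputs = new double[][]
{
    new double[] { 0.67, 0.27 },
    new double[] { 0.12, 0.88 }
};
// one class label per sample: 0 = Buy, 1 = Hold, 2 = Sell (element type assumed)
double[] rawTargets = new double[] { 0, 1 };

// convert to the 2D input array and one-hot target array used by Model.Train
double[,] inputValues = SetDataStructure(rawInputs);
double[,] outputValues = OneHotEncode(rawTargets);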