diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/01 Introduction.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/01 Introduction.html index f81716a41a..a498f4e379 100644 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/01 Introduction.html +++ b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/01 Introduction.html @@ -1 +1 @@ -

This page explains how to build, train, deploy and store Tensorflow v1 models. To view the tutorial on Tensorflow 2, see Keras.

\ No newline at end of file +

This page explains how to build, train, deploy, and store TensorFlow models.

\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/02 Import Libraries.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/02 Import Libraries.html index b19e984c09..1b8542c75f 100644 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/02 Import Libraries.html +++ b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/02 Import Libraries.html @@ -1,14 +1,6 @@ -

Import the tensorflow libraries.

+

Import the tensorflow library.

from AlgorithmImports import *
-import tensorflow.compat.v1 as tf
-from google.protobuf import json_format
-import json5
-
-tf.disable_v2_behavior()
-
- -

You need the google.protobuf and json5 libraries to store and load models.

- -

Disable tensorflow v2 behaviors in order to deploy a v1 model.

\ No newline at end of file +import tensorflow as tf + \ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/04 Build Models.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/04 Build Models.html index d2424974dc..741ffc4fb7 100644 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/04 Build Models.html +++ b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/04 Build Models.html @@ -10,11 +10,11 @@ Features - The last 5 closing prices + The differences between the last 5 close prices and the current price Labels - The following day's closing price + The following day's price change @@ -24,58 +24,23 @@

Follow these steps to create a method to build the model:

    -
  1. Create a method to build the model for the algorithm class.
  2. +
  3. Set the number of layers, their number of nodes, the number of epochs, and the learning rate.
  4. -
    def BuildModel(self):    
    -    # Instantiate a tensorflow session
    -    sess = tf.Session()
    -
    -    # Declare the number of factors and then create placeholders for the input and output layers.
    -    num_factors = 5
    -    X = tf.placeholder(dtype=tf.float32, shape=[None, num_factors], name='X')
    -    Y = tf.placeholder(dtype=tf.float32, shape=[None])
    -    
    -    # Set up the weights and bias initializers for each layer.
    -    weight_initializer = tf.variance_scaling_initializer(mode="fan_avg", distribution="uniform", scale=1)
    -    bias_initializer = tf.zeros_initializer()
    -    
    -    # Create hidden layers that use the Relu activator.
    -    num_neurons_1 = 32
    -    num_neurons_2 = 16
    -    num_neurons_3 = 8
    -    
    -    W_hidden_1 = tf.Variable(weight_initializer([num_factors, num_neurons_1]))
    -    bias_hidden_1 = tf.Variable(bias_initializer([num_neurons_1]))
    -    hidden_1 = tf.nn.relu(tf.add(tf.matmul(X, W_hidden_1), bias_hidden_1))
    -    
    -    W_hidden_2 = tf.Variable(weight_initializer([num_neurons_1, num_neurons_2]))
    -    bias_hidden_2 = tf.Variable(bias_initializer([num_neurons_2]))
    -    hidden_2 = tf.nn.relu(tf.add(tf.matmul(hidden_1, W_hidden_2), bias_hidden_2))
    -    
    -    W_hidden_3 = tf.Variable(weight_initializer([num_neurons_2, num_neurons_3]))
    -    bias_hidden_3 = tf.Variable(bias_initializer([num_neurons_3]))
    -    hidden_3 = tf.nn.relu(tf.add(tf.matmul(hidden_2, W_hidden_3), bias_hidden_3))
    -    
    -    # Create the output layer and give it a name, so it is accessible after saving and loading the model.
    -    W_out = tf.Variable(weight_initializer([num_neurons_3, 1]))
    -    bias_out = tf.Variable(bias_initializer([1]))
    -    output = tf.transpose(tf.add(tf.matmul(hidden_3, W_out), bias_out), name='outer')
    -    
    -    # Set up the loss function and optimizers for gradient descent optimization and backpropagation.
    -    # This example uses mean-square error as the loss function because the close price is a continuous data and uses Adam as the optimizer because of its adaptive step size.
    -    loss = tf.reduce_mean(tf.squared_difference(output, Y))
    -    optimizer = tf.train.AdamOptimizer().minimize(loss)
    -    
    -    return sess, X, Y, output, optimizer
    -
    - -
  5. Instantiate the model, input layers, output layer, and optimizer and then save them as class variables.
  6. -
    -
    self.model, self.X, self.Y, self.output, self.optimizer = self.BuildModel(features, labels)
    +
    num_factors = 5
    +num_neurons_1 = 10
    +num_neurons_2 = 20
    +num_neurons_3 = 5
    +self.epochs = 20
    +self.learning_rate = 0.0001
    -
  7. Call the run method with the result from the global_variables_initializer method.
  8. +
  9. Create the model using the built-in Keras API.
  10. -
    self.model.run(tf.global_variables_initializer())
    +
    self.model = tf.keras.Sequential([
    +    tf.keras.layers.Dense(num_neurons_1, activation=tf.nn.relu, input_shape=(num_factors,)),  # input shape required
    +    tf.keras.layers.Dense(num_neurons_2, activation=tf.nn.relu),
    +    tf.keras.layers.Dense(num_neurons_3, activation=tf.nn.relu),
    +    tf.keras.layers.Dense(1)
    +])
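As a quick sanity check, you can run a forward pass on a dummy sample to confirm the output shape. This is a minimal sketch, not part of the algorithm; it assumes np is available through AlgorithmImports.

# Hypothetical shape check: one sample with num_factors features yields one prediction
dummy_input = np.zeros((1, num_factors), dtype=np.float32)
assert self.model(dummy_input).shape == (1, 1)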
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/05 Train Models.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/05 Train Models.html index 3893f7f4aa..b9820cc091 100644 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/05 Train Models.html +++ b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/05 Train Models.html @@ -13,22 +13,34 @@

Warm Up Training Data
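For example, a minimal sketch of warming up the training data in Initialize with a rolling window of daily closes; the two-year training_length and the RollingWindow container are assumptions, not requirements:

# Assumed warm-up: keep the last two years of daily closes for training
training_length = 252 * 2
self.training_data = RollingWindow[float](training_length)
history = self.History[TradeBar](self.symbol, training_length, Resolution.Daily)
for trade_bar in history:
    self.training_data.Add(trade_bar.Close)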

Define a Training Method

To train the model, define a method that fits the model with the training data.

-
def get_features_and_labels(self, n_steps=5):
-    close_prices = list(self.training_data)[::-1]
-    
-    features = []
-    labels = []
-    for i in range(len(close_prices)-n_steps):
-        features.append(close_prices[i:i+n_steps])
-        labels.append(close_prices[i+n_steps])
-    features = np.array(features)
-    labels = np.array(labels)
-
-    return features, labels
+    
def get_features_and_labels(self, lookback=5):
+    lookback_series = []
+
+    data = pd.Series(list(self.training_data)[::-1])
+    for i in range(1, lookback + 1):
+        df = data.diff(i)[lookback:-1]
+        df.name = f"close-{i}"
+        lookback_series.append(df)
+
+    X = pd.concat(lookback_series, axis=1).reset_index(drop=True).dropna()
+    Y = data.diff(-1)[lookback:-1].reset_index(drop=True)
+    return X.values, Y.values
 
 def my_training_method(self):
     features, labels = self.get_features_and_labels()
-    self.model.run(self.optimizer, feed_dict={self.X: features, self.Y: labels})
+
+    # Define the loss function; we use MSE in this example since the label is continuous
+    def loss_mse(target_y, predicted_y):
+        return tf.reduce_mean(tf.square(target_y - predicted_y))
+
+    # Train the model: on each epoch, compute the loss and backpropagate
+    optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
+    for i in range(self.epochs):
+        with tf.GradientTape() as t:
+            loss = loss_mse(labels, self.model(features))
+
+        # Apply the gradients of the loss to the trainable weights
+        jac = t.gradient(loss, self.model.trainable_weights)
+        optimizer.apply_gradients(zip(jac, self.model.trainable_weights))

Set Training Schedule
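For example, a minimal sketch that trains the model once at the start of the algorithm and re-trains every Sunday at 8 AM; the schedule itself is an assumption, so pick rules that fit your strategy:

# Train immediately on deployment, then re-train every Sunday at 8:00 AM
self.Train(self.my_training_method)
self.Train(self.DateRules.Every(DayOfWeek.Sunday), self.TimeRules.At(8, 0), self.my_training_method)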

diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/06 Predict Labels.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/06 Predict Labels.html index 6718e467df..99046c731e 100644 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/06 Predict Labels.html +++ b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/06 Predict Labels.html @@ -1,13 +1,13 @@

To predict the labels of new data, in the OnData method, get the most recent set of features and then call the model with the new features.

new_features, __ = self.get_features_and_labels()
-prediction = self.model.run(self.output, feed_dict={self.X: new_features[-1].reshape(1, -1)})
-prediction = float(prediction.flatten()[-1])
+prediction = self.model(new_features)
+prediction = float(prediction.numpy()[-1])

You can use the label prediction to place orders.

-
if prediction > slice[self.symbol].Price:
+    
if prediction > 0:
     self.SetHoldings(self.symbol, 1)
 else:            
     self.SetHoldings(self.symbol, -1)
diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/07 Save Models.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/07 Save Models.html index f02b19885f..0f733d0846 100644 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/07 Save Models.html +++ b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/07 Save Models.html @@ -1,22 +1,25 @@

Follow these steps to save TensorFlow models into the Object Store:

    -
  1. Export the TensorFlow graph as a JSON object.
  2. +
  3. Set the key name of the model to be stored in the Object Store.
  4. -
    graph_definition = tf.compat.v1.train.export_meta_graph()
    -json_graph = json_format.MessageToJson(graph_definition)
    +
    model_key = "model.keras"
    +

Note that the model file name must end with the .keras suffix.

    -
  5. Export the TensorFlow weights as a JSON object.
  6. +
  7. Call the GetFilePath method with the key.
  8. -
    weights = self.model.run(tf.compat.v1.trainable_variables())
    -weights = [w.tolist() for w in weights]
    -json_weights = json5.dumps(weights)
    +
    file_name = self.ObjectStore.GetFilePath(model_key)
    +

    This method returns the file path where the model will be stored.

    -
  9. Save the graph and weights to the Object Store.
  10. +
  11. Call the model's save method with the file path.
  12. -
    self.ObjectStore.Save('graph', json_graph)
    -self.ObjectStore.Save('weights', json_weights)
    +
self.model.save(file_name)
    +
    + +
  13. Save the model to the Object Store.
  14. +
    +
    self.ObjectStore.Save(model_key)
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/08 Load Models.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/08 Load Models.html index 88a86261f4..f57d9280f8 100644 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/08 Load Models.html +++ b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/08 Load Models.html @@ -1,35 +1,10 @@

You can load and trade with pre-trained TensorFlow models that you saved in the Object Store. To load a TensorFlow model from the Object Store, in the Initialize method, get the file path to the saved model and then load the model from the file.

def Initialize(self) -> None:
-    if self.ObjectStore.ContainsKey('graph') and self.ObjectStore.ContainsKey('weights'):
-        json_graph = self.ObjectStore.Read('graph')
-        json_weights = self.ObjectStore.Read('weights')
-
-        # Restore the tensorflow graph from JSON objects
-        tf.reset_default_graph()
-        graph_definition = json_format.Parse(json_graph, tf.MetaGraphDef())
-        self.model = tf.Session()
-        tf.train.import_meta_graph(graph_definition)
-
-        # Select the input, output tensors and optimizer
-        self.X = tf.get_default_graph().get_tensor_by_name('X:0')
-        self.Y = tf.get_default_graph().get_tensor_by_name('Y:0')
-        self.output = tf.get_default_graph().get_tensor_by_name('outer:0')
-        self.optimizer = tf.get_default_graph().get_collection('Variable/Adam')
-        
-        # Restore the model weights from the JSON object.
-        weights = [np.asarray(x) for x in json5.loads(json_weights)]
-        assign_ops = []
-        feed_dict = {}
-        vs = tf.trainable_variables()
-        zipped_values = zip(vs, weights)
-        for var, value in zipped_values:
-            value = np.asarray(value)
-            assign_placeholder = tf.placeholder(var.dtype, shape=value.shape)
-            assign_op = var.assign(assign_placeholder)
-            assign_ops.append(assign_op)
-            feed_dict[assign_placeholder] = value
-        self.model.run(assign_ops, feed_dict=feed_dict)
+    model_key = 'model.keras'
+    if self.ObjectStore.ContainsKey(model_key):
+        file_name = self.ObjectStore.GetFilePath(model_key)
+        self.model = tf.keras.models.load_model(file_name)
-

The ContainsKey method returns a boolean that represents if the graph and weights is in the Object Store. If the Object Store doesn't contain the keys, save the model using them before you proceed.

\ No newline at end of file +

The ContainsKey method returns a boolean that indicates whether the model.keras key is in the Object Store. If the Object Store doesn't contain the key, save the model under that key before you proceed.
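For instance, a minimal sketch of such a guard; BuildModel is a hypothetical helper that would construct and train a fresh model when no saved copy exists:

def Initialize(self) -> None:
    model_key = 'model.keras'
    if self.ObjectStore.ContainsKey(model_key):
        file_name = self.ObjectStore.GetFilePath(model_key)
        self.model = tf.keras.models.load_model(file_name)
    else:
        # Hypothetical fallback: build and train a new model, then save it
        self.BuildModel()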

\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/09 Clone Example Algorithm.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/09 Clone Example Algorithm.html index 8285a4ab66..b5414bdacb 100644 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/09 Clone Example Algorithm.html +++ b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/09 Clone Example Algorithm.html @@ -1,6 +1,6 @@
- +
\ No newline at end of file diff --git a/04 Research Environment/08 Machine Learning/03 TensorFlow/02 Import Libraries.html b/04 Research Environment/08 Machine Learning/03 TensorFlow/02 Import Libraries.html index 36c2146b15..a5c71edac1 100644 --- a/04 Research Environment/08 Machine Learning/03 TensorFlow/02 Import Libraries.html +++ b/04 Research Environment/08 Machine Learning/03 TensorFlow/02 Import Libraries.html @@ -1,10 +1,8 @@ -

Import the tensorflow, sklearn, json5 and google.protobuf libraries.

+

Import the tensorflow and sklearn libraries.

import tensorflow as tf
-from sklearn.model_selection import train_test_split
-import json5
-from google.protobuf import json_format
+from sklearn.model_selection import train_test_split
-

You need the sklearn library to prepare the data and the json5 and google.protobuf libraries to save models.

\ No newline at end of file +

You need the sklearn library to prepare the data.
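For example, a minimal sketch of the split performed later; the 70/30 ratio and shuffle=False (to preserve time order) are assumptions:

# Chronological split of the prepared features X and labels y
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=False)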

\ No newline at end of file diff --git a/04 Research Environment/08 Machine Learning/03 TensorFlow/04 Prepare Data.html b/04 Research Environment/08 Machine Learning/03 TensorFlow/04 Prepare Data.html index bfe2a50764..ff27b68aa9 100644 --- a/04 Research Environment/08 Machine Learning/03 TensorFlow/04 Prepare Data.html +++ b/04 Research Environment/08 Machine Learning/03 TensorFlow/04 Prepare Data.html @@ -10,11 +10,11 @@ Features - The last 5 closing prices + The differences between the last 5 close prices and the current price Labels - The following day's closing price + The following day's price change @@ -28,10 +28,11 @@
lookback = 5
 lookback_series = []
 for i in range(1, lookback + 1):
-    df = history['close'].shift(i)[lookback:-1]
-    df.name = f"close_-{i}"
+    df = data['close'].diff(i)[lookback:-1]
+    df.name = f"close-{i}"
     lookback_series.append(df)
-X = pd.concat(lookback_series, axis=1).reset_index(drop=True)
+X = pd.concat(lookback_series, axis=1).reset_index(drop=True).dropna()
+X

The following image shows the format of the features DataFrame:

@@ -39,7 +40,7 @@
  • Select the close column and then call the diff method to collect the labels.
  • -
    Y = history['close'].shift(-1)
    +
    Y = data['close'].diff(-1)
  • Drop the first 5 rows of the labels and then call the reset_index method.
  • diff --git a/04 Research Environment/08 Machine Learning/03 TensorFlow/05 Train Models.html b/04 Research Environment/08 Machine Learning/03 TensorFlow/05 Train Models.html index 534350f923..c5044549f7 100644 --- a/04 Research Environment/08 Machine Learning/03 TensorFlow/05 Train Models.html +++ b/04 Research Environment/08 Machine Learning/03 TensorFlow/05 Train Models.html @@ -3,89 +3,54 @@

    Build the Model

    Follow these steps to build the model:

      -
    1. Call the reset_default_graph method.
    2. -
      -
      tf.reset_default_graph()
      -
      -

      This method clears the default graph stack and resets the global default graph.

      - -
    3. Call the Session constructor.
    4. -
      -
      sess = tf.Session()
      -
      - -
    5. Declare the number of factors and then create placeholders for the input and output layers.
    6. +
    7. Set the number of layers, their number of nodes, the number of epochs, and the learning rate.
    8. num_factors = X_test.shape[1]
      -X = tf.placeholder(dtype=tf.float32, shape=[None, num_factors], name='X')
      -Y = tf.placeholder(dtype=tf.float32, shape=[None])
      -
      - -
    9. Set up the weights and bias initializers for each layer.
    10. -
      -
      weight_initializer = tf.variance_scaling_initializer(mode="fan_avg", distribution="uniform", scale=1)
      -bias_initializer = tf.zeros_initializer()
      -
      - -
    11. Create hidden layers that use the Relu activator.
    12. -
      -
      num_neurons_1 = 32
      -num_neurons_2 = 16
      -num_neurons_3 = 8
      -
      -W_hidden_1 = tf.Variable(weight_initializer([num_factors, num_neurons_1]))
      -bias_hidden_1 = tf.Variable(bias_initializer([num_neurons_1]))
      -hidden_1 = tf.nn.relu(tf.add(tf.matmul(X, W_hidden_1), bias_hidden_1))
      -
      -W_hidden_2 = tf.Variable(weight_initializer([num_neurons_1, num_neurons_2]))
      -bias_hidden_2 = tf.Variable(bias_initializer([num_neurons_2]))
      -hidden_2 = tf.nn.relu(tf.add(tf.matmul(hidden_1, W_hidden_2), bias_hidden_2))
      -
      -W_hidden_3 = tf.Variable(weight_initializer([num_neurons_2, num_neurons_3]))
      -bias_hidden_3 = tf.Variable(bias_initializer([num_neurons_3]))
      -hidden_3 = tf.nn.relu(tf.add(tf.matmul(hidden_2, W_hidden_3), bias_hidden_3))
      +num_neurons_1 = 10 +num_neurons_2 = 20 +num_neurons_3 = 5 +epochs = 20 +learning_rate = 0.0001
      -
    13. Create the output layer and give it a name.
    14. +
    15. Create the hidden layers with the set number of layers and their corresponding numbers of nodes.
    16. +

      In this example, we construct the model with the built-in Keras API, using the ReLU activation function to apply a non-linear activation to each layer.

      -
      W_out = tf.Variable(weight_initializer([num_neurons_3, 1]))
      -bias_out = tf.Variable(bias_initializer([1]))
      -output = tf.transpose(tf.add(tf.matmul(hidden_3, W_out), bias_out), name='outer')
      +
      model = tf.keras.Sequential([
      +    tf.keras.layers.Dense(num_neurons_1, activation=tf.nn.relu, input_shape=(num_factors,)),  # input shape required
      +    tf.keras.layers.Dense(num_neurons_2, activation=tf.nn.relu),
      +    tf.keras.layers.Dense(num_neurons_3, activation=tf.nn.relu),
      +    tf.keras.layers.Dense(1)
      +])
      -

      This snippet creates a 1-node output for both weight and bias. You must name the output layer so you can access it after you load and save the model.

      - -
    17. Set up the loss function and optimizers for gradient descent optimization and backpropagation.
    18. +
    19. Select an optimizer.
    20. +

      We use the Adam optimizer in this example. You may also consider others, like SGD; a sketch of that swap follows the code below.

      -
      loss = tf.reduce_mean(tf.squared_difference(output, Y))
      -optimizer = tf.train.AdamOptimizer().minimize(loss)
      +
      optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
      -
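      For instance, swapping in SGD is a one-line change; this is a sketch, and the learning rate would likely need retuning:

      # Hypothetical alternative: plain stochastic gradient descent
      optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)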

      Use mean-square error as the loss function because the close price is a continuous data and use Adam as the optimizer because of its adaptive step size.

      - -
    21. Set the batch size and number of epochs to bootstrap the training process.
    22. + +
    23. Define the loss function.
    24. +

      In the context of numerical regression, we use MSE as our objective function. If you're doing classification, cross-entropy would be more suitable; a sketch follows the code below.

      -
      batch_size = len(y_train) // 10
      -epochs = 20
      +
      def loss_mse(target_y, predicted_y):
      +    return tf.reduce_mean(tf.square(target_y - predicted_y))
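      For a classification task, a sketch of a cross-entropy counterpart; it assumes integer class labels and raw logits from the model:

      # Hypothetical classification loss: sparse categorical cross-entropy on logits
      def loss_ce(target_y, predicted_y):
          return tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(
              target_y, predicted_y, from_logits=True))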

    Train the Model

    -

    Follow these steps to train the model:

    +

    Train the model iteratively for the set number of epochs. On each epoch, the selected optimizer updates the model weights using the gradients of the loss function.

    +
    +
    for i in range(epochs):
    +    with tf.GradientTape() as t:
    +        loss = loss_mse(y_train, model(X_train))
     
    -
      -
    1. Call the run method with the result from the global_variables_initializer method.
    2. -
      -
      sess.run(tf.global_variables_initializer())
      -
      +
      +    train_loss = loss_mse(y_train, model(X_train))
      +    test_loss = loss_mse(y_test, model(X_test))
      +    print(f"""Epoch {i+1}:
      +Training loss = {train_loss.numpy()}. Test loss = {test_loss.numpy()}""")
      -
    3. Loop through the number of epochs, select a subset of the training data, and then call the run method with the subset of data.
    4. -
      -
      for _ in range(epochs):
      -    for i in range(0, len(y_train) // batch_size):
      -        start = i * batch_size
      -        batch_x = X_train[start:start + batch_size]
      -        batch_y = y_train[start:start + batch_size]
      -        sess.run(optimizer, feed_dict={X: batch_x, Y: batch_y})
      -
      -
    +    jac = t.gradient(loss, model.trainable_weights)
    +    optimizer.apply_gradients(zip(jac, model.trainable_weights))
    +
    \ No newline at end of file diff --git a/04 Research Environment/08 Machine Learning/03 TensorFlow/06 Test Models.html b/04 Research Environment/08 Machine Learning/03 TensorFlow/06 Test Models.html index 9c10dee097..e54b2012fb 100644 --- a/04 Research Environment/08 Machine Learning/03 TensorFlow/06 Test Models.html +++ b/04 Research Environment/08 Machine Learning/03 TensorFlow/06 Test Models.html @@ -1,11 +1,12 @@

    To test the model, we'll set up a method to plot the test set predictions on top of the SPY price.

    -
    def test_model(sess, output, title, X):
    -    prediction = sess.run(output, feed_dict={X: X_test})
    -    prediction = prediction.reshape(prediction.shape[1], 1)
    +    
    def test_model(actual, title, X):
    +    prediction = model(X).numpy()
    +    prediction = prediction.reshape(-1, 1)
     
    -    y_test.reset_index(drop=True).plot(figsize=(16, 6), label="Actual")
    +    plt.figure(figsize=(16, 6))
    +    plt.plot(actual, label="Actual")
         plt.plot(prediction, label="Prediction")
         plt.title(title)
         plt.xlabel("Time step")
    @@ -13,7 +14,7 @@
         plt.legend()
         plt.show()
     
    -test_model(sess, output, "Test Set Results from Original Model", X)
    +test_model(y_test, "Test Set Results from Original Model", X_test)
    Tensorflow model performance \ No newline at end of file diff --git a/04 Research Environment/08 Machine Learning/03 TensorFlow/07 Store Models.html b/04 Research Environment/08 Machine Learning/03 TensorFlow/07 Store Models.html index 967a605cfc..486698efd0 100644 --- a/04 Research Environment/08 Machine Learning/03 TensorFlow/07 Store Models.html +++ b/04 Research Environment/08 Machine Learning/03 TensorFlow/07 Store Models.html @@ -4,29 +4,26 @@

    Save Models

    Follow these steps to save models in the Object Store:

      -
    1. Export the TensorFlow graph as a JSON object.
    2. +
    3. Set the key name of the model to be stored in the Object Store.
    4. -
      graph_definition = tf.compat.v1.train.export_meta_graph()
      -json_graph = json_format.MessageToJson(graph_definition)
      +
      model_key = "model.keras"
      +

      Note that the model file name must end with the .keras suffix.

      -
    5. Export the TensorFlow weights as a JSON object.
    6. +
    7. Call the GetFilePath method with the key.
    8. -
      # Define a function to get the weights from the tensorflow session
      -def get_json_weights(sess):
      -    weights = sess.run(tf.compat.v1.trainable_variables())
      -    weights = [w.tolist() for w in weights]
      -    weights_list = json5.dumps(weights)
      -    return weights_list
      -    
      -json_weights = get_json_weights(sess)
      -sess.close()    # Close the session opened by the `get_json_weights` function
      +
      file_name = qb.ObjectStore.GetFilePath(model_key)
      +

      This method returns the file path where the model will be stored.

      -
    9. Save the graph and weights to the Object Store.
    10. +
    11. Call the model's save method with the file path.
    12. -
      qb.ObjectStore.Save('graph', json_graph)
      -qb.ObjectStore.Save('weights', json_weights)
      +
      model.save(file_name)
      +
      + +
    13. Save the model to the Object Store.
    14. +
      +
      qb.ObjectStore.Save(model_key)
    @@ -34,39 +31,13 @@

    Load Models

    You must save a model into the Object Store before you can load it from the Object Store. If you saved a model, follow these steps to load it:

      -
    1. Read the model graph and weights from the Object Store.
    2. -
      -
      json_graph = qb.ObjectStore.Read('graph')
      -json_weights = qb.ObjectStore.Read('weights')
      -
      - -
    3. Restore the TensorFlow graph from the JSON object.
    4. -
      -
      tf.reset_default_graph()
      -graph_definition = json_format.Parse(json_graph, tf.compat.v1.MetaGraphDef())
      -sess = tf.Session()
      -tf.compat.v1.train.import_meta_graph(graph_definition)
      -
      - -
    5. Select the input and output tensors.
    6. +
    7. Get the file path from the Object Store.
    8. -
      X = tf.compat.v1.get_default_graph().get_tensor_by_name('X:0')
      -output = tf.compat.v1.get_default_graph().get_tensor_by_name('outer:0')
      +
      file_path = qb.ObjectStore.GetFilePath(model_key)
      -
    9. Restore the model weights from the JSON object.
    10. +
    11. Restore the TensorFlow model from the saved path.
    12. -
      weights = [np.asarray(x) for x in json5.loads(json_weights)]
      -assign_ops = []
      -feed_dict = {}
      -vs = tf.compat.v1.trainable_variables()
      -zipped_values = zip(vs, weights)
      -for var, value in zipped_values:
      -    value = np.asarray(value)
      -    assign_placeholder = tf.placeholder(var.dtype, shape=value.shape)
      -    assign_op = var.assign(assign_placeholder)
      -    assign_ops.append(assign_op)
      -    feed_dict[assign_placeholder] = value
      -sess.run(assign_ops, feed_dict=feed_dict)
      +
      model = tf.keras.models.load_model(file_path)
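      To confirm the restored model behaves like the original, you can re-run the plotting helper defined earlier; a minimal sketch:

      # The loaded model should reproduce the original test-set predictions
      test_model(y_test, "Test Set Results from Loaded Model", X_test)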