From a8db14e494fff60a42f5614f86a17f0a83eb92be Mon Sep 17 00:00:00 2001 From: William Baker Date: Mon, 27 Mar 2023 09:49:06 +0100 Subject: [PATCH] Cleaned code and revised Readme --- README.md | 1 + examples/deprecated/legacy_train_cnp.py | 1 - results/results_graphics.py | 12 ++++-------- results/results_half.py | 6 ++++-- results/results_latent_prior_vs_posterior.py | 6 ++++-- results/results_regression.py | 6 ++++-- results/results_split.py | 6 ++++-- results/results_varying_kernel.py | 6 ++++-- train.py | 16 ++++++++-------- 9 files changed, 33 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index c7dcf11..4e44826 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,7 @@ CNP, LNP and HNP all have a similar encoder-decoder architecture. They have been ## 4. Experiments +Training can either be conducted in an interactive session (iPython) with arguments set in the section beginning at ln 40 (Training parameters), or by commenting out the section at ln 40 and uncommenting the section at ln 25 (Parse Training parameters), in which case the terminal and its cmd arguments can be used. ### 4.1. 
Regression training diff --git a/examples/deprecated/legacy_train_cnp.py b/examples/deprecated/legacy_train_cnp.py index bcc3a9d..661e273 100644 --- a/examples/deprecated/legacy_train_cnp.py +++ b/examples/deprecated/legacy_train_cnp.py @@ -1,6 +1,5 @@ #%% import os -#os.chdir("c:Users/baker/Documents/MLMI4/conditional-neural-processes/") import argparse from datetime import datetime diff --git a/results/results_graphics.py b/results/results_graphics.py index 16401b2..77fd0c2 100644 --- a/results/results_graphics.py +++ b/results/results_graphics.py @@ -1,6 +1,8 @@ #%% -import os -os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/") +import os, sys +pth = os.path.abspath(os.path.join(os.getcwd(), "..")) +os.chdir(pth) # change to project directory +sys.path.append(pth) from utils.load_model import * tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present, we wont need it import matplotlib.pyplot as plt @@ -13,7 +15,6 @@ pth = f'.data/{args.model}_model_{args.task}_context_{args.num_context}_uniform_sampling_{args.uniform_sampling}/'# + "cp-0030.ckpt" model, train_ds, test_ds = load_model_and_dataset(args, pth) -#model.load_weights(pth) #%% @@ -24,11 +25,6 @@ #for i, num_context in enumerate([1,10,100,1000]):#([1,10,100,1000]): for i, num_context in enumerate([1,10,100,1000]):#([1,10,100,1000]): - #model.load_weights(f'trained_models/model_{args.task}_context_{num_context}_uniform_sampling_{args.uniform_sampling}/' + "cp-0015.ckpt") - #model.load_weights(f'.data/CNP2_model_{args.task}_context_{args.num_context}_uniform_sampling_{args.uniform_sampling}/' + "cp-0010.ckpt") - - #model.load_weights(f'.data/CNP2_model_{args.task}_context_{args.num_context}_uniform_sampling_{args.uniform_sampling}/' + "cp-0010.ckpt") - if args.task == 'celeb': diff --git a/results/results_half.py b/results/results_half.py index 0dc50bd..7639434 100644 --- a/results/results_half.py +++ b/results/results_half.py @@ -1,6 +1,8 @@ #%% -import os 
-os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/") +import os, sys +pth = os.path.abspath(os.path.join(os.getcwd(), "..")) +os.chdir(pth) # change to project directory +sys.path.append(pth) from utils.load_model import * tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present, we wont need it from dataloader.load_mnist import half_load_mnist diff --git a/results/results_latent_prior_vs_posterior.py b/results/results_latent_prior_vs_posterior.py index ed0203d..014a9d2 100644 --- a/results/results_latent_prior_vs_posterior.py +++ b/results/results_latent_prior_vs_posterior.py @@ -1,6 +1,8 @@ #%% -import os -os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/") +import os, sys +pth = os.path.abspath(os.path.join(os.getcwd(), "..")) +os.chdir(pth) # change to project directory +sys.path.append(pth) from utils.load_model import * tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present, we wont need it diff --git a/results/results_regression.py b/results/results_regression.py index 90c2dd4..afa9adc 100644 --- a/results/results_regression.py +++ b/results/results_regression.py @@ -1,6 +1,8 @@ #%% -import os -os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/") +import os, sys +pth = os.path.abspath(os.path.join(os.getcwd(), "..")) +os.chdir(pth) # change to project directory +sys.path.append(pth) from utils.load_model import * tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present, we wont need it import matplotlib.pyplot as plt diff --git a/results/results_split.py b/results/results_split.py index 13f15f9..2f49ca9 100644 --- a/results/results_split.py +++ b/results/results_split.py @@ -1,6 +1,8 @@ #%% -import os -os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/") +import os, sys +pth = os.path.abspath(os.path.join(os.getcwd(), "..")) +os.chdir(pth) # change to project directory +sys.path.append(pth) from utils.load_model import * 
tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present, we wont need it from dataloader.load_mnist import split_load_mnist diff --git a/results/results_varying_kernel.py b/results/results_varying_kernel.py index 543892c..da384bb 100644 --- a/results/results_varying_kernel.py +++ b/results/results_varying_kernel.py @@ -1,6 +1,8 @@ #%% -import os -os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/") +import os, sys +pth = os.path.abspath(os.path.join(os.getcwd(), "..")) +os.chdir(pth) # change to project directory +sys.path.append(pth) from utils.load_model import * tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present, we wont need it from dataloader.load_regression_data_from_arbitrary_gp_varying_kernel import RegressionDataGeneratorArbitraryGPWithVaryingKernel diff --git a/train.py b/train.py index 34a3bf3..ccd457c 100644 --- a/train.py +++ b/train.py @@ -21,17 +21,18 @@ tfk = tf.keras tfd = tfp.distributions -# Parse arguments + +# ================================ Parse Training parameters =============================================== # parser = argparse.ArgumentParser() # parser.add_argument('-e', '--epochs', type=int, default=120, help='Number of training epochs') # parser.add_argument('-b', '--batch', type=int, default=1024, help='Batch size for training') -# parser.add_argument('-t', '--task', type=str, default='regression', help='Task to perform : (mnist|regression)') +# parser.add_argument('-t', '--task', type=str, default='regression', help='Task to perform : (mnist|regression|celeb|regression_varying)') # parser.add_argument('-c', '--num_context', type=int, default=100) # parser.add_argument('-u', '--uniform_sampling', type=bool, default=True) +# parser.add_argument('-m', '--model', type=bool, default='CNP', help='CNP|LNP|HNP|HNPC') # args = parser.parse_args() - - +# ------------------------------------------------------------------------------------------------------------------------- @@ -39,10 +40,10 
@@ # ================================ Training parameters =============================================== # Regression -args = argparse.Namespace(epochs=160, batch=1024, task='regression_varying', num_context=25, uniform_sampling=True, model='HNPC') +args = argparse.Namespace(epochs=160, batch=1024, task='regression', num_context=25, uniform_sampling=True, model='CNP') # MNIST / Celeb -#args = argparse.Namespace(epochs=30, batch=256, task='mnist', num_context=100, uniform_sampling=True, model='HNPC') +#args = argparse.Namespace(epochs=30, batch=256, task='mnist', num_context=100, uniform_sampling=True, model='CNP') LOG_PRIORS = True @@ -172,8 +173,7 @@ def train_step(model, x, optimizer): # ============================ Training Loop =========================================================== epochs = args.epochs -#epochs = 160 -for epoch in range(40, epochs + 1): +for epoch in range(1, epochs + 1): with tqdm(total=TRAINING_ITERATIONS, unit='batch') as tepoch: tepoch.set_description(f"Epoch {epoch}")