Commit

Cleaned code and revised Readme

William-Baker committed Mar 27, 2023
1 parent 4149559 commit a8db14e
Showing 9 changed files with 33 additions and 27 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -62,6 +62,7 @@ CNP, LNP and HNP all have a similar encoder-decoder architecture. They have been


## 4. Experiments
Training can be run either in an interactive session (IPython), with the arguments set in the section beginning at line 40 ("Training parameters"), or from the terminal with command-line arguments, by commenting out the section at line 40 and uncommenting the section at line 25 ("Parse Training parameters").

### 4.1. Regression training

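For context, here is a minimal sketch of the two configuration modes the README now describes, based on the train.py hunk shown further down this page; the parameter names are taken from that hunk, and nothing beyond them is from the repository:

# Mode 1 (the default in this commit): parameters hard-coded in an
# argparse.Namespace, convenient for interactive (IPython) sessions.
import argparse

args = argparse.Namespace(epochs=160, batch=1024, task='regression',
                          num_context=25, uniform_sampling=True, model='CNP')

# Mode 2: comment out the Namespace above and uncomment a parser block like
# this one to drive training from the terminal instead.
# parser = argparse.ArgumentParser()
# parser.add_argument('-e', '--epochs', type=int, default=120, help='Number of training epochs')
# parser.add_argument('-b', '--batch', type=int, default=1024, help='Batch size for training')
# parser.add_argument('-t', '--task', type=str, default='regression', help='mnist|regression|celeb|regression_varying')
# parser.add_argument('-c', '--num_context', type=int, default=100)
# parser.add_argument('-u', '--uniform_sampling', type=bool, default=True)
# parser.add_argument('-m', '--model', type=str, default='CNP', help='CNP|LNP|HNP|HNPC')
# args = parser.parse_args()

A command line such as python train.py -t mnist -m CNP -e 30 would then select the task and model. Note that the commented block in train.py declares --model with type=bool, which looks like a typo, so the sketch uses type=str.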
1 change: 0 additions & 1 deletion examples/deprecated/legacy_train_cnp.py
@@ -1,6 +1,5 @@
#%%
import os
#os.chdir("c:Users/baker/Documents/MLMI4/conditional-neural-processes/")
import argparse
from datetime import datetime

12 changes: 4 additions & 8 deletions results/results_graphics.py
@@ -1,6 +1,8 @@
#%%
import os
os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/")
import os, sys
pth = os.path.abspath(os.path.join(os.getcwd(), ".."))
os.chdir(pth) # change to project directory
sys.path.append(pth)
from utils.load_model import *
tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present; we won't need it
import matplotlib.pyplot as plt
@@ -13,7 +15,6 @@
pth = f'.data/{args.model}_model_{args.task}_context_{args.num_context}_uniform_sampling_{args.uniform_sampling}/'# + "cp-0030.ckpt"
model, train_ds, test_ds = load_model_and_dataset(args, pth)

#model.load_weights(pth)

#%%

@@ -24,11 +25,6 @@
#for i, num_context in enumerate([1,10,100,1000]):#([1,10,100,1000]):
for i, num_context in enumerate([1,10,100,1000]):#([1,10,100,1000]):

#model.load_weights(f'trained_models/model_{args.task}_context_{num_context}_uniform_sampling_{args.uniform_sampling}/' + "cp-0015.ckpt")
#model.load_weights(f'.data/CNP2_model_{args.task}_context_{args.num_context}_uniform_sampling_{args.uniform_sampling}/' + "cp-0010.ckpt")

#model.load_weights(f'.data/CNP2_model_{args.task}_context_{args.num_context}_uniform_sampling_{args.uniform_sampling}/' + "cp-0010.ckpt")


if args.task == 'celeb':

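The same path setup recurs in every results script in this commit. A standalone sketch of what it does follows; the __file__-based alternative in the trailing comment is a suggestion, not something the commit uses:

# Compute the project root relative to the current working directory instead of
# hard-coding an absolute path, then make the top-level packages importable.
import os
import sys

pth = os.path.abspath(os.path.join(os.getcwd(), ".."))  # parent of results/
os.chdir(pth)         # run from the project root so relative data paths resolve
sys.path.append(pth)  # so `from utils.load_model import *` and the dataloader imports work

# This assumes the script is launched from inside results/. An alternative is to
# anchor on the script's own location, which works from any working directory:
# pth = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))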
6 changes: 4 additions & 2 deletions results/results_half.py
@@ -1,6 +1,8 @@
#%%
import os
os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/")
import os, sys
pth = os.path.abspath(os.path.join(os.getcwd(), ".."))
os.chdir(pth) # change to project directory
sys.path.append(pth)
from utils.load_model import *
tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present; we won't need it
from dataloader.load_mnist import half_load_mnist
6 changes: 4 additions & 2 deletions results/results_latent_prior_vs_posterior.py
@@ -1,6 +1,8 @@
#%%
import os
os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/")
import os, sys
pth = os.path.abspath(os.path.join(os.getcwd(), ".."))
os.chdir(pth) # change to project directory
sys.path.append(pth)
from utils.load_model import *
tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present; we won't need it

6 changes: 4 additions & 2 deletions results/results_regression.py
@@ -1,6 +1,8 @@
#%%
import os
os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/")
import os, sys
pth = os.path.abspath(os.path.join(os.getcwd(), ".."))
os.chdir(pth) # change to project directory
sys.path.append(pth)
from utils.load_model import *
tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present; we won't need it
import matplotlib.pyplot as plt
6 changes: 4 additions & 2 deletions results/results_split.py
@@ -1,6 +1,8 @@
#%%
import os
os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/")
import os, sys
pth = os.path.abspath(os.path.join(os.getcwd(), ".."))
os.chdir(pth) # change to project directory
sys.path.append(pth)
from utils.load_model import *
tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present; we won't need it
from dataloader.load_mnist import split_load_mnist
6 changes: 4 additions & 2 deletions results/results_varying_kernel.py
@@ -1,6 +1,8 @@
#%%
import os
os.chdir("/Users/baker/Documents/MLMI4/conditional-neural-processes/")
import os, sys
pth = os.path.abspath(os.path.join(os.getcwd(), ".."))
os.chdir(pth) # change to project directory
sys.path.append(pth)
from utils.load_model import *
tf.config.set_visible_devices([], 'GPU') # Disable the GPU if present; we won't need it
from dataloader.load_regression_data_from_arbitrary_gp_varying_kernel import RegressionDataGeneratorArbitraryGPWithVaryingKernel
16 changes: 8 additions & 8 deletions train.py
@@ -21,28 +21,29 @@
tfk = tf.keras
tfd = tfp.distributions

# Parse arguments

# ================================ Parse Training parameters ===============================================
# parser = argparse.ArgumentParser()
# parser.add_argument('-e', '--epochs', type=int, default=120, help='Number of training epochs')
# parser.add_argument('-b', '--batch', type=int, default=1024, help='Batch size for training')
# parser.add_argument('-t', '--task', type=str, default='regression', help='Task to perform : (mnist|regression)')
# parser.add_argument('-t', '--task', type=str, default='regression', help='Task to perform : (mnist|regression|celeb|regression_varying)')
# parser.add_argument('-c', '--num_context', type=int, default=100)
# parser.add_argument('-u', '--uniform_sampling', type=bool, default=True)
# parser.add_argument('-m', '--model', type=bool, default='CNP', help='CNP|LNP|HNP|HNPC')
# args = parser.parse_args()



# -------------------------------------------------------------------------------------------------------------------------




# ================================ Training parameters ===============================================

# Regression
args = argparse.Namespace(epochs=160, batch=1024, task='regression_varying', num_context=25, uniform_sampling=True, model='HNPC')
args = argparse.Namespace(epochs=160, batch=1024, task='regression', num_context=25, uniform_sampling=True, model='CNP')

# MNIST / Celeb
#args = argparse.Namespace(epochs=30, batch=256, task='mnist', num_context=100, uniform_sampling=True, model='HNPC')
#args = argparse.Namespace(epochs=30, batch=256, task='mnist', num_context=100, uniform_sampling=True, model='CNP')

LOG_PRIORS = True

@@ -172,8 +173,7 @@ def train_step(model, x, optimizer):

# ============================ Training Loop ===========================================================
epochs = args.epochs
#epochs = 160
for epoch in range(40, epochs + 1):
for epoch in range(1, epochs + 1):
with tqdm(total=TRAINING_ITERATIONS, unit='batch') as tepoch:
tepoch.set_description(f"Epoch {epoch}")

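To round out the train.py hunk above, a rough sketch of how the visible loop skeleton might continue. Everything past tepoch.set_description is an assumption, not taken from the file, and train_ds, model, optimizer, train_step and TRAINING_ITERATIONS stand in for the objects defined earlier in train.py:

from tqdm import tqdm

epochs = args.epochs
for epoch in range(1, epochs + 1):
    with tqdm(total=TRAINING_ITERATIONS, unit='batch') as tepoch:
        tepoch.set_description(f"Epoch {epoch}")
        for x in train_ds.take(TRAINING_ITERATIONS):  # assumed tf.data pipeline
            loss = train_step(model, x, optimizer)    # signature from the hunk above
            tepoch.update(1)                          # advance the progress bar by one batch
            tepoch.set_postfix(loss=float(loss))      # assumes train_step returns a scalar loss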
