Commit 4efd791
[USGS-R#146] remove gpu rule
jsadler2 committed Jan 21, 2022 · 1 parent 02c6706
Showing 4 changed files with 0 additions and 86 deletions.
21 changes: 0 additions & 21 deletions workflow_examples/Snakefile_basic.smk
@@ -62,27 +62,6 @@ rule prep_io_data:
tst_val_offset = config['tst_val_offset'])


# use "train" if wanting to use GPU on HPC
# rule train:
# input:
# "{outdir}/prepped.npz"
# output:
# directory("{outdir}/trained_model/"),
# directory("{outdir}/pretrained_model/"),
# params:
# # getting the base path to put the training outputs in
# # I omit the last slash (hence the '[:-1]') so the split works properly
# run_dir=lambda wildcards, output: os.path.split(output[0][:-1])[0],
# pt_epochs=config['pt_epochs'],
# ft_epochs=config['ft_epochs'],
# lamb=config['lamb'],
# shell:
# """
# module load analytics cuda10.1/toolkit/10.1.105
# run_training -e /home/jsadler/.conda/envs/rgcn --no-node-list "python {code_dir}/train_model_cli.py -o {params.run_dir} -i {input[0]} -p {params.pt_epochs} -f {params.ft_epochs} --lambdas {params.lamb} --loss_func multitask_rmse --model rgcn -s 135"
# """


model = LSTMModel(
config['hidden_size'],
recurrent_dropout=config['recurrent_dropout'],
23 changes: 0 additions & 23 deletions workflow_examples/Snakefile_gw.smk
@@ -77,29 +77,6 @@ rule prep_ann_temp:
trn_offset = config['trn_offset'],
tst_val_offset = config['tst_val_offset'])

# use "train" if wanting to use GPU on HPC
#rule train:
# input:
# "{outdir}/prepped_withGW.npz"
# output:
# directory("{outdir}/trained_weights/"),
# directory("{outdir}/pretrained_weights/"),
# params:
# # getting the base path to put the training outputs in
# # I omit the last slash (hence the '[:-1]') so the split works properly
# run_dir=lambda wildcards, output: os.path.split(output[0][:-1])[0],
# pt_epochs=config['pt_epochs'],
# ft_epochs=config['ft_epochs'],
# lamb=config['lamb'],
# lamb2=config['lamb2'],
# lamb3=config['lamb3'],
# loss = config['loss_type'],
# seed = config['seed']
# shell:
# """
# module load analytics cuda10.1/toolkit/10.1.105
# run_training -e /home/jbarclay/.conda/envs/rgcn --no-node-list "python {code_dir}/train_model_cli.py -o {params.run_dir} -i {input[0]} -p {params.pt_epochs} -f {params.ft_epochs} --lamb {params.lamb} --lamb2 {params.lamb2} --lamb3 {params.lamb3} --model rgcn --loss {params.loss} -s {params.seed}"
# """

#get the GW loss parameters
def get_gw_loss(input_data, temp_var="temp_c"):
21 changes: 0 additions & 21 deletions workflow_examples/Snakefile_pretrain_LSTM.smk
@@ -67,27 +67,6 @@ rule prep_io_data:
tst_val_offset = config['tst_val_offset'])


# use "train" if wanting to use GPU on HPC
# rule train:
# input:
# "{outdir}/prepped.npz"
# output:
# directory("{outdir}/trained_model/"),
# directory("{outdir}/pretrained_model/"),
# params:
# # getting the base path to put the training outputs in
# # I omit the last slash (hence the '[:-1]') so the split works properly
# run_dir=lambda wildcards, output: os.path.split(output[0][:-1])[0],
# pt_epochs=config['pt_epochs'],
# ft_epochs=config['ft_epochs'],
# lamb=config['lamb'],
# shell:
# """
# module load analytics cuda10.1/toolkit/10.1.105
# run_training -e /home/jsadler/.conda/envs/rgcn --no-node-list "python {code_dir}/train_model_cli.py -o {params.run_dir} -i {input[0]} -p {params.pt_epochs} -f {params.ft_epochs} --lambdas {params.lamb} --loss_func multitask_rmse --model rgcn -s 135"
# """


# Pretrain the model on process based model
rule pre_train:
input:
21 changes: 0 additions & 21 deletions workflow_examples/Snakefile_rgcn.smk
@@ -65,27 +65,6 @@ rule prep_io_data:
tst_val_offset = config['tst_val_offset'])


# use "train" if wanting to use GPU on HPC
# rule train:
# input:
# "{outdir}/prepped.npz"
# output:
# directory("{outdir}/trained_model/"),
# directory("{outdir}/pretrained_model/"),
# params:
# # getting the base path to put the training outputs in
# # I omit the last slash (hence the '[:-1]') so the split works properly
# run_dir=lambda wildcards, output: os.path.split(output[0][:-1])[0],
# pt_epochs=config['pt_epochs'],
# ft_epochs=config['ft_epochs'],
# lamb=config['lamb'],
# shell:
# """
# module load analytics cuda10.1/toolkit/10.1.105
# run_training -e /home/jsadler/.conda/envs/rgcn --no-node-list "python {code_dir}/train_model_cli.py -o {params.run_dir} -i {input[0]} -p {params.pt_epochs} -f {params.ft_epochs} --lambdas {params.lamb} --loss_func multitask_rmse --model rgcn -s 135"
# """


# Pretrain the model on process based model
rule pre_train:
input:
