Commit 6f1ad84
Fixes, and docs
GilesStrong committed Dec 15, 2020
1 parent 2cd05e8 commit 6f1ad84
Showing 12 changed files with 2,604 additions and 2,260 deletions.
2 changes: 1 addition & 1 deletion docs/source/lumin.nn.models.rst
@@ -6,7 +6,7 @@ Subpackages

```rst
.. toctree::
    :maxdepth: 1

    lumin.nn.models.blocks
    lumin.nn.models.layers
```
1,031 changes: 527 additions & 504 deletions examples/Binary_Classification_Signal_versus_Background.ipynb
908 changes: 463 additions & 445 deletions examples/Multi_Target_Regression_Di-tau_momenta.ipynb
1,009 changes: 513 additions & 496 deletions examples/Multiclass_Classification_Signal_versus_Backgrounds.ipynb
564 changes: 373 additions & 191 deletions examples/RNNs_CNNs_and_GNNs_for_matrix_data.ipynb
509 changes: 259 additions & 250 deletions examples/Simple_Binary_Classification_of_earnings.ipynb
735 changes: 380 additions & 355 deletions examples/Single_Target_Regression_Di-Higgs_mass_prediction.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion lumin/nn/callbacks/model_callbacks.py
```diff
@@ -345,7 +345,7 @@ def get_loss(self) -> float:
         Evaluates SWA model and returns loss
         '''
 
-        if self.epoch <= self.start_epoch: return self.model.fit_params.loss_val
+        if self.epoch <= self.start_epoch: return self.model.fit_params.loss_val.data.item()
         if self.loss is None:
             self.test_model.set_weights(self.weights)
             self.loss = self.test_model.evaluate(self.model.fit_params.by)
```
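The one-line fix converts the stored validation loss from a zero-dim tensor to a plain float before it is returned. A minimal sketch of the conversion, assuming `loss_val` is a PyTorch tensor as held by `fit_params`:

```python
import torch

loss_val = torch.tensor(0.6931)        # hypothetical stored validation loss
as_float = loss_val.data.item()        # .item() unwraps a 0-dim tensor to a Python float
print(type(loss_val), type(as_float))  # <class 'torch.Tensor'> <class 'float'>
```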
41 changes: 28 additions & 13 deletions lumin/nn/callbacks/monitors.py
```diff
@@ -209,9 +209,9 @@ def on_forwards_end(self) -> None:
 
     def _add_loss_name(self, name:str) -> None:
         self.loss_names.append(name)
-        self.loss_vals.append(list(np.zeros_like(self.loss_vals[1])))
-        self.vel_vals.append(list(np.zeros_like(self.vel_vals[0])))
-        self.gen_vals.append(list(np.zeros_like(self.gen_vals[0])))
+        self.loss_vals.append([0 for _ in self.loss_vals[1]])
+        self.vel_vals.append([0 for _ in self.vel_vals[0]])
+        self.gen_vals.append([0 for _ in self.gen_vals[0]])
 
     def print_losses(self) -> None:
         r'''
```
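The replacement swaps NumPy-typed zero padding for plain Python ints; a quick sketch of the difference (illustrative values):

```python
import numpy as np

vals = [0.5, 0.4, 0.3]              # e.g. an existing loss history
np_pad = list(np.zeros_like(vals))  # [0.0, 0.0, 0.0] -- np.float64 elements
py_pad = [0 for _ in vals]          # [0, 0, 0] -- plain int elements
print(type(np_pad[0]), type(py_pad[0]))
```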
```diff
@@ -359,18 +359,33 @@ def get_loss_history(self) -> Tuple[OrderedDict,OrderedDict]:
         for v,c in zip(self.metric_vals,self.metric_cbs): history[1][c.name] = v
         return history
 
-    def get_results(self) -> Dict[str,float]:
-        losses = np.array(self.loss_vals)[1:]
+    def get_results(self, save_best:bool) -> Dict[str,float]:
+        r'''
+        Returns losses and metrics of the (loaded) model
+        #TODO: extend this to load at specified index
+
+        Arguments:
+            save_best: if the training used :class:`~lumin.nn.callbacks.monitors.SaveBest` return results at best point else return the latest values
+
+        Returns:
+            dictionary of validation loss and metrics
+        '''
+
+        losses = np.array(self.loss_vals[1:])
         metrics = np.array(self.metric_vals)
         results = {}
 
-        if self.main_metric_idx is None or not self.lock_to_metric or len(losses) > 1:  # Tracking SWA only supported for loss
-            idx = np.unravel_index(np.argmin(losses), losses.shape)[-1]
-            results['loss'] = np.min(losses)
-        else:
-            idx = np.argmin(self.metric_vals[self.main_metric_idx]) if self.metric_cbs[self.main_metric_idx].lower_metric_better else \
-                np.argmax(self.metric_vals[self.main_metric_idx])
-            results['loss'] = losses[0][idx]
+        if save_best:
+            if self.main_metric_idx is None or not self.lock_to_metric or len(losses) > 1:  # Tracking SWA only supported for loss
+                idx = np.unravel_index(np.argmin(losses), losses.shape)[-1]
+                results['loss'] = np.min(losses)
+            else:
+                idx = np.argmin(self.metric_vals[self.main_metric_idx]) if self.metric_cbs[self.main_metric_idx].lower_metric_better else \
+                    np.argmax(self.metric_vals[self.main_metric_idx])
+                results['loss'] = losses[0][idx]
+        else:
+            results['loss'] = np.min(losses[:,-1:])
+            idx = -1
 
         for c,v in zip(self.metric_cbs,metrics[:,idx]): results[c.name] = v
         return results
```
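A hedged usage sketch of the new flag, assuming `metric_log` is the `MetricLogger` callback instance used during training (the name `train.py` uses below):

```python
# Values at the best point found during training (requires SaveBest-style tracking)
best = metric_log.get_results(save_best=True)
# Latest-epoch values, e.g. when no best-model checkpointing was used
last = metric_log.get_results(save_best=False)
print(best['loss'], last['loss'])
```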
59 changes: 57 additions & 2 deletions lumin/nn/metrics/eval_metric.py
```diff
@@ -102,12 +102,25 @@ def on_train_begin(self) -> None:
             if hasattr(c, 'main_metric'): c.main_metric = False
         self.main_metric = True
 
-    def on_epoch_begin(self) -> None: self.preds,self.metric = [],None
+    def on_epoch_begin(self) -> None:
+        r'''
+        Resets prediction tracking
+        '''
+
+        self.preds,self.metric = [],None
 
     def on_forwards_end(self) -> None:
+        r'''
+        Save predictions from batch
+        '''
+
         if self.model.fit_params.state == 'valid': self.preds.append(self.model.fit_params.y_pred.cpu().detach())
 
     def on_epoch_end(self) -> None:
+        r'''
+        Compute metric using saved predictions
+        '''
+
         if self.model.fit_params.state != 'valid': return
         self.preds = to_np(torch.cat(self.preds)).squeeze()
         if 'multiclass' in self.model.objective: self.preds = np.exp(self.preds)
```
```diff
@@ -117,7 +130,15 @@ def on_epoch_end(self) -> None:
         self.metric = self.evaluate()
         del self.preds
 
-    def get_metric(self) -> float: return self.metric
+    def get_metric(self) -> float:
+        r'''
+        Returns metric value
+
+        Returns:
+            metric value
+        '''
+
+        return self.metric
 
     @abstractmethod
     def evaluate(self) -> float:
```
```diff
@@ -132,11 +153,45 @@ def evaluate(self) -> float:
 
     def evaluate_model(self, model:AbsModel, fy:FoldYielder, fold_idx:int, inputs:np.ndarray, targets:np.ndarray, weights:Optional[np.ndarray]=None,
                        bs:Optional[int]=None) -> float:
+        r'''
+        Gets model predictions and computes metric value. The fy and fold_idx arguments are necessary in case the metric requires extra information
+        beyond inputs, targets, and weights.
+
+        Arguments:
+            model: model to evaluate
+            fy: :class:`~lumin.nn.data.fold_yielder.FoldYielder` containing data
+            fold_idx: fold index of corresponding data
+            inputs: input data
+            targets: target data
+            weights: optional weights
+            bs: optional batch size
+
+        Returns:
+            metric value
+        '''
+
         self.model = model
         preds = self.model.predict(inputs, bs=bs)
         return self.evaluate_preds(fy=fy, fold_idx=fold_idx, preds=preds, targets=targets, weights=weights, bs=bs)
 
     def evaluate_preds(self, fy:FoldYielder, fold_idx:int, preds:np.ndarray, targets:np.ndarray, weights:Optional[np.ndarray]=None,
                        bs:Optional[int]=None) -> float:
+        r'''
+        Computes metric value from predictions. The fy and fold_idx arguments are necessary in case the metric requires extra information
+        beyond predictions, targets, and weights.
+
+        Arguments:
+            fy: :class:`~lumin.nn.data.fold_yielder.FoldYielder` containing data
+            fold_idx: fold index of corresponding data
+            preds: model predictions
+            targets: target data
+            weights: optional weights
+            bs: optional batch size
+
+        Returns:
+            metric value
+        '''
+
         class MockModel():
             def __init__(self): pass
```
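Given this interface — `on_epoch_end` stores processed validation predictions and calls `evaluate` — a custom metric only needs one method. A minimal sketch; `self.targets` is an assumption for illustration (check the `EvalMetric` base class for how targets are actually exposed):

```python
import numpy as np

# Hypothetical metric subclass: mean absolute error on validation predictions.
# self.preds is set by on_epoch_end above; the self.targets attribute name is
# illustrative and may differ in the real base class.
class MeanAbsError(EvalMetric):
    def evaluate(self) -> float:
        return float(np.mean(np.abs(self.preds - self.targets)))
```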
2 changes: 1 addition & 1 deletion lumin/nn/training/train.py
```diff
@@ -115,7 +115,7 @@ def train_models(fy:FoldYielder, n_models:int, bs:int, model_builder:ModelBuilder,
             cycle_losses.append([])
             for c in cbs:
                 if hasattr(c, 'cycle_save') and c.cycle_save: cycle_losses[-1] = c.cycle_losses
-        results.append(metric_log.get_results())
+        results.append(metric_log.get_results(save_best=True))
         print(f"Scores are: {results[-1]}")
         results[-1]['path'] = model_dir
         with open(savepath/'results_file.pkl', 'wb') as fout: pickle.dump(results, fout)
```
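For reference, a short sketch of reading back the pickled results list written above (the save directory name is hypothetical):

```python
import pickle
from pathlib import Path

savepath = Path('train_weights')  # hypothetical; use the savepath passed to train_models
with open(savepath/'results_file.pkl', 'rb') as fin:
    results = pickle.load(fin)
print(results[-1]['loss'], results[-1]['path'])  # one metrics dict per trained model
```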
2 changes: 1 addition & 1 deletion lumin/utils/multiprocessing.py
```diff
@@ -16,7 +16,7 @@ def mp_run(args:List[Dict[Any,Any]], func:Callable[[Any],Any]) -> Dict[Any,Any]:
         func: function to which to pass dictionary arguments
 
     Returns:
-        DIctionary of results
+        Dictionary of results
     '''
 
     procs = []
```
