diff --git a/examples/siamese_glvq_iris.py b/examples/siamese_glvq_iris.py
index b00e308..ff63da9 100644
--- a/examples/siamese_glvq_iris.py
+++ b/examples/siamese_glvq_iris.py
@@ -51,8 +51,7 @@ if __name__ == "__main__":
     # Hyperparameters
     hparams = dict(
         distribution=[1, 2, 3],
-        proto_lr=0.01,
-        bb_lr=0.01,
+        lr=0.01,
     )
 
     # Initialize the backbone
diff --git a/examples/siamese_gtlvq_iris.py b/examples/siamese_gtlvq_iris.py
index 4f036d1..35405e1 100644
--- a/examples/siamese_gtlvq_iris.py
+++ b/examples/siamese_gtlvq_iris.py
@@ -51,8 +51,7 @@ if __name__ == "__main__":
     # Hyperparameters
     hparams = dict(
         distribution=[1, 2, 3],
-        proto_lr=0.01,
-        bb_lr=0.01,
+        lr=0.01,
         input_dim=2,
         latent_dim=1,
     )
diff --git a/examples/warm_starting.py b/examples/warm_starting.py
index a9e17dc..22acf51 100644
--- a/examples/warm_starting.py
+++ b/examples/warm_starting.py
@@ -55,7 +55,9 @@ if __name__ == "__main__":
 
     # Setup trainer for GNG
    trainer = pl.Trainer(
-        max_epochs=1000,
+        accelerator="cpu",
+        max_epochs=50 if args.fast_dev_run else
+        1000,  # A 10-epoch fast dev run reproduces a DIV error.
         callbacks=[
             es,
         ],
diff --git a/glvq_iris.ckpt b/glvq_iris.ckpt
new file mode 100644
index 0000000..4579313
Binary files /dev/null and b/glvq_iris.ckpt differ
diff --git a/iris.pth b/iris.pth
new file mode 100644
index 0000000..7e485c0
Binary files /dev/null and b/iris.pth differ
diff --git a/prototorch/models/glvq.py b/prototorch/models/glvq.py
index 2ca9834..8f77fdd 100644
--- a/prototorch/models/glvq.py
+++ b/prototorch/models/glvq.py
@@ -123,26 +123,6 @@ class SiameseGLVQ(GLVQ):
         self.backbone = backbone
         self.both_path_gradients = both_path_gradients
 
-    def configure_optimizers(self):
-        proto_opt = self.optimizer(self.proto_layer.parameters(),
-                                   lr=self.hparams["proto_lr"])
-        # Only add a backbone optimizer if backbone has trainable parameters
-        bb_params = list(self.backbone.parameters())
-        if (bb_params):
-            bb_opt = self.optimizer(bb_params, lr=self.hparams["bb_lr"])
-            optimizers = [proto_opt, bb_opt]
-        else:
-            optimizers = [proto_opt]
-        if self.lr_scheduler is not None:
-            schedulers = []
-            for optimizer in optimizers:
-                scheduler = self.lr_scheduler(optimizer,
-                                              **self.lr_scheduler_kwargs)
-                schedulers.append(scheduler)
-            return optimizers, schedulers
-        else:
-            return optimizers
-
     def compute_distances(self, x):
         protos, _ = self.proto_layer()
         x, protos = (arr.view(arr.size(0), -1) for arr in (x, protos))
diff --git a/prototorch/models/unsupervised.py b/prototorch/models/unsupervised.py
index 8833de2..80d7d85 100644
--- a/prototorch/models/unsupervised.py
+++ b/prototorch/models/unsupervised.py
@@ -63,7 +63,7 @@ class KohonenSOM(NonGradientMixin, UnsupervisedPrototypeModel):
             strict=False,
         )
 
-    def training_epoch_end(self, training_step_outputs):
+    def on_train_epoch_end(self):
         self._sigma = self.hparams.sigma * np.exp(
             -self.current_epoch / self.trainer.max_epochs)
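
Note on the optimizer change: with the override deleted, SiameseGLVQ falls back to the optimizer setup of its parent GLVQ class, which is why the examples above replace proto_lr/bb_lr with a single lr. The base-class implementation is not part of this patch; the sketch below is an assumption of what it plausibly looks like, reusing the self.optimizer, self.lr_scheduler, and self.lr_scheduler_kwargs attributes visible in the deleted code and the hparams["lr"] key set in the examples.

    # Hypothetical sketch of the single-optimizer configure_optimizers
    # that SiameseGLVQ now inherits; not taken from this patch.
    def configure_optimizers(self):
        # One optimizer over *all* trainable parameters, prototypes and
        # backbone alike, driven by the single "lr" hyperparameter.
        optimizer = self.optimizer(self.parameters(), lr=self.hparams["lr"])
        if self.lr_scheduler is not None:
            scheduler = self.lr_scheduler(optimizer,
                                          **self.lr_scheduler_kwargs)
            return [optimizer], [scheduler]
        return optimizer

A single optimizer is simpler for Lightning to step (no manual juggling of two optimizers per batch), at the cost of losing independent learning rates for the two parameter paths.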
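
If distinct learning rates for prototypes and backbone are still wanted, standard PyTorch parameter groups recover them inside one optimizer. A minimal runnable sketch; the two nn modules here are stand-ins for the proto_layer and backbone attributes of the class above:

    import torch
    from torch import nn

    # Stand-ins for the prototype layer and backbone; in practice these
    # would be the SiameseGLVQ attributes of the same names.
    proto_layer = nn.Linear(4, 3)
    backbone = nn.Sequential(nn.Linear(4, 2), nn.ReLU())

    # Per-parameter-group learning rates in a single optimizer; each
    # group's "lr" overrides the optimizer-wide default.
    optimizer = torch.optim.Adam([
        {"params": proto_layer.parameters(), "lr": 0.01},
        {"params": backbone.parameters(), "lr": 0.001},
    ])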