Fix broken state from previous commit

Jensun Ravichandran 2021-04-21 21:35:52 +02:00
parent fa7b178028
commit e5a62bd0fc
2 changed files with 77 additions and 49 deletions

Changed file 1 of 2

@@ -17,6 +17,21 @@ class NumpyDataset(TensorDataset):
         super().__init__(*tensors)
 
 
+class GLVQIris(GLVQ):
+    @staticmethod
+    def add_model_specific_args(parent_parser):
+        parser = argparse.ArgumentParser(parents=[parent_parser],
+                                         add_help=False)
+        parser.add_argument("--epochs", type=int, default=1)
+        parser.add_argument("--lr", type=float, default=1e-1)
+        parser.add_argument("--batch_size", type=int, default=150)
+        parser.add_argument("--prototypes_per_class", type=int, default=3)
+        parser.add_argument("--prototype_initializer",
+                            type=str,
+                            default="stratified_mean")
+        return parser
+
+
 class VisualizationCallback(pl.Callback):
     def __init__(self,
                  x_train,
@@ -62,30 +77,9 @@ class VisualizationCallback(pl.Callback):
 
 if __name__ == "__main__":
     # Hyperparameters
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--epochs",
-                        type=int,
-                        default=100,
-                        help="Epochs to train.")
-    parser.add_argument("--lr",
-                        type=float,
-                        default=0.001,
-                        help="Learning rate.")
-    parser.add_argument("--batch_size",
-                        type=int,
-                        default=256,
-                        help="Batch size.")
-    parser.add_argument("--gpus",
-                        type=int,
-                        default=0,
-                        help="Number of GPUs to use.")
-    parser.add_argument("--ppc",
-                        type=int,
-                        default=1,
-                        help="Prototypes-Per-Class.")
-    args = parser.parse_args()
+    # For best-practices when using `argparse` with `pytorch_lightning`, see
+    # https://pytorch-lightning.readthedocs.io/en/stable/common/hyperparameters.html
+    parser = argparse.ArgumentParser()
 
     # Dataset
     x_train, y_train = load_iris(return_X_y=True)
@@ -95,32 +89,35 @@ if __name__ == "__main__":
 
     # Dataloaders
     train_loader = DataLoader(train_ds, num_workers=0, batch_size=150)
 
-    # Initialize the model
-    model = GLVQ(
-        input_dim=x_train.shape[1],
-        nclasses=3,
-        prototype_distribution=[2, 7, 5],
-        prototype_initializer="stratified_mean",
-        data=[x_train, y_train],
-        lr=0.01,
-    )
-
-    # Model summary
-    print(model)
+    # Add model specific args
+    parser = GLVQIris.add_model_specific_args(parser)
 
     # Callbacks
     vis = VisualizationCallback(x_train, y_train)
 
+    # Automatically add trainer-specific-args like `--gpus`, `--num_nodes` etc.
+    parser = pl.Trainer.add_argparse_args(parser)
+
     # Setup trainer
-    trainer = pl.Trainer(
-        max_epochs=hparams.epochs,
-        auto_lr_find=True,  # finds learning rate automatically with `trainer.tune(model)`
+    trainer = pl.Trainer.from_argparse_args(
+        parser,
         callbacks=[
             vis,  # comment this line out to disable the visualization
         ],
     )
-    trainer.tune(model)
+    # trainer.tune(model)
+
+    # Initialize the model
+    args = parser.parse_args()
+    model = GLVQIris(
+        args,
+        input_dim=x_train.shape[1],
+        nclasses=3,
+        data=[x_train, y_train],
+    )
+
+    # Model summary
+    print(model)
 
     # Training loop
     trainer.fit(model, train_loader)
@@ -130,6 +127,6 @@ if __name__ == "__main__":
     trainer.save_checkpoint(ckpt)
 
     # Load the checkpoint
-    new_model = GLVQ.load_from_checkpoint(checkpoint_path=ckpt)
+    new_model = GLVQIris.load_from_checkpoint(checkpoint_path=ckpt)
     print(new_model)
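The rewired example above funnels every hyperparameter through a single argparse parser instead of a hard-coded model constructor. Below is a minimal standalone sketch of that flow, assuming `GLVQIris` is importable from the example script (the import path is hypothetical; class and argument names are taken from the diff above):

import argparse

import pytorch_lightning as pl
from sklearn.datasets import load_iris

# from glvq_iris import GLVQIris  # hypothetical import path

x_train, y_train = load_iris(return_X_y=True)

# One parser accumulates model- and trainer-specific arguments.
parser = argparse.ArgumentParser()
parser = GLVQIris.add_model_specific_args(parser)  # --lr, --prototypes_per_class, ...
parser = pl.Trainer.add_argparse_args(parser)  # --gpus, --max_epochs, ...
args = parser.parse_args()

# The trainer picks out its own arguments from the parsed Namespace; the full
# Namespace also doubles as the model's `hparams`.
trainer = pl.Trainer.from_argparse_args(args)
model = GLVQIris(args,
                 input_dim=x_train.shape[1],
                 nclasses=3,
                 data=[x_train, y_train])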

Changed file 2 of 2

@@ -1,3 +1,5 @@
+import argparse
+
 import pytorch_lightning as pl
 import torch
 import torchmetrics
@@ -10,10 +12,21 @@ from prototorch.modules.prototypes import Prototypes1D
 
 class GLVQ(pl.LightningModule):
     """Generalized Learning Vector Quantization."""
-    def __init__(self, hparams):
+    def __init__(self, hparams, input_dim, nclasses, **kwargs):
         super().__init__()
-        self.lr = hparams.lr
-        self.proto_layer = Prototypes1D(**kwargs)
+        self.hparams = hparams
+        # self.save_hyperparameters(
+        #     "lr",
+        #     "prototypes_per_class",
+        #     "prototype_initializer",
+        # )
+        self.proto_layer = Prototypes1D(
+            input_dim=input_dim,
+            nclasses=nclasses,
+            prototypes_per_class=hparams.prototypes_per_class,
+            prototype_initializer=hparams.prototype_initializer,
+            **kwargs)
         self.train_acc = torchmetrics.Accuracy()
 
     @property
@@ -24,15 +37,28 @@ class GLVQ(pl.LightningModule):
     def prototype_labels(self):
         return self.proto_layer.prototype_labels.detach().numpy()
 
-    def configure_optimizers(self):
-        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
-        return optimizer
+    @staticmethod
+    def add_model_specific_args(parent_parser):
+        parser = argparse.ArgumentParser(parents=[parent_parser],
+                                         add_help=False)
+        parser.add_argument("--epochs", type=int, default=1)
+        parser.add_argument("--lr", type=float, default=1e-2)
+        parser.add_argument("--batch_size", type=int, default=32)
+        parser.add_argument("--prototypes_per_class", type=int, default=1)
+        parser.add_argument("--prototype_initializer",
+                            type=str,
+                            default="zeros")
+        return parser
 
     def forward(self, x):
         protos = self.proto_layer.prototypes
         dis = euclidean_distance(x, protos)
         return dis
 
+    def configure_optimizers(self):
+        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
+        return optimizer
+
     def training_step(self, train_batch, batch_idx):
         x, y = train_batch
         x = x.view(x.size(0), -1)
@@ -44,8 +70,13 @@ class GLVQ(pl.LightningModule):
         with torch.no_grad():
             preds = wtac(dis, plabels)
         # self.train_acc.update(preds.int(), y.int())
-        self.train_acc(preds.int(), y.int())  # FloatTensors are assumed to be class probabilities
-        self.log("Training Accuracy", self.train_acc, on_step=False, on_epoch=True)
+        self.train_acc(
+            preds.int(),
+            y.int())  # FloatTensors are assumed to be class probabilities
+        self.log("Training Accuracy",
+                 self.train_acc,
+                 on_step=False,
+                 on_epoch=True)
         return loss
 
     # def training_epoch_end(self, outs):
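For reference, the reformatted accuracy logging at the end of the diff follows the standard torchmetrics pattern. A self-contained sketch of that pattern (example tensors invented for illustration; note that recent torchmetrics releases additionally require a `task` argument to `Accuracy`):

import torch
import torchmetrics

acc = torchmetrics.Accuracy()

preds = torch.tensor([0, 1, 2, 1])  # integer class labels, e.g. from wtac
target = torch.tensor([0, 1, 1, 1])

# Calling the metric object updates its running state and returns the value
# for this batch; inside a LightningModule, self.log(..., on_epoch=True)
# accumulates and resets that state at the end of each epoch.
batch_acc = acc(preds.int(), target.int())
print(batch_acc)  # tensor(0.7500)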