Refactor code

This commit is contained in:
Jensun Ravichandran 2021-05-20 14:40:02 +02:00
parent 969fb34cc3
commit df061cc2ff
2 changed files with 25 additions and 24 deletions

View File

@ -23,8 +23,8 @@ if __name__ == "__main__":
distribution=(nclasses, prototypes_per_class),
input_dim=100,
latent_dim=2,
proto_lr=0.005,
bb_lr=0.005,
proto_lr=0.001,
bb_lr=0.001,
)
# Initialize the model
@ -33,12 +33,17 @@ if __name__ == "__main__":
# Callbacks
vis = pt.models.VisSiameseGLVQ2D(train_ds, border=0.1)
es = pl.callbacks.EarlyStopping(monitor="val_loss",
min_delta=0.001,
patience=3,
verbose=False,
mode="min")
# Setup trainer
trainer = pl.Trainer(
gpus=0,
max_epochs=20,
callbacks=[vis],
max_epochs=100,
callbacks=[vis, es],
weights_summary=None,
)
@ -55,5 +60,4 @@ if __name__ == "__main__":
saved_model.show_lambda()
# Testing
# TODO
# trainer.test(model, test_dataloaders=test_loader)
trainer.test(model, test_dataloaders=test_loader)

View File

@ -54,12 +54,20 @@ class GLVQ(AbstractPrototypeModel):
distances = self.distance_fn(x, protos)
return distances
def log_acc(self, distances, targets, tag):
plabels = self.proto_layer.component_labels
# Compute training accuracy
def predict_from_distances(self, distances):
    """Return winner-take-all class predictions for precomputed distances.

    Args:
        distances: distance matrix of inputs to prototypes, as produced
            by the forward pass.

    Returns:
        Tensor of predicted class labels, one per input row.
    """
    with torch.no_grad():
        # Labels of the prototype components; WTA assigns each input the
        # label of its closest prototype.
        plabels = self.proto_layer.component_labels
        y_pred = wtac(distances, plabels)
    return y_pred
def predict(self, x):
    """Predict class labels for a batch of inputs ``x``.

    Runs the forward pass under ``torch.no_grad()`` and delegates the
    label assignment to ``predict_from_distances``.
    """
    with torch.no_grad():
        d = self(x)
        labels = self.predict_from_distances(d)
    return labels
def log_acc(self, distances, targets, tag):
preds = self.predict_from_distances(distances)
self.acc_metric(preds.int(), targets.int())
# `.int()` because FloatTensors are assumed to be class probabilities
@ -81,7 +89,6 @@ class GLVQ(AbstractPrototypeModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
    """One optimization step: compute the shared loss, log loss and accuracy."""
    outputs, loss = self.shared_step(batch, batch_idx, optimizer_idx)
    self.log("train_loss", loss)
    # batch[-1] holds the targets of this batch.
    self.log_acc(outputs, batch[-1], tag="train_acc")
    return loss
@ -95,28 +102,18 @@ class GLVQ(AbstractPrototypeModel):
def test_step(self, batch, batch_idx):
    """Evaluate a single test batch: log its accuracy, return its loss."""
    # `model.eval()` and `torch.no_grad()` are handled by pl here.
    outputs, loss = self.shared_step(batch, batch_idx)
    self.log_acc(outputs, batch[-1], tag="test_acc")
    return loss
def test_epoch_end(self, outputs):
    """Aggregate the per-batch test losses and log the total once.

    Args:
        outputs: list of per-batch loss tensors returned by ``test_step``.
    """
    # Accumulate as a plain float; .item() detaches each batch loss.
    test_loss = 0.0
    for batch_loss in outputs:
        test_loss += batch_loss.item()
    # Log once per epoch, after the loop — not per batch.
    self.log("test_loss", test_loss)
# def predict_step(self, batch, batch_idx, dataloader_idx=None):
# pass
def predict(self, x):
    """Predict class labels for ``x`` via winner-take-all over distances.

    NOTE(review): this duplicates the other ``predict`` /
    ``predict_from_distances`` pair in this class and, appearing later,
    shadows it — likely a diff artifact; consider removing one copy.
    """
    self.eval()
    with torch.no_grad():
        dists = self(x)
        plabels = self.proto_layer.component_labels
        winners = wtac(dists, plabels)
    return winners
def __repr__(self):
    """Return the superclass representation unchanged."""
    # The previous f"{super_repr}" wrapper was a no-op; delegate directly.
    return super().__repr__()