fix: training loss is a zero-dimensional tensor

This should fix the problem with the EarlyStopping callback: Lightning can only log and compare a monitored loss that is a scalar (zero-dimensional) tensor, so the reductions now collapse all axes instead of only dim=0.
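For illustration (not part of the commit), a minimal sketch of the shape difference, assuming a per-sample loss of shape [batch_size, num_classes]:

    import torch

    # Hypothetical loss output: one value per sample and per class.
    batch_loss = torch.rand(32, 10)

    # Reducing only over dim=0 collapses the batch axis but leaves
    # a 1-D tensor behind, which cannot be logged as a single metric.
    print(batch_loss.mean(dim=0).shape)  # torch.Size([10])

    # Reducing over all axes yields the zero-dimensional tensor that
    # EarlyStopping can compare against its best value so far.
    print(batch_loss.mean().shape)  # torch.Size([])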
Alexander Engelsberger 2021-06-25 16:56:10 +02:00 committed by Alexander Engelsberger
parent f56ec44afe
commit 7b9b767113
7 changed files with 10 additions and 13 deletions


@@ -1,12 +1,11 @@
 """GMLVQ example using the MNIST dataset."""
 
-import torch
-from pytorch_lightning.utilities.cli import LightningCLI
-
 import prototorch as pt
+import torch
 from prototorch.models import ImageGMLVQ
 from prototorch.models.abstract import PrototypeModel
 from prototorch.models.data import MNISTDataModule
+from pytorch_lightning.utilities.cli import LightningCLI
 
 
 class ExperimentClass(ImageGMLVQ):


@@ -2,12 +2,11 @@
 import argparse
 
+import prototorch as pt
 import pytorch_lightning as pl
 import torch
 from sklearn.datasets import load_iris
 
-import prototorch as pt
-
 
 if __name__ == "__main__":
     # Command-line arguments
     parser = argparse.ArgumentParser()


@@ -48,7 +48,7 @@ class CBC(SiameseGLVQ):
         y_pred = self(x)
         num_classes = self.num_classes
         y_true = torch.nn.functional.one_hot(y.long(), num_classes=num_classes)
-        loss = self.loss(y_pred, y_true).mean(dim=0)
+        loss = self.loss(y_pred, y_true).mean()
         return y_pred, loss
 
     def training_step(self, batch, batch_idx, optimizer_idx=None):
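If self.loss returns anything other than a flat 1-D tensor, .mean(dim=0) keeps the trailing axes, while .mean() is shape-agnostic. A sketch, with the [32, 1] shape assumed purely for illustration:

    import torch

    per_sample = torch.rand(32, 1)       # assumed shape, not from the source
    print(per_sample.mean(dim=0).shape)  # torch.Size([1]) -- still 1-D
    print(per_sample.mean().shape)       # torch.Size([])  -- scalar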


@@ -5,13 +5,12 @@ Mainly used for PytorchLightningCLI configurations.
 """
 
 from typing import Any, Optional, Type
 
+import prototorch as pt
 import pytorch_lightning as pl
 from torch.utils.data import DataLoader, Dataset, random_split
 from torchvision import transforms
 from torchvision.datasets import MNIST
 
-import prototorch as pt
-
 # MNIST
 class MNISTDataModule(pl.LightningDataModule):


@@ -58,7 +58,7 @@ class GLVQ(SupervisedPrototypeModel):
         plabels = self.proto_layer.labels
         mu = self.loss(out, y, prototype_labels=plabels)
         batch_loss = self.transfer_layer(mu, beta=self.hparams.transfer_beta)
-        loss = batch_loss.sum(dim=0)
+        loss = batch_loss.sum()
         return out, loss
 
     def training_step(self, batch, batch_idx, optimizer_idx=None):
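For a genuinely 1-D batch_loss, sum(dim=0) already produces a zero-dimensional result, so here the change is defensive rather than behavioral. A sketch, assuming shape [batch_size]:

    import torch

    batch_loss = torch.rand(32)         # assumed 1-D per-sample losses
    print(batch_loss.sum(dim=0).shape)  # torch.Size([]) -- already 0-dim
    print(batch_loss.sum().shape)       # torch.Size([]) -- identical here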


@@ -24,7 +24,7 @@ class CELVQ(GLVQ):
         winning = stratified_min_pooling(out, plabels)  # [None, num_classes]
         probs = -1.0 * winning
         batch_loss = self.loss(probs, y.long())
-        loss = batch_loss.sum(dim=0)
+        loss = batch_loss.sum()
         return out, loss
 
 
@@ -56,7 +56,7 @@ class ProbabilisticLVQ(GLVQ):
         out = self.forward(x)
         plabels = self.proto_layer.labels
         batch_loss = self.loss(out, y, plabels)
-        loss = batch_loss.sum(dim=0)
+        loss = batch_loss.sum()
         return loss
 
 
@@ -92,5 +92,5 @@ class PLVQ(ProbabilisticLVQ, SiameseGMLVQ):
     #     x, y = batch
     #     y_pred = self(x)
     #     batch_loss = self.loss(y_pred, y)
-    #     loss = batch_loss.sum(dim=0)
+    #     loss = batch_loss.sum()
     #     return loss


@@ -132,7 +132,7 @@ class GrowingNeuralGas(NeuralGas):
         mask[torch.arange(len(mask)), winner] = 1.0
         dp = d * mask
 
-        self.errors += torch.sum(dp * dp, dim=0)
+        self.errors += torch.sum(dp * dp)
         self.errors *= self.hparams.step_reduction
 
         self.topology_layer(d)
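Note that dropping dim=0 changes this reduction itself: summing over all axes gives one scalar that is broadcast-added to every entry of self.errors, whereas the old code accumulated one error per prototype. A sketch, assuming dp has shape [batch_size, num_prototypes]:

    import torch

    dp = torch.rand(32, 5)                  # assumed [batch, num_prototypes]
    print(torch.sum(dp * dp, dim=0).shape)  # torch.Size([5]) -- per prototype
    print(torch.sum(dp * dp).shape)         # torch.Size([])  -- one scalar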