"""Probabilistic GLVQ methods"""

import torch

from prototorch.core.losses import nllr_loss, rslvq_loss
from prototorch.core.pooling import (
    stratified_min_pooling,
    stratified_sum_pooling,
)
from prototorch.nn.wrappers import LossLayer

from .extras import GaussianPrior, RankScaledGaussianPrior
from .glvq import GLVQ, SiameseGMLVQ


class CELVQ(GLVQ):
    """Cross-Entropy Learning Vector Quantization."""

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)

        # Loss
        self.loss = torch.nn.CrossEntropyLoss()

    def shared_step(self, batch, batch_idx, optimizer_idx=None):
        x, y = batch
        out = self.compute_distances(x)  # [None, num_protos]
        _, plabels = self.proto_layer()
        winning = stratified_min_pooling(out, plabels)  # [None, num_classes]
        # Negate the winning distances so that the closest prototype per
        # class yields the largest logit for CrossEntropyLoss.
        probs = -1.0 * winning
        batch_loss = self.loss(probs, y.long())
        loss = batch_loss.sum()
        return out, loss
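
    # Example: distances [[0.2, 1.5]] for classes (0, 1) become logits
    # [[-0.2, -1.5]]; the closer prototype (class 0) gets the larger logit
    # and hence the higher softmax probability inside CrossEntropyLoss.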


class ProbabilisticLVQ(GLVQ):
    """Base class for LVQ models with a pluggable conditional distribution."""

    def __init__(self, hparams, rejection_confidence=0.0, **kwargs):
        super().__init__(hparams, **kwargs)

        self.rejection_confidence = rejection_confidence
        self._conditional_distribution = None

    def forward(self, x):
        distances = self.compute_distances(x)

        conditional = self.conditional_distribution(distances)
        prior = (1.0 / self.num_prototypes) * torch.ones(
            self.num_prototypes, device=self.device)
        posterior = conditional * prior

        plabels = self.proto_layer._labels
        if plabels.dtype != torch.long:
            raise ValueError("Labels must be LongTensor.")
        y_pred = stratified_sum_pooling(posterior, plabels)

        return y_pred

    def predict(self, x):
        y_pred = self.forward(x)
        confidence, prediction = torch.max(y_pred, dim=1)
        prediction[confidence < self.rejection_confidence] = -1
        return prediction
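
    # Example: with rejection_confidence=0.5, any sample whose maximum
    # per-class posterior score falls below 0.5 is assigned the rejection
    # label -1.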

    def training_step(self, batch, batch_idx, optimizer_idx=None):
        x, y = batch
        out = self.forward(x)
        _, plabels = self.proto_layer()
        batch_loss = self.loss(out, y, plabels)
        loss = batch_loss.sum()
        return loss

    def conditional_distribution(self, distances):
        """Conditional distribution of distances."""
        if self._conditional_distribution is None:
            raise ValueError("Conditional distribution is not set.")
        return self._conditional_distribution(distances)
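

# Subclasses plug in the conditional distribution by assigning a module to
# `self._conditional_distribution`. A minimal sketch of such a plug-in
# (hypothetical, for illustration only):
#
#   class ExponentialPrior(torch.nn.Module):
#
#       def __init__(self, scale):
#           super().__init__()
#           self.scale = scale
#
#       def forward(self, distances):
#           # Density decays exponentially with distance to the prototype.
#           return torch.exp(-distances / self.scale)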


class SLVQ(ProbabilisticLVQ):
    """Soft Learning Vector Quantization."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Default hparams
        self.hparams.setdefault("variance", 1.0)
        variance = self.hparams.get("variance")

        self._conditional_distribution = GaussianPrior(variance)
        self.loss = LossLayer(nllr_loss)
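
    # GaussianPrior maps distances to (unnormalized) Gaussian densities, so
    # closer prototypes receive higher likelihoods; conceptually something
    # like exp(-d / (2 * variance)), though the exact form lives in
    # `.extras.GaussianPrior`.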


class RSLVQ(ProbabilisticLVQ):
    """Robust Soft Learning Vector Quantization."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Default hparams
        self.hparams.setdefault("variance", 1.0)
        variance = self.hparams.get("variance")

        self._conditional_distribution = GaussianPrior(variance)
        self.loss = LossLayer(rslvq_loss)
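
    # A minimal training sketch for SLVQ/RSLVQ (the hparams keys besides
    # `variance` and the initializer follow common GLVQ usage; treat the
    # exact names and values as assumptions):
    #
    #   import prototorch as pt
    #   import pytorch_lightning as pl
    #
    #   hparams = dict(distribution=[2, 2], lr=0.05, variance=0.3)
    #   model = RSLVQ(hparams,
    #                 prototypes_initializer=pt.initializers.SMCI(train_ds))
    #   pl.Trainer(max_epochs=50).fit(model, train_loader)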


class PLVQ(ProbabilisticLVQ, SiameseGMLVQ):
    """Probabilistic Learning Vector Quantization.

    TODO: Use Backbone LVQ instead
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Default hparams
        self.hparams.setdefault("lambda", 1.0)
        lam = self.hparams.get("lambda")

        # Assign to the private attribute so the `conditional_distribution`
        # method of ProbabilisticLVQ is not shadowed by an instance attribute.
        self._conditional_distribution = RankScaledGaussianPrior(lam)
        self.loss = torch.nn.KLDivLoss()

    # FIXME
    # def training_step(self, batch, batch_idx, optimizer_idx=None):
    #     x, y = batch
    #     y_pred = self(x)
    #     batch_loss = self.loss(y_pred, y)
    #     loss = batch_loss.sum()
    #     return loss
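
    # A possible repair for the FIXME above (a sketch, not the library's
    # method): torch.nn.KLDivLoss expects log-probabilities as input and a
    # probability distribution as target, so the integer labels would need
    # one-hot encoding and the posterior a log transform first:
    #
    #   def training_step(self, batch, batch_idx, optimizer_idx=None):
    #       x, y = batch
    #       y_pred = self(x)
    #       target = torch.nn.functional.one_hot(
    #           y.long(), num_classes=y_pred.shape[1]).float()
    #       batch_loss = self.loss(torch.log(y_pred + 1e-8), target)
    #       return batch_loss.sum()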