# prototorch_models/prototorch/models/cbc.py

import torch
import torchmetrics
from ..core.competitions import CBCC
from ..core.components import ReasoningComponents
from ..core.initializers import RandomReasoningsInitializer
from ..core.losses import MarginLoss
from ..core.similarities import euclidean_similarity
from ..nn.wrappers import LambdaLayer
from .abstract import ImagePrototypesMixin
from .glvq import SiameseGLVQ


class CBC(SiameseGLVQ):
    """Classification-By-Components."""

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)

        similarity_fn = kwargs.get("similarity_fn", euclidean_similarity)
        components_initializer = kwargs.get("components_initializer", None)
        reasonings_initializer = kwargs.get("reasonings_initializer",
                                            RandomReasoningsInitializer())
        self.components_layer = ReasoningComponents(
            self.hparams.distribution,
            components_initializer=components_initializer,
            reasonings_initializer=reasonings_initializer,
        )
        self.similarity_layer = LambdaLayer(similarity_fn)
        self.competition_layer = CBCC()

        # Namespace hook
        self.proto_layer = self.components_layer

        self.loss = MarginLoss(self.hparams.margin)

    def forward(self, x):
        components, reasonings = self.components_layer()
        latent_x = self.backbone(x)
        # Temporarily freeze the backbone while embedding the components,
        # unless both_path_gradients is set.
        self.backbone.requires_grad_(self.both_path_gradients)
        latent_components = self.backbone(components)
        self.backbone.requires_grad_(True)
        # Detections: similarities between latent inputs and latent components.
        detections = self.similarity_layer(latent_x, latent_components)
        # The competition combines detections with the reasonings into class
        # probabilities.
        probs = self.competition_layer(detections, reasonings)
        return probs

    def shared_step(self, batch, batch_idx, optimizer_idx=None):
        x, y = batch
        y_pred = self(x)
        num_classes = self.num_classes
        # The margin loss compares predicted probabilities against one-hot
        # encoded targets.
        y_true = torch.nn.functional.one_hot(y.long(), num_classes=num_classes)
        loss = self.loss(y_pred, y_true).mean(dim=0)
        return y_pred, loss

    def training_step(self, batch, batch_idx, optimizer_idx=None):
        y_pred, train_loss = self.shared_step(batch, batch_idx, optimizer_idx)
        preds = torch.argmax(y_pred, dim=1)
        accuracy = torchmetrics.functional.accuracy(preds.int(),
                                                    batch[1].int())
        self.log("train_acc",
                 accuracy,
                 on_step=False,
                 on_epoch=True,
                 prog_bar=True,
                 logger=True)
        return train_loss

    def predict(self, x):
        with torch.no_grad():
            y_pred = self(x)
            y_pred = torch.argmax(y_pred, dim=1)
        return y_pred
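

# For reference, the decision rule that the competition step above realizes
# can be written down compactly. The sketch below is an illustrative,
# stand-alone re-implementation of the Classification-By-Components rule
# (Saralajew et al., 2019); it is NOT the CBCC layer imported from
# ..core.competitions, and the helper name `cbc_probabilities` as well as
# the assumed tensor shapes are inventions of this sketch.
def cbc_probabilities(detections, reasonings):
    """Sketch: class probabilities from detections and reasonings.

    Assumed shapes:
        detections: (batch, num_components), similarities in [0, 1].
        reasonings: (num_components, num_classes, 2), stacking positive and
            negative reasoning weights along the last axis.
    """
    pos, neg = reasonings[..., 0], reasonings[..., 1]
    # A detected component supports the classes it reasons positively for;
    # an undetected component supports the classes it reasons negatively for.
    support = detections @ pos + (1.0 - detections) @ neg
    # Normalize by the total (positive + negative) reasoning mass per class.
    return support / (pos + neg).sum(dim=0).clamp_min(1e-12)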


class ImageCBC(ImagePrototypesMixin, CBC):
    """CBC model that constrains the components to the range [0, 1] by
    clamping after updates.
    """