Added Vis for GMLVQ with more than 2 dims using PCA (#11)
* Added Vis for GMLVQ with more than 2 dims using PCA
* Added initialization possibility to GMLVQ with PCA and one example with omega init + PCA vis of 3 dims
* test(githooks): Add githooks for automatic commit checks

Co-authored-by: staps@hs-mittweida.de <staps@hs-mittweida.de>
Co-authored-by: Alexander Engelsberger <alexanderengelsberger@gmail.com>
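A hedged sketch of the new omega-initialization path: it builds an (input_dim, latent_dim) mapping from PCA components and hands it to GMLVQ via the `initialized_omega` kwarg introduced in the diff below. The import path, the hparams keys, and the assumption that `initialized_omega` accepts a plain tensor are not confirmed by the diff; other required constructor kwargs (e.g. a prototype initializer) are omitted.

    # Hypothetical sketch: PCA-based omega init via the new kwarg.
    # Assumes `initialized_omega` accepts an (input_dim, latent_dim) tensor.
    import torch
    from sklearn.decomposition import PCA

    from prototorch.models import GMLVQ  # import path is an assumption

    x_train = torch.randn(150, 4)  # placeholder training data

    pca = PCA(n_components=3)
    pca.fit(x_train.numpy())
    # components_ has shape (latent_dim, input_dim); transpose to match _omega
    omega_init = torch.tensor(pca.components_.T, dtype=torch.float32)  # (4, 3)

    hparams = dict(input_dim=4, latent_dim=3)  # keys match the diff below
    model = GMLVQ(hparams, initialized_omega=omega_init)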
@@ -7,6 +7,7 @@ from prototorch.functions.distances import (lomega_distance, omega_distance,
                                            squared_euclidean_distance)
from prototorch.functions.helper import get_flat
from prototorch.functions.losses import glvq_loss, lvq1_loss, lvq21_loss
from prototorch.components import LinearMapping
from prototorch.modules import LambdaLayer, LossLayer
from torch.nn.parameter import Parameter
@@ -239,11 +240,18 @@ class GMLVQ(GLVQ):
        super().__init__(hparams, distance_fn=distance_fn, **kwargs)

        # Additional parameters
        omega = torch.randn(self.hparams.input_dim,
                            self.hparams.latent_dim,
                            device=self.device)
        self.register_parameter("_omega", Parameter(omega))
        omega_initializer = kwargs.get("omega_initializer", None)
        initialized_omega = kwargs.get("initialized_omega", None)
        if omega_initializer is not None or initialized_omega is not None:
            self.omega_layer = LinearMapping(
                mapping_shape=(self.hparams.input_dim, self.hparams.latent_dim),
                initializer=omega_initializer,
                initialized_linearmapping=initialized_omega,
            )
            self.register_parameter("_omega", Parameter(self.omega_layer.mapping))
        self.backbone = LambdaLayer(lambda x: x @ self._omega, name="omega matrix")

    @property
    def omega_matrix(self):
        return self._omega.detach().cpu()
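The backbone above is just the linear map x -> x @ omega, so projecting data into the latent space is a single matrix product. A minimal usage sketch, assuming a constructed `model` as in the example at the top:

    # Minimal sketch of the backbone mapping (assumes `model` from above).
    x = torch.randn(10, 4)        # (n_samples, input_dim)
    x_latent = model.backbone(x)  # (n_samples, latent_dim), here (10, 3)
    omega = model.omega_matrix    # detached CPU copy via the property above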
@@ -256,6 +264,24 @@ class GMLVQ(GLVQ):
    def extra_repr(self):
        return f"(omega): (shape: {tuple(self._omega.shape)})"

    def predict_latent(self, x, map_protos=True):
        """Predict `x` assuming it is already embedded in the latent space.

        Only the prototypes are embedded in the latent space using the
        backbone.
        """
        self.eval()
        with torch.no_grad():
            protos, plabels = self.proto_layer()
            if map_protos:
                protos = self.backbone(protos)
            d = squared_euclidean_distance(x, protos)
            y_pred = wtac(d, plabels)
        return y_pred
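Per the docstring, `predict_latent` embeds only the prototypes, so callers must map their own data through the backbone first. A hedged usage sketch, continuing from the examples above:

    # Hypothetical usage: the input must already live in the latent space.
    with torch.no_grad():
        x_latent = model.backbone(x)         # embed the data yourself
    y_pred = model.predict_latent(x_latent)  # prototypes are mapped inside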

class LGMLVQ(GMLVQ):
    """Localized and Generalized Matrix Learning Vector Quantization."""