chore: merge dev into master

Alexander Engelsberger 2022-04-27 09:48:58 +02:00
commit 37add944b1
10 changed files with 107 additions and 18 deletions

View File

@@ -1,6 +1,7 @@
 MIT License
-Copyright (c) 2020 si-cim
+Copyright (c) 2020 Saxon Institute for Computational Intelligence and Machine
+Learning (SICIM)
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

View File

@@ -120,7 +120,7 @@ html_css_files = [
 # -- Options for HTMLHelp output ------------------------------------------
 # Output file base name for HTML help builder.
-htmlhelp_basename = "protoflowdoc"
+htmlhelp_basename = "prototorchdoc"
 # -- Options for LaTeX output ---------------------------------------------

View File

@@ -32,6 +32,12 @@ class LiteralCompInitializer(AbstractComponentsInitializer):
     def generate(self, num_components: int = 0):
         """Ignore `num_components` and simply return `self.components`."""
+        provided_num_components = len(self.components)
+        if provided_num_components != num_components:
+            wmsg = f"The number of components ({provided_num_components}) " \
+                f"provided to {self.__class__.__name__} " \
+                f"does not match the expected number ({num_components})."
+            warnings.warn(wmsg)
         if not isinstance(self.components, torch.Tensor):
             wmsg = f"Converting components to {torch.Tensor}..."
             warnings.warn(wmsg)
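A minimal sketch of the new safeguard in action, assuming the `import prototorch as pt` entry point used in the tests further down: handing three literal components to an initializer that is asked for five now raises a warning instead of passing silently.

import warnings

import prototorch as pt
import torch

components = torch.rand(3, 2)  # three literal components
init = pt.initializers.LiteralCompInitializer(components)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    generated = init.generate(num_components=5)  # five requested -> warning

assert torch.allclose(generated, components)  # components returned unchanged
assert any("does not match" in str(w.message) for w in caught)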
@@ -231,6 +237,8 @@ class AbstractStratifiedCompInitializer(AbstractClassAwareCompInitializer):
         components = torch.tensor([])
         for k, v in distribution.items():
             stratified_data = self.data[self.targets == k]
+            if len(stratified_data) == 0:
+                raise ValueError(f"No data available for class {k}.")
             initializer = self.subinit_type(
                 stratified_data,
                 noise=self.noise,
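The early failure is easy to trigger. A hypothetical sketch (the `SMCI` stratified-mean alias and its `generate(distribution)` signature are assumed from the surrounding module): requesting prototypes for a class that has no samples now fails loudly instead of producing empty components.

import prototorch as pt
import torch

X = torch.rand(10, 2)
y = torch.zeros(10, dtype=torch.long)  # only class 0 is present

init = pt.initializers.SMCI(X, y)  # StratifiedMeanCompInitializer
try:
    init.generate(distribution={0: 2, 1: 2})  # class 1 has no data
except ValueError as e:
    print(e)  # No data available for class 1.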
@@ -457,7 +465,15 @@ class OnesLinearTransformInitializer(AbstractLinearTransformInitializer):
         return self.generate_end_hook(weights)

-class EyeTransformInitializer(AbstractLinearTransformInitializer):
+class RandomLinearTransformInitializer(AbstractLinearTransformInitializer):
+    """Initialize a matrix with random values."""
+
+    def generate(self, in_dim: int, out_dim: int):
+        weights = torch.rand(in_dim, out_dim)
+        return self.generate_end_hook(weights)
+
+
+class EyeLinearTransformInitializer(AbstractLinearTransformInitializer):
     """Initialize a matrix with the largest possible identity matrix."""

     def generate(self, in_dim: int, out_dim: int):
@@ -496,6 +512,13 @@ class PCALinearTransformInitializer(AbstractDataAwareLTInitializer):
         return self.generate_end_hook(weights)

+class LiteralLinearTransformInitializer(AbstractDataAwareLTInitializer):
+    """'Generate' the provided weights."""
+
+    def generate(self, in_dim: int, out_dim: int):
+        return self.generate_end_hook(self.data)
+
 # Aliases - Components
 CACI = ClassAwareCompInitializer
 DACI = DataAwareCompInitializer
@@ -524,7 +547,9 @@ RRI = RandomReasoningsInitializer
 ZRI = ZerosReasoningsInitializer

 # Aliases - Transforms
-Eye = EyeTransformInitializer
+ELTI = Eye = EyeLinearTransformInitializer
 OLTI = OnesLinearTransformInitializer
+RLTI = RandomLinearTransformInitializer
 ZLTI = ZerosLinearTransformInitializer
 PCALTI = PCALinearTransformInitializer
+LLTI = LiteralLinearTransformInitializer
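Taken together, the renamed and newly added linear-transform initializers can be exercised like this. A sketch, not part of the diff; that the data-aware `LLTI` takes the weight matrix directly as its constructor argument is assumed from `generate` returning `self.data` above.

import prototorch as pt
import torch

torch.manual_seed(42)

rand_init = pt.initializers.RLTI()                 # RandomLinearTransformInitializer
eye_init = pt.initializers.ELTI()                  # EyeLinearTransformInitializer (old `Eye`)
lit_init = pt.initializers.LLTI(torch.ones(3, 2))  # LiteralLinearTransformInitializer

print(rand_init.generate(3, 2).shape)  # torch.Size([3, 2])
print(eye_init.generate(3, 3))         # 3x3 identity matrix
print(lit_init.generate(3, 2))         # the provided all-ones weights, unchanged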

View File

@@ -107,14 +107,24 @@ def margin_loss(y_pred, y_true, margin=0.3):

 class GLVQLoss(torch.nn.Module):
-    def __init__(self, margin=0.0, transfer_fn="identity", beta=10, **kwargs):
+    def __init__(self,
+                 margin=0.0,
+                 transfer_fn="identity",
+                 beta=10,
+                 add_dp=False,
+                 **kwargs):
         super().__init__(**kwargs)
         self.margin = margin
         self.transfer_fn = get_activation(transfer_fn)
         self.beta = torch.tensor(beta)
+        self.add_dp = add_dp

     def forward(self, outputs, targets, plabels):
-        mu = glvq_loss(outputs, targets, prototype_labels=plabels)
+        # mu = glvq_loss(outputs, targets, plabels)
+        dp, dm = _get_dp_dm(outputs, targets, plabels)
+        mu = (dp - dm) / (dp + dm)
+        if self.add_dp:
+            mu = mu + dp
         batch_loss = self.transfer_fn(mu + self.margin, beta=self.beta)
         return batch_loss.sum()
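The rewritten forward computes the classic GLVQ classifier function mu = (dp - dm) / (dp + dm) directly from dp (distance to the closest prototype with the correct label) and dm (distance to the closest prototype with a wrong label), so mu lies in (-1, 1) and is negative exactly when a sample is classified correctly; with add_dp=True, dp is added on top as an extra pull toward the correct prototype. A short usage sketch, assuming the `prototorch.core.losses` import path of this file:

import torch

from prototorch.core.losses import GLVQLoss  # assumed import path

torch.manual_seed(0)

distances = torch.rand(4, 3, requires_grad=True)  # 4 samples, 3 prototypes
plabels = torch.tensor([0, 1, 2])                 # prototype labels
targets = torch.tensor([0, 1, 2, 0])              # sample labels

criterion = GLVQLoss(add_dp=True)
loss = criterion(distances, targets, plabels)
loss.backward()  # gradients flow back to the distances
print(loss.item())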

View File

@@ -5,7 +5,7 @@ from torch.nn.parameter import Parameter

 from .initializers import (
     AbstractLinearTransformInitializer,
-    EyeTransformInitializer,
+    EyeLinearTransformInitializer,
 )
@@ -16,7 +16,7 @@ class LinearTransform(torch.nn.Module):
             in_dim: int,
             out_dim: int,
             initializer:
-            AbstractLinearTransformInitializer = EyeTransformInitializer()):
+            AbstractLinearTransformInitializer = EyeLinearTransformInitializer()):
         super().__init__()
         self.set_weights(in_dim, out_dim, initializer)
@@ -32,12 +32,15 @@ class LinearTransform(torch.nn.Module):
             in_dim: int,
             out_dim: int,
             initializer:
-            AbstractLinearTransformInitializer = EyeTransformInitializer()):
+            AbstractLinearTransformInitializer = EyeLinearTransformInitializer()):
         weights = initializer.generate(in_dim, out_dim)
         self._register_weights(weights)

     def forward(self, x):
-        return x @ self.weights
+        return x @ self._weights
+
+    def extra_repr(self):
+        return f"weights: (shape: {tuple(self._weights.shape)})"

 # Aliases
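With the renamed default initializer and the new extra_repr, a LinearTransform is identity-initialized out of the box and prints its weight shape. A sketch, assuming the `prototorch.core.transforms` module path:

import torch

from prototorch.core.transforms import LinearTransform  # assumed import path

t = LinearTransform(in_dim=4, out_dim=2)  # identity-initialized by default
x = torch.rand(5, 4)

print(t(x).shape)  # torch.Size([5, 2])
print(t)           # LinearTransform(weights: (shape: (4, 2)))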

View File

@@ -1,6 +1,11 @@
-"""ProtoFlow utils module"""
+"""ProtoTorch utils module"""

-from .colors import hex_to_rgb, rgb_to_hex
+from .colors import (
+    get_colors,
+    get_legend_handles,
+    hex_to_rgb,
+    rgb_to_hex,
+)
 from .utils import (
     mesh2d,
     parse_data_arg,

View File

@@ -1,4 +1,13 @@
-"""ProtoFlow color utilities"""
+"""ProtoTorch color utilities"""

+import matplotlib.lines as mlines
+import torch
+from matplotlib import cm
+from matplotlib.colors import (
+    Normalize,
+    to_hex,
+    to_rgb,
+)

 def hex_to_rgb(hex_values):
@@ -13,3 +22,39 @@ def rgb_to_hex(rgb_values):
     for v in rgb_values:
         c = "%02x%02x%02x" % tuple(v)
         yield c
+
+
+def get_colors(vmax, vmin=0, cmap="viridis"):
+    cmap = cm.get_cmap(cmap)
+    colornorm = Normalize(vmin=vmin, vmax=vmax)
+    colors = dict()
+    for c in range(vmin, vmax + 1):
+        colors[c] = to_hex(cmap(colornorm(c)))
+    return colors
+
+
+def get_legend_handles(colors, labels, marker="dots", zero_indexed=False):
+    handles = list()
+    for color, label in zip(colors.values(), labels):
+        if marker == "dots":
+            handle = mlines.Line2D(
+                xdata=[],
+                ydata=[],
+                label=label,
+                color="white",
+                markerfacecolor=color,
+                marker="o",
+                markersize=10,
+                markeredgecolor="k",
+            )
+        else:
+            handle = mlines.Line2D(
+                xdata=[],
+                ydata=[],
+                label=label,
+                color=color,
+                marker="",
+                markersize=15,
+            )
+        handles.append(handle)
+    return handles
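The two helpers compose naturally for prototype plots: get_colors maps integer class labels to hex colors via a colormap, and get_legend_handles turns those colors into matplotlib legend entries. A minimal sketch using the names re-exported from prototorch.utils in the __init__ diff above:

import matplotlib.pyplot as plt

from prototorch.utils import get_colors, get_legend_handles

labels = ["setosa", "versicolor", "virginica"]
colors = get_colors(vmax=len(labels) - 1)     # {0: "#...", 1: "#...", 2: "#..."}
handles = get_legend_handles(colors, labels)  # dot-style legend markers

fig, ax = plt.subplots()
ax.legend(handles=handles)
plt.show()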

View File

@@ -1,4 +1,4 @@
-"""ProtoFlow utilities"""
+"""ProtoTorch utilities"""

 import warnings
 from typing import (

View File

@@ -23,6 +23,7 @@ INSTALL_REQUIRES = [
     "torchvision>=0.7.2",
     "numpy>=1.9.1",
     "sklearn",
+    "matplotlib",
 ]
 DATASETS = [
     "requests",
@@ -40,7 +41,6 @@ DOCS = [
     "sphinx-autodoc-typehints",
 ]
 EXAMPLES = [
-    "matplotlib",
     "torchinfo",
 ]
 TESTS = [

View File

@@ -245,20 +245,20 @@ def test_random_reasonings_init_channels_not_first():
 # Transform initializers


 def test_eye_transform_init_square():
-    t = pt.initializers.EyeTransformInitializer()
+    t = pt.initializers.EyeLinearTransformInitializer()
     I = t.generate(3, 3)
     assert torch.allclose(I, torch.eye(3))


 def test_eye_transform_init_narrow():
-    t = pt.initializers.EyeTransformInitializer()
+    t = pt.initializers.EyeLinearTransformInitializer()
     actual = t.generate(3, 2)
     desired = torch.Tensor([[1, 0], [0, 1], [0, 0]])
     assert torch.allclose(actual, desired)


 def test_eye_transform_init_wide():
-    t = pt.initializers.EyeTransformInitializer()
+    t = pt.initializers.EyeLinearTransformInitializer()
     actual = t.generate(2, 3)
     desired = torch.Tensor([[1, 0, 0], [0, 1, 0]])
     assert torch.allclose(actual, desired)