Use github actions for CI (#10)
* chore: Absolute imports
* feat: Add new mesh util
* chore: replace bumpversion (original fork no longer maintained), move config
* ci: remove old configuration files
* ci: update github action
* ci: add python 3.10 test
* chore: update pre-commit hooks
* ci: update supported python versions (supported are 3.7, 3.8 and 3.9; 3.6 reached EOL in December 2021, and 3.10 has no PyTorch distribution yet)
* ci: add windows test
* ci: update action (fewer windows tests, pre-commit)
* ci: fix typo
* chore: run pre-commit for all files
* ci: two-step tests
* ci: compatibility waits for style
* fix: init file had missing imports
* ci: add deployment script
* ci: skip complete publish step
* ci: clean up readme
committed by GitHub
parent b49b7a2d41
commit a28601751e
@@ -48,6 +48,7 @@ class WTAC(torch.nn.Module):

    Thin wrapper over the `wtac` function.

    """

    def forward(self, distances, labels):  # pylint: disable=no-self-use
        return wtac(distances, labels)


@@ -58,6 +59,7 @@ class LTAC(torch.nn.Module):

    Thin wrapper over the `wtac` function.

    """

    def forward(self, probs, labels):  # pylint: disable=no-self-use
        return wtac(-1.0 * probs, labels)


@@ -68,6 +70,7 @@ class KNNC(torch.nn.Module):

    Thin wrapper over the `knnc` function.

    """

    def __init__(self, k=1, **kwargs):
        super().__init__(**kwargs)
        self.k = k

@@ -85,5 +88,6 @@ class CBCC(torch.nn.Module):

    Thin wrapper over the `cbcc` function.

    """

    def forward(self, detections, reasonings):  # pylint: disable=no-self-use
        return cbcc(detections, reasonings)
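For orientation, a minimal usage sketch of the `WTAC` wrapper touched above. The module path follows this commit's relative-to-absolute import change and is an assumption, as are the winner-takes-all semantics (label of the closest prototype), which come from the `wtac` call rather than this hunk:

import torch

from prototorch.core.competitions import WTAC  # path assumed

# Distances from 4 samples to 3 prototypes, and the prototypes' labels.
distances = torch.rand(4, 3)
prototype_labels = torch.tensor([0, 1, 2])

wta = WTAC()
predictions = wta(distances, prototype_labels)
print(predictions.shape)  # expected: torch.Size([4])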
@@ -6,7 +6,8 @@ from typing import Union

import torch
from torch.nn.parameter import Parameter

-from ..utils import parse_distribution
+from prototorch.utils import parse_distribution

from .initializers import (
    AbstractClassAwareCompInitializer,
    AbstractComponentsInitializer,

@@ -63,6 +64,7 @@ def get_cikwargs(init, distribution):


class AbstractComponents(torch.nn.Module):
    """Abstract class for all components modules."""

    @property
    def num_components(self):
        """Current number of components."""

@@ -85,6 +87,7 @@ class AbstractComponents(torch.nn.Module):


class Components(AbstractComponents):
    """A set of adaptable Tensors."""

    def __init__(self, num_components: int,
                 initializer: AbstractComponentsInitializer):
        super().__init__()

@@ -112,6 +115,7 @@ class Components(AbstractComponents):


class AbstractLabels(torch.nn.Module):
    """Abstract class for all labels modules."""

    @property
    def labels(self):
        return self._labels.cpu()

@@ -152,6 +156,7 @@ class AbstractLabels(torch.nn.Module):


class Labels(AbstractLabels):
    """A set of standalone labels."""

    def __init__(self,
                 distribution: Union[dict, list, tuple],
                 initializer: AbstractLabelsInitializer = LabelsInitializer()):

@@ -182,6 +187,7 @@ class Labels(AbstractLabels):


class LabeledComponents(AbstractComponents):
    """A set of adaptable components and corresponding unadaptable labels."""

    def __init__(
        self,
        distribution: Union[dict, list, tuple],

@@ -249,6 +255,7 @@ class Reasonings(torch.nn.Module):

    The `reasonings` tensor is of shape [num_components, num_classes, 2].

    """

    def __init__(
        self,
        distribution: Union[dict, list, tuple],

@@ -308,6 +315,7 @@ class ReasoningComponents(AbstractComponents):

    three element probability distribution.

    """

    def __init__(
        self,
        distribution: Union[dict, list, tuple],
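A hedged sketch of the `Components`/initializer contract shown in this file: a component count plus an `AbstractComponentsInitializer`. `RandomNormalCompInitializer` and its signature come from the initializers hunks further down; the import paths are assumptions:

import torch

from prototorch.core.components import Components  # paths assumed
from prototorch.core.initializers import RandomNormalCompInitializer

# Three adaptable 2-dimensional components, sampled from a normal distribution.
components = Components(
    num_components=3,
    initializer=RandomNormalCompInitializer(shape=(2, )),
)
print(components.num_components)  # 3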
@@ -11,7 +11,7 @@ from typing import (

import torch

-from ..utils import parse_data_arg, parse_distribution
+from prototorch.utils import parse_data_arg, parse_distribution


# Components
@@ -26,6 +26,7 @@ class LiteralCompInitializer(AbstractComponentsInitializer):

    Use this to 'generate' pre-initialized components elsewhere.

    """

    def __init__(self, components):
        self.components = components

@@ -40,6 +41,7 @@ class LiteralCompInitializer(AbstractComponentsInitializer):


class ShapeAwareCompInitializer(AbstractComponentsInitializer):
    """Abstract class for all dimension-aware components initializers."""

    def __init__(self, shape: Union[Iterable, int]):
        if isinstance(shape, Iterable):
            self.component_shape = tuple(shape)

@@ -53,6 +55,7 @@ class ShapeAwareCompInitializer(AbstractComponentsInitializer):


class ZerosCompInitializer(ShapeAwareCompInitializer):
    """Generate zeros corresponding to the components shape."""

    def generate(self, num_components: int):
        components = torch.zeros((num_components, ) + self.component_shape)
        return components

@@ -60,6 +63,7 @@ class ZerosCompInitializer(ShapeAwareCompInitializer):


class OnesCompInitializer(ShapeAwareCompInitializer):
    """Generate ones corresponding to the components shape."""

    def generate(self, num_components: int):
        components = torch.ones((num_components, ) + self.component_shape)
        return components

@@ -67,6 +71,7 @@ class OnesCompInitializer(ShapeAwareCompInitializer):


class FillValueCompInitializer(OnesCompInitializer):
    """Generate components with the provided `fill_value`."""

    def __init__(self, shape, fill_value: float = 1.0):
        super().__init__(shape)
        self.fill_value = fill_value

@@ -79,6 +84,7 @@ class FillValueCompInitializer(OnesCompInitializer):


class UniformCompInitializer(OnesCompInitializer):
    """Generate components by sampling from a continuous uniform distribution."""

    def __init__(self, shape, minimum=0.0, maximum=1.0, scale=1.0):
        super().__init__(shape)
        self.minimum = minimum

@@ -93,6 +99,7 @@ class UniformCompInitializer(OnesCompInitializer):


class RandomNormalCompInitializer(OnesCompInitializer):
    """Generate components by sampling from a standard normal distribution."""

    def __init__(self, shape, shift=0.0, scale=1.0):
        super().__init__(shape)
        self.shift = shift
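The shape-aware initializers above share one contract: construct with the component shape, then `generate(num_components)` returns a tensor of shape `(num_components, *component_shape)`. A minimal sketch, with the import path assumed:

import torch

from prototorch.core.initializers import (  # path assumed
    FillValueCompInitializer,
    OnesCompInitializer,
)

ones = OnesCompInitializer(shape=(2, 2)).generate(num_components=5)
print(ones.shape)  # torch.Size([5, 2, 2])

# `fill_value` is assumed to overwrite the generated ones.
halves = FillValueCompInitializer((2, 2), fill_value=0.5).generate(5)
print(halves.unique())  # expected: tensor([0.5000])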
@@ -113,6 +120,7 @@ class AbstractDataAwareCompInitializer(AbstractComponentsInitializer):

    `data` has to be a torch tensor.

    """

    def __init__(self,
                 data: torch.Tensor,
                 noise: float = 0.0,

@@ -137,6 +145,7 @@ class AbstractDataAwareCompInitializer(AbstractComponentsInitializer):


class DataAwareCompInitializer(AbstractDataAwareCompInitializer):
    """'Generate' the components from the provided data."""

    def generate(self, num_components: int = 0):
        """Ignore `num_components` and simply return transformed `self.data`."""
        components = self.generate_end_hook(self.data)

@@ -145,6 +154,7 @@ class DataAwareCompInitializer(AbstractDataAwareCompInitializer):


class SelectionCompInitializer(AbstractDataAwareCompInitializer):
    """Generate components by uniformly sampling from the provided data."""

    def generate(self, num_components: int):
        indices = torch.LongTensor(num_components).random_(0, len(self.data))
        samples = self.data[indices]

@@ -154,6 +164,7 @@ class SelectionCompInitializer(AbstractDataAwareCompInitializer):


class MeanCompInitializer(AbstractDataAwareCompInitializer):
    """Generate components by computing the mean of the provided data."""

    def generate(self, num_components: int):
        mean = self.data.mean(dim=0)
        repeat_dim = [num_components] + [1] * len(mean.shape)
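Both concrete data-aware initializers above are driven by a data tensor: `SelectionCompInitializer` samples rows uniformly, `MeanCompInitializer` repeats the data mean. A sketch under the same path assumption:

import torch

from prototorch.core.initializers import (  # path assumed
    MeanCompInitializer,
    SelectionCompInitializer,
)

data = torch.randn(100, 2)

picked = SelectionCompInitializer(data).generate(num_components=4)
print(picked.shape)  # torch.Size([4, 2])

means = MeanCompInitializer(data).generate(num_components=4)
print(means.shape)  # torch.Size([4, 2]), four copies of the data mean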
@@ -172,6 +183,7 @@ class AbstractClassAwareCompInitializer(AbstractComponentsInitializer):

    target tensors.

    """

    def __init__(self,
                 data,
                 noise: float = 0.0,

@@ -199,6 +211,7 @@ class AbstractClassAwareCompInitializer(AbstractComponentsInitializer):


class ClassAwareCompInitializer(AbstractClassAwareCompInitializer):
    """'Generate' components from provided data and requested distribution."""

    def generate(self, distribution: Union[dict, list, tuple]):
        """Ignore `distribution` and simply return transformed `self.data`."""
        components = self.generate_end_hook(self.data)

@@ -207,6 +220,7 @@ class ClassAwareCompInitializer(AbstractClassAwareCompInitializer):


class AbstractStratifiedCompInitializer(AbstractClassAwareCompInitializer):
    """Abstract class for all stratified components initializers."""

    @property
    @abstractmethod
    def subinit_type(self) -> Type[AbstractDataAwareCompInitializer]:

@@ -229,6 +243,7 @@ class AbstractStratifiedCompInitializer(AbstractClassAwareCompInitializer):


class StratifiedSelectionCompInitializer(AbstractStratifiedCompInitializer):
    """Generate components using stratified sampling from the provided data."""

    @property
    def subinit_type(self):
        return SelectionCompInitializer

@@ -236,6 +251,7 @@ class StratifiedSelectionCompInitializer(AbstractStratifiedCompInitializer):


class StratifiedMeanCompInitializer(AbstractStratifiedCompInitializer):
    """Generate components at stratified means of the provided data."""

    @property
    def subinit_type(self):
        return MeanCompInitializer
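The stratified initializers dispatch to their `subinit_type` per class, so `generate` takes a class distribution rather than a plain count. A sketch; that `(inputs, targets)` tuples are accepted by `parse_data_arg` is an assumption:

import torch

from prototorch.core.initializers import StratifiedMeanCompInitializer  # path assumed

# Toy two-class data.
x = torch.randn(20, 2)
y = torch.cat([torch.zeros(10), torch.ones(10)]).long()

init = StratifiedMeanCompInitializer(data=(x, y))
components = init.generate(distribution={0: 2, 1: 3})  # 2 + 3 components
print(components.shape)  # expected: torch.Size([5, 2])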
@@ -244,6 +260,7 @@ class StratifiedMeanCompInitializer(AbstractStratifiedCompInitializer):

# Labels
class AbstractLabelsInitializer(ABC):
    """Abstract class for all labels initializers."""

    @abstractmethod
    def generate(self, distribution: Union[dict, list, tuple]):
        ...

@@ -255,6 +272,7 @@ class LiteralLabelsInitializer(AbstractLabelsInitializer):

    Use this to 'generate' pre-initialized labels elsewhere.

    """

    def __init__(self, labels):
        self.labels = labels

@@ -273,6 +291,7 @@ class LiteralLabelsInitializer(AbstractLabelsInitializer):


class DataAwareLabelsInitializer(AbstractLabelsInitializer):
    """'Generate' the labels from a torch Dataset."""

    def __init__(self, data):
        self.data, self.targets = parse_data_arg(data)

@@ -283,6 +302,7 @@ class DataAwareLabelsInitializer(AbstractLabelsInitializer):


class LabelsInitializer(AbstractLabelsInitializer):
    """Generate labels from `distribution`."""

    def generate(self, distribution: Union[dict, list, tuple]):
        distribution = parse_distribution(distribution)
        labels_list = []

@@ -294,6 +314,7 @@ class LabelsInitializer(AbstractLabelsInitializer):


class OneHotLabelsInitializer(LabelsInitializer):
    """Generate one-hot-encoded labels from `distribution`."""

    def generate(self, distribution: Union[dict, list, tuple]):
        distribution = parse_distribution(distribution)
        num_classes = len(distribution.keys())
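`LabelsInitializer` expands a distribution into one label per requested component, and the one-hot variant encodes those labels over `num_classes`. A sketch (path assumed, outputs hedged):

from prototorch.core.initializers import (  # path assumed
    LabelsInitializer,
    OneHotLabelsInitializer,
)

# Two prototypes for class 0, three for class 1.
labels = LabelsInitializer().generate(distribution={0: 2, 1: 3})
print(labels)  # expected: tensor([0, 0, 1, 1, 1])

one_hot = OneHotLabelsInitializer().generate({0: 2, 1: 3})
print(one_hot.shape)  # expected: torch.Size([5, 2])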
@@ -312,6 +333,7 @@ def compute_distribution_shape(distribution):


class AbstractReasoningsInitializer(ABC):
    """Abstract class for all reasonings initializers."""

    def __init__(self, components_first: bool = True):
        self.components_first = components_first

@@ -332,6 +354,7 @@ class LiteralReasoningsInitializer(AbstractReasoningsInitializer):

    Use this to 'generate' pre-initialized reasonings elsewhere.

    """

    def __init__(self, reasonings, **kwargs):
        super().__init__(**kwargs)
        self.reasonings = reasonings

@@ -349,6 +372,7 @@ class LiteralReasoningsInitializer(AbstractReasoningsInitializer):


class ZerosReasoningsInitializer(AbstractReasoningsInitializer):
    """Reasonings are all initialized with zeros."""

    def generate(self, distribution: Union[dict, list, tuple]):
        shape = compute_distribution_shape(distribution)
        reasonings = torch.zeros(*shape)

@@ -358,6 +382,7 @@ class ZerosReasoningsInitializer(AbstractReasoningsInitializer):


class OnesReasoningsInitializer(AbstractReasoningsInitializer):
    """Reasonings are all initialized with ones."""

    def generate(self, distribution: Union[dict, list, tuple]):
        shape = compute_distribution_shape(distribution)
        reasonings = torch.ones(*shape)

@@ -367,6 +392,7 @@ class OnesReasoningsInitializer(AbstractReasoningsInitializer):


class RandomReasoningsInitializer(AbstractReasoningsInitializer):
    """Reasonings are randomly initialized."""

    def __init__(self, minimum=0.4, maximum=0.6, **kwargs):
        super().__init__(**kwargs)
        self.minimum = minimum

@@ -381,6 +407,7 @@ class RandomReasoningsInitializer(AbstractReasoningsInitializer):


class PurePositiveReasoningsInitializer(AbstractReasoningsInitializer):
    """Each component reasons positively for exactly one class."""

    def generate(self, distribution: Union[dict, list, tuple]):
        num_components, num_classes, _ = compute_distribution_shape(
            distribution)
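All reasonings initializers derive their output shape from `compute_distribution_shape`, i.e. `[num_components, num_classes, 2]` as stated in the `Reasonings` docstring earlier. A sketch with the path assumed:

from prototorch.core.initializers import RandomReasoningsInitializer  # path assumed

init = RandomReasoningsInitializer(minimum=0.4, maximum=0.6)
reasonings = init.generate(distribution={0: 2, 1: 3})  # 5 components, 2 classes
print(reasonings.shape)  # expected: torch.Size([5, 2, 2])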
@@ -399,6 +426,7 @@ class AbstractTransformInitializer(ABC):


class AbstractLinearTransformInitializer(AbstractTransformInitializer):
    """Abstract class for all linear transform initializers."""

    def __init__(self, out_dim_first: bool = False):
        self.out_dim_first = out_dim_first

@@ -415,6 +443,7 @@ class AbstractLinearTransformInitializer(AbstractTransformInitializer):


class ZerosLinearTransformInitializer(AbstractLinearTransformInitializer):
    """Initialize a matrix with zeros."""

    def generate(self, in_dim: int, out_dim: int):
        weights = torch.zeros(in_dim, out_dim)
        return self.generate_end_hook(weights)

@@ -422,6 +451,7 @@ class ZerosLinearTransformInitializer(AbstractLinearTransformInitializer):


class OnesLinearTransformInitializer(AbstractLinearTransformInitializer):
    """Initialize a matrix with ones."""

    def generate(self, in_dim: int, out_dim: int):
        weights = torch.ones(in_dim, out_dim)
        return self.generate_end_hook(weights)

@@ -429,6 +459,7 @@ class OnesLinearTransformInitializer(AbstractLinearTransformInitializer):


class EyeTransformInitializer(AbstractLinearTransformInitializer):
    """Initialize a matrix with the largest possible identity matrix."""

    def generate(self, in_dim: int, out_dim: int):
        weights = torch.zeros(in_dim, out_dim)
        I = torch.eye(min(in_dim, out_dim))

@@ -438,6 +469,7 @@ class EyeTransformInitializer(AbstractLinearTransformInitializer):


class AbstractDataAwareLTInitializer(AbstractLinearTransformInitializer):
    """Abstract class for all data-aware linear transform initializers."""

    def __init__(self,
                 data: torch.Tensor,
                 noise: float = 0.0,

@@ -458,6 +490,7 @@ class AbstractDataAwareLTInitializer(AbstractLinearTransformInitializer):


class PCALinearTransformInitializer(AbstractDataAwareLTInitializer):
    """Initialize a matrix with Eigenvectors from the data."""

    def generate(self, in_dim: int, out_dim: int):
        _, _, weights = torch.pca_lowrank(self.data, q=out_dim)
        return self.generate_end_hook(weights)
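`EyeTransformInitializer.generate` is shown in full above: a zero matrix whose leading `min(in_dim, out_dim)` block is the identity (presumably transposed by the end hook when `out_dim_first` is set). A sketch with the path assumed:

from prototorch.core.initializers import EyeTransformInitializer  # path assumed

weights = EyeTransformInitializer().generate(in_dim=3, out_dim=5)
print(weights.shape)   # torch.Size([3, 5]) with the default out_dim_first=False
print(weights[:3, :3])  # expected: the 3x3 identity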
@@ -2,7 +2,7 @@

import torch

-from ..nn.activations import get_activation
+from prototorch.nn.activations import get_activation


# Helpers
@@ -106,6 +106,7 @@ def margin_loss(y_pred, y_true, margin=0.3):


class GLVQLoss(torch.nn.Module):

    def __init__(self, margin=0.0, transfer_fn="identity", beta=10, **kwargs):
        super().__init__(**kwargs)
        self.margin = margin

@@ -119,6 +120,7 @@ class GLVQLoss(torch.nn.Module):


class MarginLoss(torch.nn.modules.loss._Loss):

    def __init__(self,
                 margin=0.3,
                 size_average=None,

@@ -132,6 +134,7 @@ class MarginLoss(torch.nn.modules.loss._Loss):


class NeuralGasEnergy(torch.nn.Module):

    def __init__(self, lm, **kwargs):
        super().__init__(**kwargs)
        self.lm = lm

@@ -152,6 +155,7 @@ class NeuralGasEnergy(torch.nn.Module):


class GrowingNeuralGasEnergy(NeuralGasEnergy):

    def __init__(self, topology_layer, **kwargs):
        super().__init__(**kwargs)
        self.topology_layer = topology_layer
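Of the losses touched here, only `margin_loss` has its full signature visible (in the hunk header above). A sketch; the one-hot target convention and the reduction behavior are assumptions not shown in the diff:

import torch

from prototorch.core.losses import margin_loss  # path assumed

y_pred = torch.softmax(torch.randn(4, 3), dim=1)
y_true = torch.eye(3)[torch.tensor([0, 2, 1, 0])]  # assumed one-hot targets
loss = margin_loss(y_pred, y_true, margin=0.3)
print(loss)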
@@ -82,23 +82,27 @@ def stratified_prod_pooling(values: torch.Tensor,


class StratifiedSumPooling(torch.nn.Module):
    """Thin wrapper over the `stratified_sum_pooling` function."""

    def forward(self, values, labels):  # pylint: disable=no-self-use
        return stratified_sum_pooling(values, labels)


class StratifiedProdPooling(torch.nn.Module):
    """Thin wrapper over the `stratified_prod_pooling` function."""

    def forward(self, values, labels):  # pylint: disable=no-self-use
        return stratified_prod_pooling(values, labels)


class StratifiedMinPooling(torch.nn.Module):
    """Thin wrapper over the `stratified_min_pooling` function."""

    def forward(self, values, labels):  # pylint: disable=no-self-use
        return stratified_min_pooling(values, labels)


class StratifiedMaxPooling(torch.nn.Module):
    """Thin wrapper over the `stratified_max_pooling` function."""

    def forward(self, values, labels):  # pylint: disable=no-self-use
        return stratified_max_pooling(values, labels)
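The four pooling wrappers share the `forward(values, labels)` signature above. The shapes in this sketch, per-sample values over prototypes pooled into per-class scores, are an assumption about the underlying `stratified_*_pooling` helpers:

import torch

from prototorch.core.pooling import StratifiedMaxPooling  # path assumed

values = torch.rand(4, 5)               # 4 samples, 5 prototypes
labels = torch.tensor([0, 0, 1, 1, 2])  # prototype -> class assignment

pooled = StratifiedMaxPooling()(values, labels)
print(pooled.shape)  # expected: torch.Size([4, 3])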
@@ -10,6 +10,7 @@ from .initializers import (


class LinearTransform(torch.nn.Module):

    def __init__(
        self,
        in_dim: int,
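The `LinearTransform` hunk is truncated after `in_dim`; the `out_dim` and `initializer` parameters in this sketch are assumed by analogy with the linear transform initializers above:

import torch

from prototorch.core.initializers import EyeTransformInitializer  # paths assumed
from prototorch.core.transforms import LinearTransform

transform = LinearTransform(in_dim=4, out_dim=2,
                            initializer=EyeTransformInitializer())
x = torch.randn(8, 4)
print(transform(x).shape)  # expected: torch.Size([8, 2])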