13 Commits

Author SHA1 Message Date
Alexander Engelsberger
75a39f5b03 build: bump version 0.4.0 → 0.4.1 2022-01-11 18:29:55 +01:00
Alexander Engelsberger
1a0e697b27 Merge branch 'dev' into main 2022-01-11 18:29:32 +01:00
Alexander Engelsberger
1a17193b35 ci: add github actions (#16)
* chore: update pre-commit versions

* ci: remove old configurations

* ci: copy workflow from prototorch

* ci: run precommit for all files

* ci: add examples CPU test

* ci(test): failing example test

* ci: fix workflow definition

* ci(test): repeat failing example test

* ci: fix workflow definition

* ci(test): repeat failing example test II

* ci: fix test command

* ci: cleanup example test

* ci: remove travis badge
2022-01-11 18:28:50 +01:00
Alexander Engelsberger
aaa3c51e0a build: bump version 0.3.0 → 0.4.0 2021-12-09 15:58:16 +01:00
Jensun Ravichandran
62c5974a85 fix: correct typo in example script 2021-11-17 15:01:38 +01:00
Jensun Ravichandran
1d26226a2f fix(warning): specify dimension explicitly when calling softmin 2021-11-16 10:19:31 +01:00
Christoph
4232d0ed2a fix: spelling issues for previous commits 2021-11-15 11:43:39 +01:00
Christoph
a9edf06507 feat: ImageGTLVQ and SiameseGTLVQ with examples 2021-11-15 11:43:39 +01:00
Christoph
d3bb430104 feat: gtlvq with examples 2021-11-15 11:43:39 +01:00
Alexander Engelsberger
6ffd27d12a chore: Remove PytorchLightning CLI related code
Could be moved into a separate plugin.
2021-10-11 15:16:12 +02:00
Alexander Engelsberger
859e2cae69 docs(dependencies): Add missing ipykernel dependency for docs 2021-10-11 15:11:53 +02:00
Alexander Engelsberger
d7ea89d47e feat: add simple test step 2021-09-10 19:19:51 +02:00
Jensun Ravichandran
fa928afe2c feat(vis): 2D EV projection for GMLVQ 2021-09-01 10:49:57 +02:00
36 changed files with 665 additions and 250 deletions

View File

@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 0.3.0
+current_version = 0.4.1
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)

View File

@@ -1,15 +0,0 @@
# To validate the contents of your configuration file
# run the following command in the folder where the configuration file is located:
# codacy-analysis-cli validate-configuration --directory `pwd`
# To analyse, run:
# codacy-analysis-cli analyse --tool remark-lint --directory `pwd`
---
engines:
pylintpython3:
exclude_paths:
- config/engines.yml
remark-lint:
exclude_paths:
- config/engines.yml
exclude_paths:
- 'tests/**'

View File

@@ -1,2 +0,0 @@
comment:
require_changes: yes

.github/workflows/examples.yml (new file, 25 lines)
View File

@@ -0,0 +1,25 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
name: examples
on:
push:
paths:
- 'examples/**.py'
jobs:
cpu:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install .[all]
- name: Run examples
run: |
./tests/test_examples.sh examples/

.github/workflows/pythonapp.yml (new file, 73 lines)
View File

@@ -0,0 +1,73 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
name: tests
on:
push:
pull_request:
branches: [ master ]
jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install .[all]
- uses: pre-commit/action@v2.0.3
compatibility:
needs: style
strategy:
fail-fast: false
matrix:
python-version: ["3.7", "3.8", "3.9"]
os: [ubuntu-latest, windows-latest]
exclude:
- os: windows-latest
python-version: "3.7"
- os: windows-latest
python-version: "3.8"
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install .[all]
- name: Test with pytest
run: |
pytest
publish_pypi:
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
needs: compatibility
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install .[all]
pip install wheel
- name: Build package
run: python setup.py sdist bdist_wheel
- name: Publish a Python distribution to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_API_TOKEN }}

View File

@@ -3,7 +3,7 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
-rev: v4.0.1
+rev: v4.1.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
@@ -18,19 +18,19 @@ repos:
- id: autoflake
- repo: http://github.com/PyCQA/isort
-rev: 5.8.0
+rev: 5.10.1
hooks:
- id: isort
- repo: https://github.com/pre-commit/mirrors-mypy
-rev: v0.902
+rev: v0.931
hooks:
- id: mypy
files: prototorch
additional_dependencies: [types-pkg_resources]
- repo: https://github.com/pre-commit/mirrors-yapf
-rev: v0.31.0
+rev: v0.32.0
hooks:
- id: yapf
@@ -42,7 +42,7 @@ repos:
- id: python-check-blanket-noqa
- repo: https://github.com/asottile/pyupgrade
-rev: v2.19.4
+rev: v2.31.0
hooks:
- id: pyupgrade

View File

@@ -1,44 +0,0 @@
dist: bionic
sudo: false
language: python
python:
- 3.9
- 3.8
- 3.7
- 3.6
cache:
directories:
- "$HOME/.cache/pip"
- "./tests/artifacts"
- "$HOME/datasets"
install:
- pip install git+git://github.com/si-cim/prototorch@dev --progress-bar off
- pip install .[all] --progress-bar off
script:
- coverage run -m pytest
- ./tests/test_examples.sh examples/
after_success:
- bash <(curl -s https://codecov.io/bash)
# Publish on PyPI
jobs:
include:
- stage: build
python: 3.9
script: echo "Starting Pypi build"
deploy:
provider: pypi
username: __token__
distributions: "sdist bdist_wheel"
password:
secure: PDoASdYdVlt1aIROYilAsCW6XpBs/TDel0CSptDzX0CI7i4+ksEW6Jk0JyL58bQt7V4F8PeGty4A8SODzAUIk2d8sty5RI4VJjvXZFCXlUsW+JGUN3EvWNqJLnwN8TDxgu2ENao37GUh0dC6pL8b6bVDGeOLaY1E/YR1jimmTJuxxjKjBIU8ByqTNBnC3rzybMTPU3nRoOM/WMQUyReHrPoUJj685sLqrLruhAqhiYsPbotP8xY6i8+KBbhp5vgiARV2+LkbeGcYZwozCzrEqPKY7YIfVPh895cw0v4NRyFwK1P2jyyIt22Z9Ni0Uy1J5/Qp9Sv6mBPeGjm3pnpDCQyS+2bNIDaj08KUYTIo1mC/Jcu4jQgppZEF+oey9q1tgGo+/JhsTeERKV9BoPF5HDiRArU1s5aWJjFnCsHfu+W1XqX8bwN3aTYsEIaApT3/irc6XyFJIfMN82+z+lUcZ4Y1yAHT3nH1Vif+pZYZB0UOSGrHwuI/UayjKzbCzHMuHWylWB/9ehd4o4YVp6iubVHc7Sj0KQkwBgwgl6TvwNcUuFsplFabCxmX0mVcavXsWiOBc+ivPmU6574zGj0JcEk5ghVgnKH+QS96aVrKOzegwbl4O13jY8dJp+/zgXl0gJOvRKr4BhuBJKcBaMQHdSKUChVsJJtqDyt59GvWcbg=
on:
tags: true
skip_existing: true
# The password is encrypted with:
# `cd prototorch && travis encrypt your-pypi-api-token --add deploy.password`
# See https://docs.travis-ci.com/user/deployment/pypi and
# https://github.com/travis-ci/travis.rb#installation
# for more details
# Note: The encrypt command does not work well in ZSH.

View File

@@ -1,6 +1,5 @@
# ProtoTorch Models
-[![Build Status](https://api.travis-ci.com/si-cim/prototorch_models.svg?branch=main)](https://travis-ci.com/github/si-cim/prototorch_models)
[![GitHub tag (latest by date)](https://img.shields.io/github/v/tag/si-cim/prototorch_models?color=yellow&label=version)](https://github.com/si-cim/prototorch_models/releases)
[![PyPI](https://img.shields.io/pypi/v/prototorch_models)](https://pypi.org/project/prototorch_models/)
[![GitHub license](https://img.shields.io/github/license/si-cim/prototorch_models)](https://github.com/si-cim/prototorch_models/blob/master/LICENSE)

View File

@@ -23,7 +23,7 @@ author = "Jensun Ravichandran"
# The full version, including alpha/beta/rc tags
#
-release = "0.3.0"
+release = "0.4.1"
# -- General configuration ---------------------------------------------------

View File

@@ -1,8 +0,0 @@
# Examples using Lightning CLI
Examples in this folder use the experimental [Lightning CLI](https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_cli.html).
To use the example run
```
python gmlvq.py --config gmlvq.yaml
```

View File

@@ -1,19 +0,0 @@
"""GMLVQ example using the MNIST dataset."""
import prototorch as pt
import torch
from prototorch.models import ImageGMLVQ
from prototorch.models.abstract import PrototypeModel
from prototorch.models.data import MNISTDataModule
from pytorch_lightning.utilities.cli import LightningCLI
class ExperimentClass(ImageGMLVQ):
def __init__(self, hparams, **kwargs):
super().__init__(hparams,
optimizer=torch.optim.Adam,
prototype_initializer=pt.components.zeros(28 * 28),
**kwargs)
cli = LightningCLI(ImageGMLVQ, MNISTDataModule)

View File

@@ -1,11 +0,0 @@
model:
hparams:
input_dim: 784
latent_dim: 784
distribution:
num_classes: 10
prototypes_per_class: 2
proto_lr: 0.01
bb_lr: 0.01
data:
batch_size: 32

examples/gmlvq_iris.py (new file, 58 lines)
View File

@@ -0,0 +1,58 @@
"""GMLVQ example using the Iris dataset."""
import argparse
import prototorch as pt
import pytorch_lightning as pl
import torch
from torch.optim.lr_scheduler import ExponentialLR
if __name__ == "__main__":
# Command-line arguments
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
# Dataset
train_ds = pt.datasets.Iris()
# Dataloaders
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=64)
# Hyperparameters
hparams = dict(
input_dim=4,
latent_dim=4,
distribution={
"num_classes": 3,
"per_class": 2
},
proto_lr=0.01,
bb_lr=0.01,
)
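# The distribution above requests 2 prototypes per class for the 3 Iris classes (6 prototypes in total).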
# Initialize the model
model = pt.models.GMLVQ(
hparams,
optimizer=torch.optim.Adam,
prototypes_initializer=pt.initializers.SMCI(train_ds),
lr_scheduler=ExponentialLR,
lr_scheduler_kwargs=dict(gamma=0.99, verbose=False),
)
# Compute intermediate input and output sizes
model.example_input_array = torch.zeros(4, 4)
# Callbacks
vis = pt.models.VisGMLVQ2D(data=train_ds)
# Setup trainer
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=[vis],
weights_summary="full",
accelerator="ddp",
)
# Training loop
trainer.fit(model, train_loader)

View File

@@ -1,4 +1,4 @@
"""GLVQ example using the spiral dataset.""" """GMLVQ example using the spiral dataset."""
import argparse import argparse

examples/gtlvq_mnist.py (new file, 104 lines)
View File

@@ -0,0 +1,104 @@
"""GTLVQ example using the MNIST dataset."""
import argparse
import prototorch as pt
import pytorch_lightning as pl
import torch
from torchvision import transforms
from torchvision.datasets import MNIST
if __name__ == "__main__":
# Command-line arguments
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
# Dataset
train_ds = MNIST(
"~/datasets",
train=True,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
]),
)
test_ds = MNIST(
"~/datasets",
train=False,
download=True,
transform=transforms.Compose([
transforms.ToTensor(),
]),
)
# Dataloaders
train_loader = torch.utils.data.DataLoader(train_ds,
num_workers=0,
batch_size=256)
test_loader = torch.utils.data.DataLoader(test_ds,
num_workers=0,
batch_size=256)
# Hyperparameters
num_classes = 10
prototypes_per_class = 1
hparams = dict(
input_dim=28 * 28,
latent_dim=28,
distribution=(num_classes, prototypes_per_class),
proto_lr=0.01,
bb_lr=0.01,
)
# Initialize the model
model = pt.models.ImageGTLVQ(
hparams,
optimizer=torch.optim.Adam,
prototypes_initializer=pt.initializers.SMCI(train_ds),
# Use one batch of data for the subspace initialization.
omega_initializer=pt.initializers.PCALinearTransformInitializer(
next(iter(train_loader))[0].reshape(256, 28 * 28)))
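# Note: the PCA-based omega initializer above is fed a single training batch,
# flattened to shape (256, 28 * 28); this assumes the first batch is full,
# i.e. it matches the batch_size=256 configured for train_loader.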
# Callbacks
vis = pt.models.VisImgComp(
data=train_ds,
num_columns=10,
show=False,
tensorboard=True,
random_data=100,
add_embedding=True,
embedding_data=200,
flatten_data=False,
)
pruning = pt.models.PruneLoserPrototypes(
threshold=0.01,
idle_epochs=1,
prune_quota_per_epoch=10,
frequency=1,
verbose=True,
)
es = pl.callbacks.EarlyStopping(
monitor="train_loss",
min_delta=0.001,
patience=15,
mode="min",
check_on_train_epoch_end=True,
)
# Setup trainer
# using GPUs here is strongly recommended!
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=[
vis,
pruning,
# es,
],
terminate_on_nan=True,
weights_summary=None,
accelerator="ddp",
)
# Training loop
trainer.fit(model, train_loader)

examples/gtlvq_moons.py (new file, 63 lines)
View File

@@ -0,0 +1,63 @@
"""Localized-GTLVQ example using the Moons dataset."""
import argparse
import prototorch as pt
import pytorch_lightning as pl
import torch
if __name__ == "__main__":
# Command-line arguments
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
# Reproducibility
pl.utilities.seed.seed_everything(seed=2)
# Dataset
train_ds = pt.datasets.Moons(num_samples=300, noise=0.2, seed=42)
# Dataloaders
train_loader = torch.utils.data.DataLoader(train_ds,
batch_size=256,
shuffle=True)
# Hyperparameters
# latent_dim should be lower than input_dim.
hparams = dict(distribution=[1, 3], input_dim=2, latent_dim=1)
# Initialize the model
model = pt.models.GTLVQ(
hparams, prototypes_initializer=pt.initializers.SMCI(train_ds))
# Compute intermediate input and output sizes
model.example_input_array = torch.zeros(4, 2)
# Summary
print(model)
# Callbacks
vis = pt.models.VisGLVQ2D(data=train_ds)
es = pl.callbacks.EarlyStopping(
monitor="train_acc",
min_delta=0.001,
patience=20,
mode="max",
verbose=False,
check_on_train_epoch_end=True,
)
# Setup trainer
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=[
vis,
es,
],
weights_summary="full",
accelerator="ddp",
)
# Training loop
trainer.fit(model, train_loader)

View File

@@ -6,6 +6,7 @@ import prototorch as pt
import pytorch_lightning as pl
import torch
from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
if __name__ == "__main__":
# Command-line arguments
@@ -14,12 +15,20 @@ if __name__ == "__main__":
args = parser.parse_args()
# Dataset
-x_train, y_train = load_iris(return_X_y=True)
-x_train = x_train[:, [0, 2]]
-train_ds = pt.datasets.NumpyDataset(x_train, y_train)
+X, y = load_iris(return_X_y=True)
+X = X[:, [0, 2]]
+X_train, X_test, y_train, y_test = train_test_split(X,
+y,
+test_size=0.5,
+random_state=42)
+train_ds = pt.datasets.NumpyDataset(X_train, y_train)
+test_ds = pt.datasets.NumpyDataset(X_test, y_test)
# Dataloaders
-train_loader = torch.utils.data.DataLoader(train_ds, batch_size=150)
+train_loader = torch.utils.data.DataLoader(train_ds, batch_size=16)
+test_loader = torch.utils.data.DataLoader(test_ds, batch_size=16)
# Hyperparameters
hparams = dict(k=5)
@@ -35,7 +44,7 @@ if __name__ == "__main__":
# Callbacks
vis = pt.models.VisGLVQ2D(
-data=(x_train, y_train),
+data=(X_train, y_train),
resolution=200,
block=True,
)
@@ -53,5 +62,8 @@ if __name__ == "__main__":
trainer.fit(model, train_loader)
# Recall
-y_pred = model.predict(torch.tensor(x_train))
+y_pred = model.predict(torch.tensor(X_train))
print(y_pred)
+# Test
+trainer.test(model, dataloaders=test_loader)

View File

@@ -10,6 +10,7 @@ from prototorch.utils.colors import hex_to_rgb
class Vis2DColorSOM(pl.Callback):
def __init__(self, data, title="ColorSOMe", pause_time=0.1):
super().__init__()
self.title = title

View File

@@ -8,6 +8,7 @@ import torch
class Backbone(torch.nn.Module):
def __init__(self, input_size=4, hidden_size=10, latent_size=2):
super().__init__()
self.input_size = input_size

View File

@@ -8,6 +8,7 @@ import torch
class Backbone(torch.nn.Module):
def __init__(self, input_size=4, hidden_size=10, latent_size=2):
super().__init__()
self.input_size = input_size

View File

@@ -0,0 +1,73 @@
"""Siamese GTLVQ example using all four dimensions of the Iris dataset."""
import argparse
import prototorch as pt
import pytorch_lightning as pl
import torch
class Backbone(torch.nn.Module):
def __init__(self, input_size=4, hidden_size=10, latent_size=2):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.latent_size = latent_size
self.dense1 = torch.nn.Linear(self.input_size, self.hidden_size)
self.dense2 = torch.nn.Linear(self.hidden_size, self.latent_size)
self.activation = torch.nn.Sigmoid()
def forward(self, x):
x = self.activation(self.dense1(x))
out = self.activation(self.dense2(x))
return out
if __name__ == "__main__":
# Command-line arguments
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
# Dataset
train_ds = pt.datasets.Iris()
# Reproducibility
pl.utilities.seed.seed_everything(seed=2)
# Dataloaders
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=150)
# Hyperparameters
hparams = dict(distribution=[1, 2, 3],
proto_lr=0.01,
bb_lr=0.01,
input_dim=2,
latent_dim=1)
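# Note: the Siamese backbone instantiated below maps the 4-dimensional Iris inputs
# down to input_dim=2, and GTLVQ then learns a latent_dim=1 tangent subspace per
# prototype in that 2-dimensional embedding space.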
# Initialize the backbone
backbone = Backbone(latent_size=hparams["input_dim"])
# Initialize the model
model = pt.models.SiameseGTLVQ(
hparams,
prototypes_initializer=pt.initializers.SMCI(train_ds),
backbone=backbone,
both_path_gradients=False,
)
# Model summary
print(model)
# Callbacks
vis = pt.models.VisSiameseGLVQ2D(data=train_ds, border=0.1)
# Setup trainer
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=[vis],
)
# Training loop
trainer.fit(model, train_loader)

View File

@@ -8,17 +8,34 @@ from .glvq import (
GLVQ21,
GMLVQ,
GRLVQ,
+GTLVQ,
LGMLVQ,
LVQMLN,
ImageGLVQ,
ImageGMLVQ,
+ImageGTLVQ,
SiameseGLVQ,
SiameseGMLVQ,
+SiameseGTLVQ,
)
from .knn import KNN
-from .lvq import LVQ1, LVQ21, MedianLVQ
-from .probabilistic import CELVQ, PLVQ, RSLVQ, SLVQ
-from .unsupervised import GrowingNeuralGas, HeskesSOM, KohonenSOM, NeuralGas
+from .lvq import (
+LVQ1,
+LVQ21,
+MedianLVQ,
+)
+from .probabilistic import (
+CELVQ,
+PLVQ,
+RSLVQ,
+SLVQ,
+)
+from .unsupervised import (
+GrowingNeuralGas,
+HeskesSOM,
+KohonenSOM,
+NeuralGas,
+)
from .vis import *
-__version__ = "0.3.0"
+__version__ = "0.4.1"

View File

@@ -14,6 +14,7 @@ from ..nn.wrappers import LambdaLayer
class ProtoTorchBolt(pl.LightningModule):
"""All ProtoTorch models are ProtoTorch Bolts."""
def __init__(self, hparams, **kwargs):
super().__init__()
@@ -52,6 +53,7 @@ class ProtoTorchBolt(pl.LightningModule):
class PrototypeModel(ProtoTorchBolt):
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
@@ -81,6 +83,7 @@ class PrototypeModel(ProtoTorchBolt):
class UnsupervisedPrototypeModel(PrototypeModel):
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
@@ -103,6 +106,7 @@ class UnsupervisedPrototypeModel(PrototypeModel):
class SupervisedPrototypeModel(PrototypeModel):
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
@@ -135,7 +139,7 @@ class SupervisedPrototypeModel(PrototypeModel):
distances = self.compute_distances(x)
_, plabels = self.proto_layer()
winning = stratified_min_pooling(distances, plabels)
-y_pred = torch.nn.functional.softmin(winning)
+y_pred = torch.nn.functional.softmin(winning, dim=1)
return y_pred
def predict_from_distances(self, distances):
@@ -162,6 +166,14 @@
prog_bar=True,
logger=True)
+def test_step(self, batch, batch_idx):
+x, targets = batch
+preds = self.predict(x)
+accuracy = torchmetrics.functional.accuracy(preds.int(), targets.int())
+self.log("test_acc", accuracy)
class ProtoTorchMixin(object):
"""All mixins are ProtoTorchMixins."""
@@ -170,6 +182,7 @@ class ProtoTorchMixin(object):
class NonGradientMixin(ProtoTorchMixin):
"""Mixin for custom non-gradient optimization."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.automatic_optimization = False
@@ -180,6 +193,7 @@ class NonGradientMixin(ProtoTorchMixin):
class ImagePrototypesMixin(ProtoTorchMixin):
"""Mixin for models with image prototypes."""
def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
"""Constrain the components to the range [0, 1] by clamping after updates."""
self.proto_layer.components.data.clamp_(0.0, 1.0)
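The dim=1 argument added to the softmin call above makes the normalization axis explicit and silences PyTorch's implicit-dimension deprecation warning. A minimal sketch in plain PyTorch (the tensor values below are made up for illustration):

import torch
import torch.nn.functional as F

# Winning per-class distances for a batch of two samples: shape (batch, num_classes).
winning = torch.tensor([[0.2, 1.5, 3.0],
                        [2.0, 0.1, 0.4]])

# softmin assigns high probability to small distances; dim=1 normalizes
# over the class axis so that each row sums to one.
y_pred = F.softmin(winning, dim=1)
print(y_pred.sum(dim=1))  # tensor([1., 1.])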

View File

@@ -11,6 +11,7 @@ from .extras import ConnectionTopology
class PruneLoserPrototypes(pl.Callback):
def __init__(self,
threshold=0.01,
idle_epochs=10,
@@ -67,6 +68,7 @@ class PruneLoserPrototypes(pl.Callback):
class PrototypeConvergence(pl.Callback):
def __init__(self, min_delta=0.01, idle_epochs=10, verbose=False):
self.min_delta = min_delta
self.idle_epochs = idle_epochs  # epochs to wait
@@ -89,6 +91,7 @@ class GNGCallback(pl.Callback):
Based on "A Growing Neural Gas Network Learns Topologies" by Bernd Fritzke.
"""
def __init__(self, reduction=0.1, freq=10):
self.reduction = reduction
self.freq = freq

View File

@@ -13,6 +13,7 @@ from .glvq import SiameseGLVQ
class CBC(SiameseGLVQ):
"""Classification-By-Components."""
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)

View File

@@ -1,123 +0,0 @@
"""Prototorch Data Modules
This allows to store the used dataset inside a Lightning Module.
Mainly used for PytorchLightningCLI configurations.
"""
from typing import Any, Optional, Type
import prototorch as pt
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset, random_split
from torchvision import transforms
from torchvision.datasets import MNIST
# MNIST
class MNISTDataModule(pl.LightningDataModule):
def __init__(self, batch_size=32):
super().__init__()
self.batch_size = batch_size
# Download mnist dataset as side-effect, only called on the first cpu
def prepare_data(self):
MNIST("~/datasets", train=True, download=True)
MNIST("~/datasets", train=False, download=True)
# called for every GPU/machine (assigning state is OK)
def setup(self, stage=None):
# Transforms
transform = transforms.Compose([
transforms.ToTensor(),
])
# Split dataset
if stage in (None, "fit"):
mnist_train = MNIST("~/datasets", train=True, transform=transform)
self.mnist_train, self.mnist_val = random_split(
mnist_train,
[55000, 5000],
)
if stage == (None, "test"):
self.mnist_test = MNIST(
"~/datasets",
train=False,
transform=transform,
)
# Dataloaders
def train_dataloader(self):
mnist_train = DataLoader(self.mnist_train, batch_size=self.batch_size)
return mnist_train
def val_dataloader(self):
mnist_val = DataLoader(self.mnist_val, batch_size=self.batch_size)
return mnist_val
def test_dataloader(self):
mnist_test = DataLoader(self.mnist_test, batch_size=self.batch_size)
return mnist_test
# def train_on_mnist(batch_size=256) -> type:
# class DataClass(pl.LightningModule):
# datamodule = MNISTDataModule(batch_size=batch_size)
# def __init__(self, *args, **kwargs):
# prototype_initializer = kwargs.pop(
# "prototype_initializer", pt.components.Zeros((28, 28, 1)))
# super().__init__(*args,
# prototype_initializer=prototype_initializer,
# **kwargs)
# dc: Type[DataClass] = DataClass
# return dc
# ABSTRACT
class GeneralDataModule(pl.LightningDataModule):
def __init__(self, dataset: Dataset, batch_size: int = 32) -> None:
super().__init__()
self.train_dataset = dataset
self.batch_size = batch_size
def train_dataloader(self) -> DataLoader:
return DataLoader(self.train_dataset, batch_size=self.batch_size)
# def train_on_dataset(dataset: Dataset, batch_size: int = 256):
# class DataClass(pl.LightningModule):
# datamodule = GeneralDataModule(dataset, batch_size)
# datashape = dataset[0][0].shape
# example_input_array = torch.zeros_like(dataset[0][0]).unsqueeze(0)
# def __init__(self, *args: Any, **kwargs: Any) -> None:
# prototype_initializer = kwargs.pop(
# "prototype_initializer",
# pt.components.Zeros(self.datashape),
# )
# super().__init__(*args,
# prototype_initializer=prototype_initializer,
# **kwargs)
# return DataClass
# if __name__ == "__main__":
# from prototorch.models import GLVQ
# demo_dataset = pt.datasets.Iris()
# TrainingClass: Type = train_on_dataset(demo_dataset)
# class DemoGLVQ(TrainingClass, GLVQ):
# """Model Definition."""
# # Hyperparameters
# hparams = dict(
# distribution={
# "num_classes": 3,
# "prototypes_per_class": 4
# },
# lr=0.01,
# )
# initialized = DemoGLVQ(hparams)
# print(initialized)

View File

@@ -15,7 +15,46 @@ def rank_scaled_gaussian(distances, lambd):
return torch.exp(-torch.exp(-ranks / lambd) * distances)
def orthogonalization(tensors):
"""Orthogonalization via polar decomposition """
u, _, v = torch.svd(tensors, compute_uv=True)
u_shape = tuple(list(u.shape))
v_shape = tuple(list(v.shape))
# reshape to (num x N x M)
u = torch.reshape(u, (-1, u_shape[-2], u_shape[-1]))
v = torch.reshape(v, (-1, v_shape[-2], v_shape[-1]))
out = u @ v.permute([0, 2, 1])
out = torch.reshape(out, u_shape[:-1] + (v_shape[-2], ))
return out
def ltangent_distance(x, y, omegas):
r"""Localized Tangent distance.
Compute Orthogonal Complement: :math:`\bm P_k = \bm I - \Omega_k \Omega_k^T`
Compute Tangent Distance: :math:`{\| \bm P \bm x - \bm P_k \bm y_k \|}_2`
:param `torch.tensor` omegas: Three dimensional matrix
:rtype: `torch.tensor`
"""
x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
p = torch.eye(omegas.shape[-2], device=omegas.device) - torch.bmm(
omegas, omegas.permute([0, 2, 1]))
projected_x = x @ p
projected_y = torch.diagonal(y @ p).T
expanded_y = torch.unsqueeze(projected_y, dim=1)
batchwise_difference = expanded_y - projected_x
differences_squared = batchwise_difference**2
distances = torch.sqrt(torch.sum(differences_squared, dim=2))
distances = distances.permute(1, 0)
return distances
class GaussianPrior(torch.nn.Module):
def __init__(self, variance):
super().__init__()
self.variance = variance
@@ -25,6 +64,7 @@ class GaussianPrior(torch.nn.Module):
class RankScaledGaussianPrior(torch.nn.Module):
def __init__(self, lambd):
super().__init__()
self.lambd = lambd
@@ -34,6 +74,7 @@ class RankScaledGaussianPrior(torch.nn.Module):
class ConnectionTopology(torch.nn.Module):
def __init__(self, agelimit, num_prototypes):
super().__init__()
self.agelimit = agelimit
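In display form, the distance that ltangent_distance above computes per prototype k is (a sketch following the docstring's notation; it assumes each Omega_k has orthonormal columns, which the orthogonalization helper is meant to enforce):

P_k = I - \Omega_k \Omega_k^{\top}, \qquad d_k(\bm{x}, \bm{y}_k) = \lVert P_k \bm{x} - P_k \bm{y}_k \rVert_2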

View File

@@ -4,16 +4,26 @@ import torch
from torch.nn.parameter import Parameter
from ..core.competitions import wtac
-from ..core.distances import lomega_distance, omega_distance, squared_euclidean_distance
+from ..core.distances import (
+lomega_distance,
+omega_distance,
+squared_euclidean_distance,
+)
from ..core.initializers import EyeTransformInitializer
-from ..core.losses import GLVQLoss, lvq1_loss, lvq21_loss
+from ..core.losses import (
+GLVQLoss,
+lvq1_loss,
+lvq21_loss,
+)
from ..core.transforms import LinearTransform
from ..nn.wrappers import LambdaLayer, LossLayer
from .abstract import ImagePrototypesMixin, SupervisedPrototypeModel
+from .extras import ltangent_distance, orthogonalization
class GLVQ(SupervisedPrototypeModel):
"""Generalized Learning Vector Quantization."""
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
@@ -98,6 +108,7 @@ class SiameseGLVQ(GLVQ):
transformation pipeline are only learned from the inputs.
"""
def __init__(self,
hparams,
backbone=torch.nn.Identity(),
@@ -164,6 +175,7 @@ class LVQMLN(SiameseGLVQ):
rather in the embedding space.
"""
def compute_distances(self, x):
latent_protos, _ = self.proto_layer()
latent_x = self.backbone(x)
@@ -179,6 +191,7 @@ class GRLVQ(SiameseGLVQ):
TODO Make a RelevanceLayer. `bb_lr` is ignored otherwise.
"""
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
@@ -204,6 +217,7 @@ class SiameseGMLVQ(SiameseGLVQ):
Implemented as a Siamese network with a linear transformation backbone.
"""
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
@@ -234,6 +248,7 @@ class GMLVQ(GLVQ):
function. This makes it easier to implement a localized variant.
"""
def __init__(self, hparams, **kwargs):
distance_fn = kwargs.pop("distance_fn", omega_distance)
super().__init__(hparams, distance_fn=distance_fn, **kwargs)
@@ -251,6 +266,12 @@ class GMLVQ(GLVQ):
def omega_matrix(self):
return self._omega.detach().cpu()
+@property
+def lambda_matrix(self):
+omega = self._omega.detach()  # (input_dim, latent_dim)
+lam = omega @ omega.T
+return lam.detach().cpu()
def compute_distances(self, x):
protos, _ = self.proto_layer()
distances = self.distance_layer(x, protos, self._omega)
@@ -262,6 +283,7 @@ class GMLVQ(GLVQ):
class LGMLVQ(GMLVQ):
"""Localized and Generalized Matrix Learning Vector Quantization."""
def __init__(self, hparams, **kwargs):
distance_fn = kwargs.pop("distance_fn", lomega_distance)
super().__init__(hparams, distance_fn=distance_fn, **kwargs)
@@ -276,8 +298,48 @@ class LGMLVQ(GMLVQ):
self.register_parameter("_omega", Parameter(omega))
class GTLVQ(LGMLVQ):
"""Localized and Generalized Tangent Learning Vector Quantization."""
def __init__(self, hparams, **kwargs):
distance_fn = kwargs.pop("distance_fn", ltangent_distance)
super().__init__(hparams, distance_fn=distance_fn, **kwargs)
omega_initializer = kwargs.get("omega_initializer")
if omega_initializer is not None:
subspace = omega_initializer.generate(self.hparams.input_dim,
self.hparams.latent_dim)
omega = torch.repeat_interleave(subspace.unsqueeze(0),
self.num_prototypes,
dim=0)
else:
omega = torch.rand(
self.num_prototypes,
self.hparams.input_dim,
self.hparams.latent_dim,
device=self.device,
)
# Re-register `_omega` to override the one from the super class.
self.register_parameter("_omega", Parameter(omega))
def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
with torch.no_grad():
self._omega.copy_(orthogonalization(self._omega))
class SiameseGTLVQ(SiameseGLVQ, GTLVQ):
"""Generalized Tangent Learning Vector Quantization.
Implemented as a Siamese network with a linear transformation backbone.
"""
class GLVQ1(GLVQ):
"""Generalized Learning Vector Quantization 1."""
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
self.loss = LossLayer(lvq1_loss)
@@ -286,6 +348,7 @@ class GLVQ1(GLVQ):
class GLVQ21(GLVQ):
"""Generalized Learning Vector Quantization 2.1."""
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
self.loss = LossLayer(lvq21_loss)
@@ -308,3 +371,18 @@ class ImageGMLVQ(ImagePrototypesMixin, GMLVQ):
after updates.
"""
class ImageGTLVQ(ImagePrototypesMixin, GTLVQ):
"""GTLVQ for training on image data.
GTLVQ model that constrains the prototypes to the range [0, 1] by clamping
after updates.
"""
def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
"""Constrain the components to the range [0, 1] by clamping after updates."""
self.proto_layer.components.data.clamp_(0.0, 1.0)
with torch.no_grad():
self._omega.copy_(orthogonalization(self._omega))
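A short sketch of how the new lambda_matrix property can be inspected after training (hypothetical snippet; model is assumed to be a fitted GMLVQ instance such as the one in examples/gmlvq_iris.py):

import torch

lam = model.lambda_matrix        # Lambda = Omega Omega^T, shape (input_dim, input_dim), detached on CPU
relevances = torch.diag(lam)     # diagonal entries act as per-feature relevance weights
print(relevances / relevances.sum())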

View File

@@ -4,13 +4,17 @@ import warnings
from ..core.competitions import KNNC
from ..core.components import LabeledComponents
-from ..core.initializers import LiteralCompInitializer, LiteralLabelsInitializer
+from ..core.initializers import (
+LiteralCompInitializer,
+LiteralLabelsInitializer,
+)
from ..utils.utils import parse_data_arg
from .abstract import SupervisedPrototypeModel
class KNN(SupervisedPrototypeModel):
"""K-Nearest-Neighbors classification algorithm."""
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)

View File

@@ -9,6 +9,7 @@ from .glvq import GLVQ
class LVQ1(NonGradientMixin, GLVQ):
"""Learning Vector Quantization 1."""
def training_step(self, train_batch, batch_idx, optimizer_idx=None):
protos, plables = self.proto_layer()
x, y = train_batch
@@ -38,6 +39,7 @@ class LVQ1(NonGradientMixin, GLVQ):
class LVQ21(NonGradientMixin, GLVQ):
"""Learning Vector Quantization 2.1."""
def training_step(self, train_batch, batch_idx, optimizer_idx=None):
protos, plabels = self.proto_layer()
@@ -70,6 +72,7 @@ class MedianLVQ(NonGradientMixin, GLVQ):
# TODO Avoid computing distances over and over
"""
def __init__(self, hparams, verbose=True, **kwargs):
self.verbose = verbose
super().__init__(hparams, **kwargs)

View File

@@ -11,6 +11,7 @@ from .glvq import GLVQ, SiameseGMLVQ
class CELVQ(GLVQ):
"""Cross-Entropy Learning Vector Quantization."""
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
@@ -29,6 +30,7 @@ class CELVQ(GLVQ):
class ProbabilisticLVQ(GLVQ):
def __init__(self, hparams, rejection_confidence=0.0, **kwargs):
super().__init__(hparams, **kwargs)
@@ -62,6 +64,7 @@ class ProbabilisticLVQ(GLVQ):
class SLVQ(ProbabilisticLVQ):
"""Soft Learning Vector Quantization."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.loss = LossLayer(nllr_loss)
@@ -70,6 +73,7 @@ class SLVQ(ProbabilisticLVQ):
class RSLVQ(ProbabilisticLVQ):
"""Robust Soft Learning Vector Quantization."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.loss = LossLayer(rslvq_loss)
@@ -81,6 +85,7 @@ class PLVQ(ProbabilisticLVQ, SiameseGMLVQ):
TODO: Use Backbone LVQ instead
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.conditional_distribution = RankScaledGaussianPrior(

View File

@@ -18,6 +18,7 @@ class KohonenSOM(NonGradientMixin, UnsupervisedPrototypeModel):
TODO Allow non-2D grids
"""
def __init__(self, hparams, **kwargs):
h, w = hparams.get("shape")
# Ignore `num_prototypes`
@@ -69,6 +70,7 @@ class KohonenSOM(NonGradientMixin, UnsupervisedPrototypeModel):
class HeskesSOM(UnsupervisedPrototypeModel):
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
@@ -78,6 +80,7 @@ class HeskesSOM(UnsupervisedPrototypeModel):
class NeuralGas(UnsupervisedPrototypeModel):
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)
@@ -110,6 +113,7 @@ class NeuralGas(UnsupervisedPrototypeModel):
class GrowingNeuralGas(NeuralGas):
def __init__(self, hparams, **kwargs):
super().__init__(hparams, **kwargs)

View File

@@ -11,6 +11,7 @@ from ..utils.utils import mesh2d
class Vis2DAbstract(pl.Callback):
def __init__(self,
data,
title="Prototype Visualization",
@@ -118,6 +119,7 @@ class Vis2DAbstract(pl.Callback):
class VisGLVQ2D(Vis2DAbstract):
def on_epoch_end(self, trainer, pl_module):
if not self.precheck(trainer):
return True
@@ -141,6 +143,7 @@ class VisGLVQ2D(Vis2DAbstract):
class VisSiameseGLVQ2D(Vis2DAbstract):
def __init__(self, *args, map_protos=True, **kwargs):
super().__init__(*args, **kwargs)
self.map_protos = map_protos
@@ -178,7 +181,42 @@ class VisSiameseGLVQ2D(Vis2DAbstract):
self.log_and_display(trainer, pl_module)
class VisGMLVQ2D(Vis2DAbstract):
def __init__(self, *args, ev_proj=True, **kwargs):
super().__init__(*args, **kwargs)
self.ev_proj = ev_proj
def on_epoch_end(self, trainer, pl_module):
if not self.precheck(trainer):
return True
protos = pl_module.prototypes
plabels = pl_module.prototype_labels
x_train, y_train = self.x_train, self.y_train
device = pl_module.device
omega = pl_module._omega.detach()
lam = omega @ omega.T
u, _, _ = torch.pca_lowrank(lam, q=2)
with torch.no_grad():
x_train = torch.Tensor(x_train).to(device)
x_train = x_train @ u
x_train = x_train.cpu().detach()
if self.show_protos:
with torch.no_grad():
protos = torch.Tensor(protos).to(device)
protos = protos @ u
protos = protos.cpu().detach()
ax = self.setup_ax()
self.plot_data(ax, x_train, y_train)
if self.show_protos:
self.plot_protos(ax, protos, plabels)
self.log_and_display(trainer, pl_module)
class VisCBC2D(Vis2DAbstract):
def on_epoch_end(self, trainer, pl_module):
if not self.precheck(trainer):
return True
@@ -202,6 +240,7 @@ class VisCBC2D(Vis2DAbstract):
class VisNG2D(Vis2DAbstract):
def on_epoch_end(self, trainer, pl_module):
if not self.precheck(trainer):
return True
@@ -229,6 +268,7 @@ class VisNG2D(Vis2DAbstract):
class VisImgComp(Vis2DAbstract):
def __init__(self,
*args,
random_data=0,
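The new VisGMLVQ2D callback is used like the other visualizers; a minimal sketch mirroring examples/gmlvq_iris.py above:

import prototorch as pt
import pytorch_lightning as pl

train_ds = pt.datasets.Iris()
# Projects the data and prototypes onto the two leading eigenvectors of Lambda = Omega Omega^T.
vis = pt.models.VisGMLVQ2D(data=train_ds)
trainer = pl.Trainer(callbacks=[vis], max_epochs=50)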

View File

@@ -1,8 +1,23 @@
-[isort]
-profile = hug
-src_paths = isort, test
[yapf]
based_on_style = pep8
spaces_before_comment = 2
split_before_logical_operator = true
+[pylint]
+disable =
+too-many-arguments,
+too-few-public-methods,
+fixme,
+[pycodestyle]
+max-line-length = 79
+[isort]
+profile = hug
+src_paths = isort, test
+multi_line_output = 3
+include_trailing_comma = True
+force_grid_wrap = 3
+use_parentheses = True
+line_length = 79

View File

@@ -37,6 +37,7 @@ DOCS = [
"recommonmark", "recommonmark",
"sphinx", "sphinx",
"nbsphinx", "nbsphinx",
"ipykernel",
"sphinx_rtd_theme", "sphinx_rtd_theme",
"sphinxcontrib-katex", "sphinxcontrib-katex",
"sphinxcontrib-bibtex", "sphinxcontrib-bibtex",
@@ -53,7 +54,7 @@ ALL = CLI + DEV + DOCS + EXAMPLES + TESTS
setup( setup(
name=safe_name("prototorch_" + PLUGIN_NAME), name=safe_name("prototorch_" + PLUGIN_NAME),
version="0.3.0", version="0.4.1",
description="Pre-packaged prototype-based " description="Pre-packaged prototype-based "
"machine learning models using ProtoTorch and PyTorch-Lightning.", "machine learning models using ProtoTorch and PyTorch-Lightning.",
long_description=long_description, long_description=long_description,

View File

@@ -4,6 +4,7 @@ import unittest
class TestDummy(unittest.TestCase):
def setUp(self):
pass