Compare commits

3 Commits

Author | SHA1 | Date
---|---|---
 | aeb6417c28 |
 | cb7fb91c95 |
 | 823b05e390 |
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.4.1
+current_version = 0.2.0
 commit = True
 tag = True
 parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)
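As an aside, the `parse` pattern in the hunk above is a plain named-group regex; a minimal sketch of what it captures (standard-library `re` only; the version string is just an illustration):

```python
import re

# Same pattern as the `parse` option in [bumpversion] above.
PARSE = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"

match = re.fullmatch(PARSE, "0.2.0")
assert match is not None
print(match.groupdict())  # {'major': '0', 'minor': '2', 'patch': '0'}
```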
15 .codacy.yml Normal file

@@ -0,0 +1,15 @@
+# To validate the contents of your configuration file
+# run the following command in the folder where the configuration file is located:
+# codacy-analysis-cli validate-configuration --directory `pwd`
+# To analyse, run:
+# codacy-analysis-cli analyse --tool remark-lint --directory `pwd`
+---
+engines:
+  pylintpython3:
+    exclude_paths:
+      - config/engines.yml
+  remark-lint:
+    exclude_paths:
+      - config/engines.yml
+exclude_paths:
+  - 'tests/**'

2 .codecov.yml Normal file

@@ -0,0 +1,2 @@
+comment:
+  require_changes: yes
25 .github/workflows/examples.yml vendored

@@ -1,25 +0,0 @@
-# Thi workflow will install Python dependencies, run tests and lint with a single version of Python
-# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
-
-name: examples
-
-on:
-  push:
-    paths:
-      - 'examples/**.py'
-jobs:
-  cpu:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - name: Set up Python 3.9
-        uses: actions/setup-python@v2
-        with:
-          python-version: 3.9
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install .[all]
-      - name: Run examples
-        run: |
-          ./tests/test_examples.sh examples/
73 .github/workflows/pythonapp.yml vendored

@@ -1,73 +0,0 @@
-# This workflow will install Python dependencies, run tests and lint with a single version of Python
-# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
-
-name: tests
-
-on:
-  push:
-  pull_request:
-    branches: [ master ]
-
-jobs:
-  style:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - name: Set up Python 3.9
-        uses: actions/setup-python@v2
-        with:
-          python-version: 3.9
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install .[all]
-      - uses: pre-commit/action@v2.0.3
-  compatibility:
-    needs: style
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version: ["3.7", "3.8", "3.9"]
-        os: [ubuntu-latest, windows-latest]
-        exclude:
-          - os: windows-latest
-            python-version: "3.7"
-          - os: windows-latest
-            python-version: "3.8"
-
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v2
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
-        with:
-          python-version: ${{ matrix.python-version }}
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install .[all]
-      - name: Test with pytest
-        run: |
-          pytest
-  publish_pypi:
-    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
-    needs: compatibility
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - name: Set up Python 3.9
-        uses: actions/setup-python@v2
-        with:
-          python-version: "3.9"
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install .[all]
-          pip install wheel
-      - name: Build package
-        run: python setup.py sdist bdist_wheel
-      - name: Publish a Python distribution to PyPI
-        uses: pypa/gh-action-pypi-publish@release/v1
-        with:
-          user: __token__
-          password: ${{ secrets.PYPI_API_TOKEN }}
@@ -3,7 +3,7 @@
 
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.1.0
+    rev: v4.0.1
     hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer

@@ -18,19 +18,19 @@ repos:
      - id: autoflake
 
  - repo: http://github.com/PyCQA/isort
-    rev: 5.10.1
+    rev: 5.8.0
    hooks:
      - id: isort
 
  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.931
+    rev: v0.902
    hooks:
      - id: mypy
        files: prototorch
        additional_dependencies: [types-pkg_resources]
 
  - repo: https://github.com/pre-commit/mirrors-yapf
-    rev: v0.32.0
+    rev: v0.31.0
    hooks:
      - id: yapf
 

@@ -42,7 +42,7 @@ repos:
      - id: python-check-blanket-noqa
 
  - repo: https://github.com/asottile/pyupgrade
-    rev: v2.31.0
+    rev: v2.19.4
    hooks:
      - id: pyupgrade
 
25 .travis.yml Normal file

@@ -0,0 +1,25 @@
+dist: bionic
+sudo: false
+language: python
+python: 3.9
+cache:
+  directories:
+    - "$HOME/.cache/pip"
+    - "./tests/artifacts"
+    - "$HOME/datasets"
+install:
+  - pip install git+git://github.com/si-cim/prototorch@dev --progress-bar off
+  - pip install .[all] --progress-bar off
+script:
+  - coverage run -m pytest
+  - ./tests/test_examples.sh examples/
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
+deploy:
+  provider: pypi
+  username: __token__
+  password:
+    secure: PDoASdYdVlt1aIROYilAsCW6XpBs/TDel0CSptDzX0CI7i4+ksEW6Jk0JyL58bQt7V4F8PeGty4A8SODzAUIk2d8sty5RI4VJjvXZFCXlUsW+JGUN3EvWNqJLnwN8TDxgu2ENao37GUh0dC6pL8b6bVDGeOLaY1E/YR1jimmTJuxxjKjBIU8ByqTNBnC3rzybMTPU3nRoOM/WMQUyReHrPoUJj685sLqrLruhAqhiYsPbotP8xY6i8+KBbhp5vgiARV2+LkbeGcYZwozCzrEqPKY7YIfVPh895cw0v4NRyFwK1P2jyyIt22Z9Ni0Uy1J5/Qp9Sv6mBPeGjm3pnpDCQyS+2bNIDaj08KUYTIo1mC/Jcu4jQgppZEF+oey9q1tgGo+/JhsTeERKV9BoPF5HDiRArU1s5aWJjFnCsHfu+W1XqX8bwN3aTYsEIaApT3/irc6XyFJIfMN82+z+lUcZ4Y1yAHT3nH1Vif+pZYZB0UOSGrHwuI/UayjKzbCzHMuHWylWB/9ehd4o4YVp6iubVHc7Sj0KQkwBgwgl6TvwNcUuFsplFabCxmX0mVcavXsWiOBc+ivPmU6574zGj0JcEk5ghVgnKH+QS96aVrKOzegwbl4O13jY8dJp+/zgXl0gJOvRKr4BhuBJKcBaMQHdSKUChVsJJtqDyt59GvWcbg=
+  on:
+    tags: true
+    skip_existing: true
@@ -1,5 +1,6 @@
 # ProtoTorch Models
 
+[](https://travis-ci.com/github/si-cim/prototorch_models)
 [](https://github.com/si-cim/prototorch_models/releases)
 [](https://pypi.org/project/prototorch_models/)
 [](https://github.com/si-cim/prototorch_models/blob/master/LICENSE)
@@ -23,7 +23,7 @@ author = "Jensun Ravichandran"
 
 # The full version, including alpha/beta/rc tags
 #
-release = "0.4.1"
+release = "0.2.0"
 
 # -- General configuration ---------------------------------------------------
 
81 examples/binnam_tecator.py Normal file

@@ -0,0 +1,81 @@
+"""Neural Additive Model (NAM) example for binary classification."""
+
+import argparse
+
+import prototorch as pt
+import pytorch_lightning as pl
+import torch
+from matplotlib import pyplot as plt
+
+if __name__ == "__main__":
+    # Command-line arguments
+    parser = argparse.ArgumentParser()
+    parser = pl.Trainer.add_argparse_args(parser)
+    args = parser.parse_args()
+
+    # Dataset
+    train_ds = pt.datasets.Tecator("~/datasets")
+
+    # Dataloaders
+    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=64)
+
+    # Hyperparameters
+    hparams = dict(lr=0.1)
+
+    # Define the feature extractor
+    class FE(torch.nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.modules_list = torch.nn.ModuleList([
+                torch.nn.Linear(1, 3),
+                torch.nn.Sigmoid(),
+                torch.nn.Linear(3, 1),
+                torch.nn.Sigmoid(),
+            ])
+
+        def forward(self, x):
+            for m in self.modules_list:
+                x = m(x)
+            return x
+
+    # Initialize the model
+    model = pt.models.BinaryNAM(
+        hparams,
+        extractors=torch.nn.ModuleList([FE() for _ in range(100)]),
+    )
+
+    # Compute intermediate input and output sizes
+    model.example_input_array = torch.zeros(4, 100)
+
+    # Callbacks
+    es = pl.callbacks.EarlyStopping(
+        monitor="train_loss",
+        min_delta=0.001,
+        patience=20,
+        mode="min",
+        verbose=True,
+        check_on_train_epoch_end=True,
+    )
+
+    # Setup trainer
+    trainer = pl.Trainer.from_argparse_args(
+        args,
+        callbacks=[
+            es,
+        ],
+        terminate_on_nan=True,
+        weights_summary=None,
+        accelerator="ddp",
+    )
+
+    # Training loop
+    trainer.fit(model, train_loader)
+
+    # Visualize extractor shape functions
+    fig, axes = plt.subplots(10, 10)
+    for i, ax in enumerate(axes.flat):
+        x = torch.linspace(-2, 2, 100)  # TODO use min/max from data
+        y = model.extractors[i](x.view(100, 1)).squeeze().detach()
+        ax.plot(x, y)
+        ax.set(title=f"Feature {i + 1}", xticklabels=[], yticklabels=[])
+    plt.show()
86 examples/binnam_xor.py Normal file

@@ -0,0 +1,86 @@
+"""Neural Additive Model (NAM) example for binary classification."""
+
+import argparse
+
+import prototorch as pt
+import pytorch_lightning as pl
+import torch
+from matplotlib import pyplot as plt
+
+if __name__ == "__main__":
+    # Command-line arguments
+    parser = argparse.ArgumentParser()
+    parser = pl.Trainer.add_argparse_args(parser)
+    args = parser.parse_args()
+
+    # Dataset
+    train_ds = pt.datasets.XOR()
+
+    # Dataloaders
+    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=256)
+
+    # Hyperparameters
+    hparams = dict(lr=0.001)
+
+    # Define the feature extractor
+    class FE(torch.nn.Module):
+        def __init__(self, hidden_size=10):
+            super().__init__()
+            self.modules_list = torch.nn.ModuleList([
+                torch.nn.Linear(1, hidden_size),
+                torch.nn.ReLU(),
+                torch.nn.Linear(hidden_size, 1),
+                torch.nn.ReLU(),
+            ])
+
+        def forward(self, x):
+            for m in self.modules_list:
+                x = m(x)
+            return x
+
+    # Initialize the model
+    model = pt.models.BinaryNAM(
+        hparams,
+        extractors=torch.nn.ModuleList([FE(20) for _ in range(2)]),
+    )
+
+    # Compute intermediate input and output sizes
+    model.example_input_array = torch.zeros(4, 2)
+
+    # Summary
+    print(model)
+
+    # Callbacks
+    vis = pt.models.Vis2D(data=train_ds)
+    es = pl.callbacks.EarlyStopping(
+        monitor="train_loss",
+        min_delta=0.001,
+        patience=50,
+        mode="min",
+        verbose=False,
+        check_on_train_epoch_end=True,
+    )
+
+    # Setup trainer
+    trainer = pl.Trainer.from_argparse_args(
+        args,
+        callbacks=[
+            vis,
+            es,
+        ],
+        terminate_on_nan=True,
+        weights_summary="full",
+        accelerator="ddp",
+    )
+
+    # Training loop
+    trainer.fit(model, train_loader)
+
+    # Visualize extractor shape functions
+    fig, axes = plt.subplots(2)
+    for i, ax in enumerate(axes.flat):
+        x = torch.linspace(0, 1, 100)  # TODO use min/max from data
+        y = model.extractors[i](x.view(100, 1)).squeeze().detach()
+        ax.plot(x, y)
+        ax.set(title=f"Feature {i + 1}")
+    plt.show()
8 examples/cli/README.md Normal file

@@ -0,0 +1,8 @@
+# Examples using Lightning CLI
+
+Examples in this folder use the experimental [Lightning CLI](https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_cli.html).
+
+To use the example run
+```
+python gmlvq.py --config gmlvq.yaml
+```
19 examples/cli/gmlvq.py Normal file

@@ -0,0 +1,19 @@
+"""GMLVQ example using the MNIST dataset."""
+
+import prototorch as pt
+import torch
+from prototorch.models import ImageGMLVQ
+from prototorch.models.abstract import PrototypeModel
+from prototorch.models.data import MNISTDataModule
+from pytorch_lightning.utilities.cli import LightningCLI
+
+
+class ExperimentClass(ImageGMLVQ):
+    def __init__(self, hparams, **kwargs):
+        super().__init__(hparams,
+                         optimizer=torch.optim.Adam,
+                         prototype_initializer=pt.components.zeros(28 * 28),
+                         **kwargs)
+
+
+cli = LightningCLI(ImageGMLVQ, MNISTDataModule)
11 examples/cli/gmlvq.yaml Normal file

@@ -0,0 +1,11 @@
+model:
+  hparams:
+    input_dim: 784
+    latent_dim: 784
+    distribution:
+      num_classes: 10
+      prototypes_per_class: 2
+    proto_lr: 0.01
+    bb_lr: 0.01
+data:
+  batch_size: 32
@@ -1,4 +1,4 @@
-"""GMLVQ example using the spiral dataset."""
+"""GLVQ example using the spiral dataset."""
 
 import argparse
 
@@ -1,58 +0,0 @@
-"""GMLVQ example using the Iris dataset."""
-
-import argparse
-
-import prototorch as pt
-import pytorch_lightning as pl
-import torch
-from torch.optim.lr_scheduler import ExponentialLR
-
-if __name__ == "__main__":
-    # Command-line arguments
-    parser = argparse.ArgumentParser()
-    parser = pl.Trainer.add_argparse_args(parser)
-    args = parser.parse_args()
-
-    # Dataset
-    train_ds = pt.datasets.Iris()
-
-    # Dataloaders
-    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=64)
-
-    # Hyperparameters
-    hparams = dict(
-        input_dim=4,
-        latent_dim=4,
-        distribution={
-            "num_classes": 3,
-            "per_class": 2
-        },
-        proto_lr=0.01,
-        bb_lr=0.01,
-    )
-
-    # Initialize the model
-    model = pt.models.GMLVQ(
-        hparams,
-        optimizer=torch.optim.Adam,
-        prototypes_initializer=pt.initializers.SMCI(train_ds),
-        lr_scheduler=ExponentialLR,
-        lr_scheduler_kwargs=dict(gamma=0.99, verbose=False),
-    )
-
-    # Compute intermediate input and output sizes
-    model.example_input_array = torch.zeros(4, 4)
-
-    # Callbacks
-    vis = pt.models.VisGMLVQ2D(data=train_ds)
-
-    # Setup trainer
-    trainer = pl.Trainer.from_argparse_args(
-        args,
-        callbacks=[vis],
-        weights_summary="full",
-        accelerator="ddp",
-    )
-
-    # Training loop
-    trainer.fit(model, train_loader)
@@ -1,104 +0,0 @@
-"""GTLVQ example using the MNIST dataset."""
-
-import argparse
-
-import prototorch as pt
-import pytorch_lightning as pl
-import torch
-from torchvision import transforms
-from torchvision.datasets import MNIST
-
-if __name__ == "__main__":
-    # Command-line arguments
-    parser = argparse.ArgumentParser()
-    parser = pl.Trainer.add_argparse_args(parser)
-    args = parser.parse_args()
-
-    # Dataset
-    train_ds = MNIST(
-        "~/datasets",
-        train=True,
-        download=True,
-        transform=transforms.Compose([
-            transforms.ToTensor(),
-        ]),
-    )
-    test_ds = MNIST(
-        "~/datasets",
-        train=False,
-        download=True,
-        transform=transforms.Compose([
-            transforms.ToTensor(),
-        ]),
-    )
-
-    # Dataloaders
-    train_loader = torch.utils.data.DataLoader(train_ds,
-                                               num_workers=0,
-                                               batch_size=256)
-    test_loader = torch.utils.data.DataLoader(test_ds,
-                                              num_workers=0,
-                                              batch_size=256)
-
-    # Hyperparameters
-    num_classes = 10
-    prototypes_per_class = 1
-    hparams = dict(
-        input_dim=28 * 28,
-        latent_dim=28,
-        distribution=(num_classes, prototypes_per_class),
-        proto_lr=0.01,
-        bb_lr=0.01,
-    )
-
-    # Initialize the model
-    model = pt.models.ImageGTLVQ(
-        hparams,
-        optimizer=torch.optim.Adam,
-        prototypes_initializer=pt.initializers.SMCI(train_ds),
-        #Use one batch of data for subspace initiator.
-        omega_initializer=pt.initializers.PCALinearTransformInitializer(
-            next(iter(train_loader))[0].reshape(256, 28 * 28)))
-
-    # Callbacks
-    vis = pt.models.VisImgComp(
-        data=train_ds,
-        num_columns=10,
-        show=False,
-        tensorboard=True,
-        random_data=100,
-        add_embedding=True,
-        embedding_data=200,
-        flatten_data=False,
-    )
-    pruning = pt.models.PruneLoserPrototypes(
-        threshold=0.01,
-        idle_epochs=1,
-        prune_quota_per_epoch=10,
-        frequency=1,
-        verbose=True,
-    )
-    es = pl.callbacks.EarlyStopping(
-        monitor="train_loss",
-        min_delta=0.001,
-        patience=15,
-        mode="min",
-        check_on_train_epoch_end=True,
-    )
-
-    # Setup trainer
-    # using GPUs here is strongly recommended!
-    trainer = pl.Trainer.from_argparse_args(
-        args,
-        callbacks=[
-            vis,
-            pruning,
-            # es,
-        ],
-        terminate_on_nan=True,
-        weights_summary=None,
-        accelerator="ddp",
-    )
-
-    # Training loop
-    trainer.fit(model, train_loader)
@@ -1,63 +0,0 @@
-"""Localized-GTLVQ example using the Moons dataset."""
-
-import argparse
-
-import prototorch as pt
-import pytorch_lightning as pl
-import torch
-
-if __name__ == "__main__":
-    # Command-line arguments
-    parser = argparse.ArgumentParser()
-    parser = pl.Trainer.add_argparse_args(parser)
-    args = parser.parse_args()
-
-    # Reproducibility
-    pl.utilities.seed.seed_everything(seed=2)
-
-    # Dataset
-    train_ds = pt.datasets.Moons(num_samples=300, noise=0.2, seed=42)
-
-    # Dataloaders
-    train_loader = torch.utils.data.DataLoader(train_ds,
-                                               batch_size=256,
-                                               shuffle=True)
-
-    # Hyperparameters
-    # Latent_dim should be lower than input dim.
-    hparams = dict(distribution=[1, 3], input_dim=2, latent_dim=1)
-
-    # Initialize the model
-    model = pt.models.GTLVQ(
-        hparams, prototypes_initializer=pt.initializers.SMCI(train_ds))
-
-    # Compute intermediate input and output sizes
-    model.example_input_array = torch.zeros(4, 2)
-
-    # Summary
-    print(model)
-
-    # Callbacks
-    vis = pt.models.VisGLVQ2D(data=train_ds)
-    es = pl.callbacks.EarlyStopping(
-        monitor="train_acc",
-        min_delta=0.001,
-        patience=20,
-        mode="max",
-        verbose=False,
-        check_on_train_epoch_end=True,
-    )
-
-    # Setup trainer
-    trainer = pl.Trainer.from_argparse_args(
-        args,
-        callbacks=[
-            vis,
-            es,
-        ],
-        weights_summary="full",
-        accelerator="ddp",
-    )
-
-    # Training loop
-    trainer.fit(model, train_loader)
@@ -6,7 +6,6 @@ import prototorch as pt
 import pytorch_lightning as pl
 import torch
 from sklearn.datasets import load_iris
-from sklearn.model_selection import train_test_split
 
 if __name__ == "__main__":
     # Command-line arguments

@@ -15,20 +14,12 @@ if __name__ == "__main__":
     args = parser.parse_args()
 
     # Dataset
-    X, y = load_iris(return_X_y=True)
-    X = X[:, [0, 2]]
-
-    X_train, X_test, y_train, y_test = train_test_split(X,
-                                                        y,
-                                                        test_size=0.5,
-                                                        random_state=42)
-
-    train_ds = pt.datasets.NumpyDataset(X_train, y_train)
-    test_ds = pt.datasets.NumpyDataset(X_test, y_test)
+    x_train, y_train = load_iris(return_X_y=True)
+    x_train = x_train[:, [0, 2]]
+    train_ds = pt.datasets.NumpyDataset(x_train, y_train)
 
     # Dataloaders
-    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=16)
-    test_loader = torch.utils.data.DataLoader(test_ds, batch_size=16)
+    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=150)
 
     # Hyperparameters
     hparams = dict(k=5)

@@ -44,7 +35,7 @@ if __name__ == "__main__":
 
     # Callbacks
     vis = pt.models.VisGLVQ2D(
-        data=(X_train, y_train),
+        data=(x_train, y_train),
         resolution=200,
         block=True,
     )

@@ -62,8 +53,5 @@ if __name__ == "__main__":
     trainer.fit(model, train_loader)
 
     # Recall
-    y_pred = model.predict(torch.tensor(X_train))
+    y_pred = model.predict(torch.tensor(x_train))
     print(y_pred)
-
-    # Test
-    trainer.test(model, dataloaders=test_loader)
@@ -10,7 +10,6 @@ from prototorch.utils.colors import hex_to_rgb
 
 
 class Vis2DColorSOM(pl.Callback):
-
     def __init__(self, data, title="ColorSOMe", pause_time=0.1):
         super().__init__()
         self.title = title
@@ -8,7 +8,6 @@ import torch
 
 
 class Backbone(torch.nn.Module):
-
     def __init__(self, input_size=4, hidden_size=10, latent_size=2):
         super().__init__()
         self.input_size = input_size
@@ -8,7 +8,6 @@ import torch
 
 
 class Backbone(torch.nn.Module):
-
     def __init__(self, input_size=4, hidden_size=10, latent_size=2):
         super().__init__()
         self.input_size = input_size
@@ -1,73 +0,0 @@
-"""Siamese GTLVQ example using all four dimensions of the Iris dataset."""
-
-import argparse
-
-import prototorch as pt
-import pytorch_lightning as pl
-import torch
-
-
-class Backbone(torch.nn.Module):
-
-    def __init__(self, input_size=4, hidden_size=10, latent_size=2):
-        super().__init__()
-        self.input_size = input_size
-        self.hidden_size = hidden_size
-        self.latent_size = latent_size
-        self.dense1 = torch.nn.Linear(self.input_size, self.hidden_size)
-        self.dense2 = torch.nn.Linear(self.hidden_size, self.latent_size)
-        self.activation = torch.nn.Sigmoid()
-
-    def forward(self, x):
-        x = self.activation(self.dense1(x))
-        out = self.activation(self.dense2(x))
-        return out
-
-
-if __name__ == "__main__":
-    # Command-line arguments
-    parser = argparse.ArgumentParser()
-    parser = pl.Trainer.add_argparse_args(parser)
-    args = parser.parse_args()
-
-    # Dataset
-    train_ds = pt.datasets.Iris()
-
-    # Reproducibility
-    pl.utilities.seed.seed_everything(seed=2)
-
-    # Dataloaders
-    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=150)
-
-    # Hyperparameters
-    hparams = dict(distribution=[1, 2, 3],
-                   proto_lr=0.01,
-                   bb_lr=0.01,
-                   input_dim=2,
-                   latent_dim=1)
-
-    # Initialize the backbone
-    backbone = Backbone(latent_size=hparams["input_dim"])
-
-    # Initialize the model
-    model = pt.models.SiameseGTLVQ(
-        hparams,
-        prototypes_initializer=pt.initializers.SMCI(train_ds),
-        backbone=backbone,
-        both_path_gradients=False,
-    )
-
-    # Model summary
-    print(model)
-
-    # Callbacks
-    vis = pt.models.VisSiameseGLVQ2D(data=train_ds, border=0.1)
-
-    # Setup trainer
-    trainer = pl.Trainer.from_argparse_args(
-        args,
-        callbacks=[vis],
-    )
-
-    # Training loop
-    trainer.fit(model, train_loader)
@@ -1,5 +1,7 @@
 """`models` plugin for the `prototorch` package."""
 
+from importlib.metadata import PackageNotFoundError, version
+
 from .callbacks import PrototypeConvergence, PruneLoserPrototypes
 from .cbc import CBC, ImageCBC
 from .glvq import (

@@ -8,34 +10,18 @@ from .glvq import (
     GLVQ21,
     GMLVQ,
     GRLVQ,
-    GTLVQ,
     LGMLVQ,
     LVQMLN,
     ImageGLVQ,
     ImageGMLVQ,
-    ImageGTLVQ,
     SiameseGLVQ,
     SiameseGMLVQ,
-    SiameseGTLVQ,
 )
 from .knn import KNN
-from .lvq import (
-    LVQ1,
-    LVQ21,
-    MedianLVQ,
-)
-from .probabilistic import (
-    CELVQ,
-    PLVQ,
-    RSLVQ,
-    SLVQ,
-)
-from .unsupervised import (
-    GrowingNeuralGas,
-    HeskesSOM,
-    KohonenSOM,
-    NeuralGas,
-)
+from .lvq import LVQ1, LVQ21, MedianLVQ
+from .nam import BinaryNAM
+from .probabilistic import CELVQ, PLVQ, RSLVQ, SLVQ
+from .unsupervised import GrowingNeuralGas, HeskesSOM, KohonenSOM, NeuralGas
 from .vis import *
 
-__version__ = "0.4.1"
+__version__ = "0.2.0"
@@ -1,5 +1,7 @@
 """Abstract classes to be inherited by prototorch models."""
 
+from typing import Final, final
+
 import pytorch_lightning as pl
 import torch
 import torchmetrics

@@ -14,7 +16,6 @@ from ..nn.wrappers import LambdaLayer
 
 class ProtoTorchBolt(pl.LightningModule):
     """All ProtoTorch models are ProtoTorch Bolts."""
-
     def __init__(self, hparams, **kwargs):
         super().__init__()
 

@@ -42,6 +43,7 @@ class ProtoTorchBolt(pl.LightningModule):
         else:
             return optimizer
 
+    @final
     def reconfigure_optimizers(self):
         self.trainer.accelerator.setup_optimizers(self.trainer)
 

@@ -53,7 +55,6 @@ class ProtoTorchBolt(pl.LightningModule):
 
 
 class PrototypeModel(ProtoTorchBolt):
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 

@@ -83,7 +84,6 @@ class PrototypeModel(ProtoTorchBolt):
 
 
 class UnsupervisedPrototypeModel(PrototypeModel):
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 

@@ -96,7 +96,7 @@ class UnsupervisedPrototypeModel(PrototypeModel):
         )
 
     def compute_distances(self, x):
-        protos = self.proto_layer().type_as(x)
+        protos = self.proto_layer()
         distances = self.distance_layer(x, protos)
         return distances
 

@@ -106,7 +106,6 @@ class UnsupervisedPrototypeModel(PrototypeModel):
 
 
 class SupervisedPrototypeModel(PrototypeModel):
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 

@@ -137,14 +136,14 @@ class SupervisedPrototypeModel(PrototypeModel):
 
     def forward(self, x):
         distances = self.compute_distances(x)
-        _, plabels = self.proto_layer()
+        plabels = self.proto_layer.labels
         winning = stratified_min_pooling(distances, plabels)
-        y_pred = torch.nn.functional.softmin(winning, dim=1)
+        y_pred = torch.nn.functional.softmin(winning)
         return y_pred
 
     def predict_from_distances(self, distances):
         with torch.no_grad():
-            _, plabels = self.proto_layer()
+            plabels = self.proto_layer.labels
             y_pred = self.competition_layer(distances, plabels)
             return y_pred
 

@@ -166,14 +165,6 @@ class SupervisedPrototypeModel(PrototypeModel):
                 prog_bar=True,
                 logger=True)
 
-    def test_step(self, batch, batch_idx):
-        x, targets = batch
-
-        preds = self.predict(x)
-        accuracy = torchmetrics.functional.accuracy(preds.int(), targets.int())
-
-        self.log("test_acc", accuracy)
-
 
 class ProtoTorchMixin(object):
     """All mixins are ProtoTorchMixins."""

@@ -182,10 +173,9 @@ class ProtoTorchMixin(object):
 
 class NonGradientMixin(ProtoTorchMixin):
     """Mixin for custom non-gradient optimization."""
-
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.automatic_optimization = False
+        self.automatic_optimization: Final = False
 
     def training_step(self, train_batch, batch_idx, optimizer_idx=None):
         raise NotImplementedError

@@ -193,7 +183,7 @@ class NonGradientMixin(ProtoTorchMixin):
 
 class ImagePrototypesMixin(ProtoTorchMixin):
     """Mixin for models with image prototypes."""
-
+    @final
     def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
         """Constrain the components to the range [0, 1] by clamping after updates."""
         self.proto_layer.components.data.clamp_(0.0, 1.0)
@@ -11,7 +11,6 @@ from .extras import ConnectionTopology
 
 
 class PruneLoserPrototypes(pl.Callback):
-
     def __init__(self,
                  threshold=0.01,
                  idle_epochs=10,

@@ -56,7 +55,7 @@ class PruneLoserPrototypes(pl.Callback):
             distribution = dict(zip(labels.tolist(), counts.tolist()))
             if self.verbose:
                 print(f"Re-adding pruned prototypes...")
-                print(f"distribution={distribution}")
+                print(f"{distribution=}")
             pl_module.add_prototypes(
                 distribution=distribution,
                 components_initializer=self.prototypes_initializer)

@@ -68,7 +67,6 @@ class PruneLoserPrototypes(pl.Callback):
 
 
 class PrototypeConvergence(pl.Callback):
-
     def __init__(self, min_delta=0.01, idle_epochs=10, verbose=False):
         self.min_delta = min_delta
         self.idle_epochs = idle_epochs  # epochs to wait

@@ -91,7 +89,6 @@ class GNGCallback(pl.Callback):
     Based on "A Growing Neural Gas Network Learns Topologies" by Bernd Fritzke.
 
     """
-
     def __init__(self, reduction=0.1, freq=10):
         self.reduction = reduction
         self.freq = freq

@@ -137,4 +134,4 @@ class GNGCallback(pl.Callback):
             pl_module.errors[
                 worst_neighbor] = errors[worst_neighbor] * self.reduction
 
-        trainer.accelerator.setup_optimizers(trainer)
+        trainer.accelerator_backend.setup_optimizers(trainer)
@@ -13,7 +13,6 @@ from .glvq import SiameseGLVQ
 
 class CBC(SiameseGLVQ):
     """Classification-By-Components."""
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 
123 prototorch/models/data.py Normal file

@@ -0,0 +1,123 @@
+"""Prototorch Data Modules
+
+This allows to store the used dataset inside a Lightning Module.
+Mainly used for PytorchLightningCLI configurations.
+"""
+from typing import Any, Optional, Type
+
+import prototorch as pt
+import pytorch_lightning as pl
+from torch.utils.data import DataLoader, Dataset, random_split
+from torchvision import transforms
+from torchvision.datasets import MNIST
+
+
+# MNIST
+class MNISTDataModule(pl.LightningDataModule):
+    def __init__(self, batch_size=32):
+        super().__init__()
+        self.batch_size = batch_size
+
+    # Download mnist dataset as side-effect, only called on the first cpu
+    def prepare_data(self):
+        MNIST("~/datasets", train=True, download=True)
+        MNIST("~/datasets", train=False, download=True)
+
+    # called for every GPU/machine (assigning state is OK)
+    def setup(self, stage=None):
+        # Transforms
+        transform = transforms.Compose([
+            transforms.ToTensor(),
+        ])
+        # Split dataset
+        if stage in (None, "fit"):
+            mnist_train = MNIST("~/datasets", train=True, transform=transform)
+            self.mnist_train, self.mnist_val = random_split(
+                mnist_train,
+                [55000, 5000],
+            )
+        if stage == (None, "test"):
+            self.mnist_test = MNIST(
+                "~/datasets",
+                train=False,
+                transform=transform,
+            )
+
+    # Dataloaders
+    def train_dataloader(self):
+        mnist_train = DataLoader(self.mnist_train, batch_size=self.batch_size)
+        return mnist_train
+
+    def val_dataloader(self):
+        mnist_val = DataLoader(self.mnist_val, batch_size=self.batch_size)
+        return mnist_val
+
+    def test_dataloader(self):
+        mnist_test = DataLoader(self.mnist_test, batch_size=self.batch_size)
+        return mnist_test
+
+
+# def train_on_mnist(batch_size=256) -> type:
+#     class DataClass(pl.LightningModule):
+#         datamodule = MNISTDataModule(batch_size=batch_size)
+
+#         def __init__(self, *args, **kwargs):
+#             prototype_initializer = kwargs.pop(
+#                 "prototype_initializer", pt.components.Zeros((28, 28, 1)))
+#             super().__init__(*args,
+#                              prototype_initializer=prototype_initializer,
+#                              **kwargs)
+
+#     dc: Type[DataClass] = DataClass
+#     return dc
+
+
+# ABSTRACT
+class GeneralDataModule(pl.LightningDataModule):
+    def __init__(self, dataset: Dataset, batch_size: int = 32) -> None:
+        super().__init__()
+        self.train_dataset = dataset
+        self.batch_size = batch_size
+
+    def train_dataloader(self) -> DataLoader:
+        return DataLoader(self.train_dataset, batch_size=self.batch_size)
+
+
+# def train_on_dataset(dataset: Dataset, batch_size: int = 256):
+#     class DataClass(pl.LightningModule):
+#         datamodule = GeneralDataModule(dataset, batch_size)
+#         datashape = dataset[0][0].shape
+#         example_input_array = torch.zeros_like(dataset[0][0]).unsqueeze(0)
+
+#         def __init__(self, *args: Any, **kwargs: Any) -> None:
+#             prototype_initializer = kwargs.pop(
+#                 "prototype_initializer",
+#                 pt.components.Zeros(self.datashape),
+#             )
+#             super().__init__(*args,
+#                              prototype_initializer=prototype_initializer,
+#                              **kwargs)
+
+#     return DataClass
+
+# if __name__ == "__main__":
+#     from prototorch.models import GLVQ
+
+#     demo_dataset = pt.datasets.Iris()
+
+#     TrainingClass: Type = train_on_dataset(demo_dataset)
+
+#     class DemoGLVQ(TrainingClass, GLVQ):
+#         """Model Definition."""
+
+#         # Hyperparameters
+#         hparams = dict(
+#             distribution={
+#                 "num_classes": 3,
+#                 "prototypes_per_class": 4
+#             },
+#             lr=0.01,
+#         )
+
+#     initialized = DemoGLVQ(hparams)
+#     print(initialized)
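As an aside, a datamodule like the `MNISTDataModule` added above plugs into a Lightning `Trainer` roughly as follows — a sketch, assuming some already-constructed LightningModule `model` (e.g. the `ImageGMLVQ` experiment from examples/cli/gmlvq.py):

```python
import pytorch_lightning as pl

from prototorch.models.data import MNISTDataModule

# The trainer drives prepare_data()/setup() itself; batch_size is the only knob.
dm = MNISTDataModule(batch_size=32)
trainer = pl.Trainer(max_epochs=10)
trainer.fit(model, datamodule=dm)  # `model`: any LightningModule, assumed defined
```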
@@ -15,46 +15,7 @@ def rank_scaled_gaussian(distances, lambd):
     return torch.exp(-torch.exp(-ranks / lambd) * distances)
 
 
-def orthogonalization(tensors):
-    """Orthogonalization via polar decomposition """
-    u, _, v = torch.svd(tensors, compute_uv=True)
-    u_shape = tuple(list(u.shape))
-    v_shape = tuple(list(v.shape))
-
-    # reshape to (num x N x M)
-    u = torch.reshape(u, (-1, u_shape[-2], u_shape[-1]))
-    v = torch.reshape(v, (-1, v_shape[-2], v_shape[-1]))
-
-    out = u @ v.permute([0, 2, 1])
-
-    out = torch.reshape(out, u_shape[:-1] + (v_shape[-2], ))
-
-    return out
-
-
-def ltangent_distance(x, y, omegas):
-    r"""Localized Tangent distance.
-    Compute Orthogonal Complement: :math:`\bm P_k = \bm I - \Omega_k \Omega_k^T`
-    Compute Tangent Distance: :math:`{\| \bm P \bm x - \bm P_k \bm y_k \|}_2`
-
-    :param `torch.tensor` omegas: Three dimensional matrix
-    :rtype: `torch.tensor`
-    """
-    x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
-    p = torch.eye(omegas.shape[-2], device=omegas.device) - torch.bmm(
-        omegas, omegas.permute([0, 2, 1]))
-    projected_x = x @ p
-    projected_y = torch.diagonal(y @ p).T
-    expanded_y = torch.unsqueeze(projected_y, dim=1)
-    batchwise_difference = expanded_y - projected_x
-    differences_squared = batchwise_difference**2
-    distances = torch.sqrt(torch.sum(differences_squared, dim=2))
-    distances = distances.permute(1, 0)
-    return distances
-
-
 class GaussianPrior(torch.nn.Module):
-
     def __init__(self, variance):
         super().__init__()
         self.variance = variance

@@ -64,7 +25,6 @@ class GaussianPrior(torch.nn.Module):
 
 
 class RankScaledGaussianPrior(torch.nn.Module):
-
     def __init__(self, lambd):
         super().__init__()
         self.lambd = lambd

@@ -74,7 +34,6 @@ class RankScaledGaussianPrior(torch.nn.Module):
 
 
 class ConnectionTopology(torch.nn.Module):
-
     def __init__(self, agelimit, num_prototypes):
         super().__init__()
         self.agelimit = agelimit
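For reference, the two helpers deleted above implement standard constructions; the math below is transcribed from their docstrings and code, assuming `orthogonalization` works on the SVD \(\Omega = U \Sigma V^{T}\) of each matrix in the batch:

```latex
% orthogonalization: the orthogonal polar factor of each matrix
\Omega_{\perp} = U V^{T}

% ltangent_distance: per-prototype orthogonal complement and tangent distance
P_k = I - \Omega_k \Omega_k^{T}, \qquad
d_k(x, y_k) = \left\| P_k\, x - P_k\, y_k \right\|_2
```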
@@ -4,26 +4,16 @@ import torch
 from torch.nn.parameter import Parameter
 
 from ..core.competitions import wtac
-from ..core.distances import (
-    lomega_distance,
-    omega_distance,
-    squared_euclidean_distance,
-)
+from ..core.distances import lomega_distance, omega_distance, squared_euclidean_distance
 from ..core.initializers import EyeTransformInitializer
-from ..core.losses import (
-    GLVQLoss,
-    lvq1_loss,
-    lvq21_loss,
-)
+from ..core.losses import GLVQLoss, lvq1_loss, lvq21_loss
 from ..core.transforms import LinearTransform
 from ..nn.wrappers import LambdaLayer, LossLayer
 from .abstract import ImagePrototypesMixin, SupervisedPrototypeModel
-from .extras import ltangent_distance, orthogonalization
 
 
 class GLVQ(SupervisedPrototypeModel):
     """Generalized Learning Vector Quantization."""
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 

@@ -65,7 +55,7 @@ class GLVQ(SupervisedPrototypeModel):
     def shared_step(self, batch, batch_idx, optimizer_idx=None):
         x, y = batch
         out = self.compute_distances(x)
-        _, plabels = self.proto_layer()
+        plabels = self.proto_layer.labels
         loss = self.loss(out, y, plabels)
         return out, loss
 

@@ -108,7 +98,6 @@ class SiameseGLVQ(GLVQ):
     transformation pipeline are only learned from the inputs.
 
     """
-
     def __init__(self,
                  hparams,
                  backbone=torch.nn.Identity(),

@@ -123,8 +112,7 @@ class SiameseGLVQ(GLVQ):
         proto_opt = self.optimizer(self.proto_layer.parameters(),
                                    lr=self.hparams.proto_lr)
         # Only add a backbone optimizer if backbone has trainable parameters
-        bb_params = list(self.backbone.parameters())
-        if (bb_params):
+        if (bb_params := list(self.backbone.parameters())):
             bb_opt = self.optimizer(bb_params, lr=self.hparams.bb_lr)
             optimizers = [proto_opt, bb_opt]
         else:

@@ -175,7 +163,6 @@ class LVQMLN(SiameseGLVQ):
     rather in the embedding space.
 
     """
-
     def compute_distances(self, x):
         latent_protos, _ = self.proto_layer()
         latent_x = self.backbone(x)

@@ -191,7 +178,6 @@ class GRLVQ(SiameseGLVQ):
     TODO Make a RelevanceLayer. `bb_lr` is ignored otherwise.
 
     """
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 

@@ -217,7 +203,6 @@ class SiameseGMLVQ(SiameseGLVQ):
     Implemented as a Siamese network with a linear transformation backbone.
 
     """
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 

@@ -248,7 +233,6 @@ class GMLVQ(GLVQ):
     function. This makes it easier to implement a localized variant.
 
     """
-
     def __init__(self, hparams, **kwargs):
         distance_fn = kwargs.pop("distance_fn", omega_distance)
         super().__init__(hparams, distance_fn=distance_fn, **kwargs)

@@ -266,12 +250,6 @@ class GMLVQ(GLVQ):
     def omega_matrix(self):
         return self._omega.detach().cpu()
 
-    @property
-    def lambda_matrix(self):
-        omega = self._omega.detach()  # (input_dim, latent_dim)
-        lam = omega @ omega.T
-        return lam.detach().cpu()
-
     def compute_distances(self, x):
         protos, _ = self.proto_layer()
         distances = self.distance_layer(x, protos, self._omega)

@@ -283,7 +261,6 @@ class GMLVQ(GLVQ):
 
 class LGMLVQ(GMLVQ):
     """Localized and Generalized Matrix Learning Vector Quantization."""
-
     def __init__(self, hparams, **kwargs):
         distance_fn = kwargs.pop("distance_fn", lomega_distance)
         super().__init__(hparams, distance_fn=distance_fn, **kwargs)

@@ -298,48 +275,8 @@ class LGMLVQ(GMLVQ):
         self.register_parameter("_omega", Parameter(omega))
 
 
-class GTLVQ(LGMLVQ):
-    """Localized and Generalized Tangent Learning Vector Quantization."""
-
-    def __init__(self, hparams, **kwargs):
-        distance_fn = kwargs.pop("distance_fn", ltangent_distance)
-        super().__init__(hparams, distance_fn=distance_fn, **kwargs)
-
-        omega_initializer = kwargs.get("omega_initializer")
-
-        if omega_initializer is not None:
-            subspace = omega_initializer.generate(self.hparams.input_dim,
-                                                  self.hparams.latent_dim)
-            omega = torch.repeat_interleave(subspace.unsqueeze(0),
-                                            self.num_prototypes,
-                                            dim=0)
-        else:
-            omega = torch.rand(
-                self.num_prototypes,
-                self.hparams.input_dim,
-                self.hparams.latent_dim,
-                device=self.device,
-            )
-
-        # Re-register `_omega` to override the one from the super class.
-        self.register_parameter("_omega", Parameter(omega))
-
-    def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
-        with torch.no_grad():
-            self._omega.copy_(orthogonalization(self._omega))
-
-
-class SiameseGTLVQ(SiameseGLVQ, GTLVQ):
-    """Generalized Tangent Learning Vector Quantization.
-
-    Implemented as a Siamese network with a linear transformation backbone.
-
-    """
-
-
 class GLVQ1(GLVQ):
     """Generalized Learning Vector Quantization 1."""
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
         self.loss = LossLayer(lvq1_loss)

@@ -348,7 +285,6 @@ class GLVQ1(GLVQ):
 
 class GLVQ21(GLVQ):
     """Generalized Learning Vector Quantization 2.1."""
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
         self.loss = LossLayer(lvq21_loss)

@@ -371,18 +307,3 @@ class ImageGMLVQ(ImagePrototypesMixin, GMLVQ):
     after updates.
 
     """
-
-
-class ImageGTLVQ(ImagePrototypesMixin, GTLVQ):
-    """GTLVQ for training on image data.
-
-    GTLVQ model that constrains the prototypes to the range [0, 1] by clamping
-    after updates.
-
-    """
-
-    def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
-        """Constrain the components to the range [0, 1] by clamping after updates."""
-        self.proto_layer.components.data.clamp_(0.0, 1.0)
-        with torch.no_grad():
-            self._omega.copy_(orthogonalization(self._omega))
@@ -4,17 +4,13 @@ import warnings
 
 from ..core.competitions import KNNC
 from ..core.components import LabeledComponents
-from ..core.initializers import (
-    LiteralCompInitializer,
-    LiteralLabelsInitializer,
-)
+from ..core.initializers import LiteralCompInitializer, LiteralLabelsInitializer
 from ..utils.utils import parse_data_arg
 from .abstract import SupervisedPrototypeModel
 
 
 class KNN(SupervisedPrototypeModel):
     """K-Nearest-Neighbors classification algorithm."""
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 
@@ -9,9 +9,10 @@ from .glvq import GLVQ
 
 class LVQ1(NonGradientMixin, GLVQ):
     """Learning Vector Quantization 1."""
 
     def training_step(self, train_batch, batch_idx, optimizer_idx=None):
-        protos, plables = self.proto_layer()
+        protos = self.proto_layer.components
+        plabels = self.proto_layer.labels
 
         x, y = train_batch
         dis = self.compute_distances(x)
         # TODO Vectorized implementation

@@ -29,8 +30,8 @@ class LVQ1(NonGradientMixin, GLVQ):
         self.proto_layer.load_state_dict({"_components": updated_protos},
                                          strict=False)
 
-        print(f"dis={dis}")
-        print(f"y={y}")
+        print(f"{dis=}")
+        print(f"{y=}")
         # Logging
         self.log_acc(dis, y, tag="train_acc")
 

@@ -39,9 +40,9 @@ class LVQ1(NonGradientMixin, GLVQ):
 
 class LVQ21(NonGradientMixin, GLVQ):
     """Learning Vector Quantization 2.1."""
-
     def training_step(self, train_batch, batch_idx, optimizer_idx=None):
-        protos, plabels = self.proto_layer()
+        protos = self.proto_layer.components
+        plabels = self.proto_layer.labels
 
         x, y = train_batch
         dis = self.compute_distances(x)

@@ -72,7 +73,6 @@ class MedianLVQ(NonGradientMixin, GLVQ):
     # TODO Avoid computing distances over and over
 
     """
-
     def __init__(self, hparams, verbose=True, **kwargs):
         self.verbose = verbose
         super().__init__(hparams, **kwargs)

@@ -99,7 +99,8 @@ class MedianLVQ(NonGradientMixin, GLVQ):
         return lower_bound
 
     def training_step(self, train_batch, batch_idx, optimizer_idx=None):
-        protos, plabels = self.proto_layer()
+        protos = self.proto_layer.components
+        plabels = self.proto_layer.labels
 
         x, y = train_batch
         dis = self.compute_distances(x)
58 prototorch/models/nam.py Normal file

@@ -0,0 +1,58 @@
+"""ProtoTorch Neural Additive Model."""
+
+import torch
+import torchmetrics
+
+from .abstract import ProtoTorchBolt
+
+
+class BinaryNAM(ProtoTorchBolt):
+    """Neural Additive Model for binary classification.
+
+    Paper: https://arxiv.org/abs/2004.13912
+    Official implementation: https://github.com/google-research/google-research/tree/master/neural_additive_models
+
+    """
+    def __init__(self, hparams: dict, extractors: torch.nn.ModuleList,
+                 **kwargs):
+        super().__init__(hparams, **kwargs)
+
+        # Default hparams
+        self.hparams.setdefault("threshold", 0.5)
+
+        self.extractors = extractors
+        self.linear = torch.nn.Linear(in_features=len(extractors),
+                                      out_features=1,
+                                      bias=True)
+
+    def extract(self, x):
+        """Apply the local extractors batch-wise on features."""
+        out = torch.zeros_like(x)
+        for j in range(x.shape[1]):
+            out[:, j] = self.extractors[j](x[:, j].unsqueeze(1)).squeeze()
+        return out
+
+    def forward(self, x):
+        x = self.extract(x)
+        x = self.linear(x)
+        return torch.sigmoid(x)
+
+    def training_step(self, batch, batch_idx, optimizer_idx=None):
+        x, y = batch
+        preds = self(x).squeeze()
+        train_loss = torch.nn.functional.binary_cross_entropy(preds, y.float())
+        self.log("train_loss", train_loss)
+        accuracy = torchmetrics.functional.accuracy(preds.int(), y.int())
+        self.log("train_acc",
+                 accuracy,
+                 on_step=False,
+                 on_epoch=True,
+                 prog_bar=True,
+                 logger=True)
+        return train_loss
+
+    def predict(self, x):
+        out = self(x)
+        pred = torch.zeros_like(out, device=self.device)
+        pred[out > self.hparams.threshold] = 1
+        return pred
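In equation form, `BinaryNAM.forward` computes the standard NAM prediction — one extractor per input feature, combined by the final linear layer (notation chosen here for illustration: f_j are the extractors, w and b the linear layer's weights and bias, σ the sigmoid):

```latex
\hat{y}(x) = \sigma\!\Big(b + \sum_{j=1}^{d} w_j \, f_j(x_j)\Big)
```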
@@ -1,5 +1,4 @@
 """Probabilistic GLVQ methods"""
-
 import torch
 
 from ..core.losses import nllr_loss, rslvq_loss

@@ -11,7 +10,6 @@ from .glvq import GLVQ, SiameseGMLVQ
 
 class CELVQ(GLVQ):
     """Cross-Entropy Learning Vector Quantization."""
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 

@@ -21,7 +19,7 @@ class CELVQ(GLVQ):
     def shared_step(self, batch, batch_idx, optimizer_idx=None):
         x, y = batch
         out = self.compute_distances(x)  # [None, num_protos]
-        _, plabels = self.proto_layer()
+        plabels = self.proto_layer.labels
         winning = stratified_min_pooling(out, plabels)  # [None, num_classes]
         probs = -1.0 * winning
         batch_loss = self.loss(probs, y.long())

@@ -30,11 +28,10 @@ class CELVQ(GLVQ):
 
 
 class ProbabilisticLVQ(GLVQ):
-
     def __init__(self, hparams, rejection_confidence=0.0, **kwargs):
         super().__init__(hparams, **kwargs)
 
-        self.conditional_distribution = None
+        self.conditional_distribution = GaussianPrior(self.hparams.variance)
         self.rejection_confidence = rejection_confidence
 
     def forward(self, x):

@@ -56,28 +53,25 @@ class ProbabilisticLVQ(GLVQ):
     def training_step(self, batch, batch_idx, optimizer_idx=None):
         x, y = batch
         out = self.forward(x)
-        _, plabels = self.proto_layer()
+        plabels = self.proto_layer.labels
         batch_loss = self.loss(out, y, plabels)
-        loss = batch_loss.sum()
-        return loss
+        train_loss = batch_loss.sum()
+        self.log("train_loss", train_loss)
+        return train_loss
 
 
 class SLVQ(ProbabilisticLVQ):
     """Soft Learning Vector Quantization."""
-
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.loss = LossLayer(nllr_loss)
-        self.conditional_distribution = GaussianPrior(self.hparams.variance)
 
 
 class RSLVQ(ProbabilisticLVQ):
     """Robust Soft Learning Vector Quantization."""
-
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.loss = LossLayer(rslvq_loss)
-        self.conditional_distribution = GaussianPrior(self.hparams.variance)
 
 
 class PLVQ(ProbabilisticLVQ, SiameseGMLVQ):

@@ -85,7 +79,6 @@ class PLVQ(ProbabilisticLVQ, SiameseGMLVQ):
 
     TODO: Use Backbone LVQ instead
     """
-
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.conditional_distribution = RankScaledGaussianPrior(
@@ -18,7 +18,6 @@ class KohonenSOM(NonGradientMixin, UnsupervisedPrototypeModel):
     TODO Allow non-2D grids
 
     """
-
     def __init__(self, hparams, **kwargs):
         h, w = hparams.get("shape")
         # Ignore `num_prototypes`

@@ -54,7 +53,7 @@ class KohonenSOM(NonGradientMixin, UnsupervisedPrototypeModel):
         grid = self._grid.view(-1, 2)
         gd = squared_euclidean_distance(wp, grid)
         nh = torch.exp(-gd / self._sigma**2)
-        protos = self.proto_layer()
+        protos = self.proto_layer.components
         diff = x.unsqueeze(dim=1) - protos
         delta = self._lr * self.hparams.alpha * nh.unsqueeze(-1) * diff
         updated_protos = protos + delta.sum(dim=0)

@@ -70,7 +69,6 @@ class KohonenSOM(NonGradientMixin, UnsupervisedPrototypeModel):
 
 
 class HeskesSOM(UnsupervisedPrototypeModel):
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 

@@ -80,7 +78,6 @@ class HeskesSOM(UnsupervisedPrototypeModel):
 
 
 class NeuralGas(UnsupervisedPrototypeModel):
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 

@@ -113,7 +110,6 @@ class NeuralGas(UnsupervisedPrototypeModel):
 
 
 class GrowingNeuralGas(NeuralGas):
-
     def __init__(self, hparams, **kwargs):
         super().__init__(hparams, **kwargs)
 
@@ -11,7 +11,6 @@ from ..utils.utils import mesh2d
 
 
 class Vis2DAbstract(pl.Callback):
-
     def __init__(self,
                  data,
                  title="Prototype Visualization",

@@ -118,8 +117,25 @@ class Vis2DAbstract(pl.Callback):
         plt.close()
 
 
-class VisGLVQ2D(Vis2DAbstract):
-
+class Vis2D(Vis2DAbstract):
+    def on_epoch_end(self, trainer, pl_module):
+        if not self.precheck(trainer):
+            return True
+
+        x_train, y_train = self.x_train, self.y_train
+        ax = self.setup_ax(xlabel="Data dimension 1",
+                           ylabel="Data dimension 2")
+        self.plot_data(ax, x_train, y_train)
+        mesh_input, xx, yy = mesh2d(x_train, self.border, self.resolution)
+        mesh_input = torch.from_numpy(mesh_input).type_as(x_train)
+        y_pred = pl_module.predict(mesh_input)
+        y_pred = y_pred.cpu().reshape(xx.shape)
+        ax.contourf(xx, yy, y_pred, cmap=self.cmap, alpha=0.35)
+
+        self.log_and_display(trainer, pl_module)
+
+
+class VisGLVQ2D(Vis2DAbstract):
     def on_epoch_end(self, trainer, pl_module):
         if not self.precheck(trainer):
             return True

@@ -143,7 +159,6 @@ class VisGLVQ2D(Vis2DAbstract):
 
 
 class VisSiameseGLVQ2D(Vis2DAbstract):
-
     def __init__(self, *args, map_protos=True, **kwargs):
         super().__init__(*args, **kwargs)
         self.map_protos = map_protos

@@ -181,42 +196,7 @@ class VisSiameseGLVQ2D(Vis2DAbstract):
         self.log_and_display(trainer, pl_module)
 
 
-class VisGMLVQ2D(Vis2DAbstract):
-
-    def __init__(self, *args, ev_proj=True, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.ev_proj = ev_proj
-
-    def on_epoch_end(self, trainer, pl_module):
-        if not self.precheck(trainer):
-            return True
-
-        protos = pl_module.prototypes
-        plabels = pl_module.prototype_labels
-        x_train, y_train = self.x_train, self.y_train
-        device = pl_module.device
-        omega = pl_module._omega.detach()
-        lam = omega @ omega.T
-        u, _, _ = torch.pca_lowrank(lam, q=2)
-        with torch.no_grad():
-            x_train = torch.Tensor(x_train).to(device)
-            x_train = x_train @ u
-            x_train = x_train.cpu().detach()
-        if self.show_protos:
-            with torch.no_grad():
-                protos = torch.Tensor(protos).to(device)
-                protos = protos @ u
-                protos = protos.cpu().detach()
-        ax = self.setup_ax()
-        self.plot_data(ax, x_train, y_train)
-        if self.show_protos:
-            self.plot_protos(ax, protos, plabels)
-
-        self.log_and_display(trainer, pl_module)
-
-
 class VisCBC2D(Vis2DAbstract):
-
     def on_epoch_end(self, trainer, pl_module):
         if not self.precheck(trainer):
             return True

@@ -240,7 +220,6 @@ class VisCBC2D(Vis2DAbstract):
 
 
 class VisNG2D(Vis2DAbstract):
-
     def on_epoch_end(self, trainer, pl_module):
         if not self.precheck(trainer):
             return True

@@ -268,7 +247,6 @@ class VisNG2D(Vis2DAbstract):
 
 
 class VisImgComp(Vis2DAbstract):
-
     def __init__(self,
                  *args,
                  random_data=0,

@@ -291,6 +269,8 @@ class VisImgComp(Vis2DAbstract):
                                    size=self.embedding_data,
                                    replace=False)
             data = self.x_train[ind]
+            # print(f"{data.shape=}")
+            # print(f"{self.y_train[ind].shape=}")
             tb.add_embedding(data.view(len(ind), -1),
                              label_img=data,
                              global_step=None,
23 setup.cfg

@@ -1,23 +1,8 @@
-[isort]
-profile = hug
-src_paths = isort, test
-
-[yapf]
-based_on_style = pep8
-spaces_before_comment = 2
-split_before_logical_operator = true
-
-[pylint]
-disable =
-    too-many-arguments,
-    too-few-public-methods,
-    fixme,
-
-
-[pycodestyle]
-max-line-length = 79
-
+[isort]
+profile = hug
+src_paths = isort, test
+multi_line_output = 3
+include_trailing_comma = True
+force_grid_wrap = 3
+use_parentheses = True
+line_length = 79
10 setup.py

@@ -22,7 +22,7 @@ with open("README.md", "r") as fh:
     long_description = fh.read()
 
 INSTALL_REQUIRES = [
-    "prototorch>=0.7.0",
+    "prototorch>=0.6.0",
     "pytorch_lightning>=1.3.5",
     "torchmetrics",
 ]

@@ -37,7 +37,6 @@ DOCS = [
     "recommonmark",
     "sphinx",
     "nbsphinx",
-    "ipykernel",
     "sphinx_rtd_theme",
     "sphinxcontrib-katex",
     "sphinxcontrib-bibtex",

@@ -54,7 +53,7 @@ ALL = CLI + DEV + DOCS + EXAMPLES + TESTS
 
 setup(
     name=safe_name("prototorch_" + PLUGIN_NAME),
-    version="0.4.1",
+    version="0.2.0",
     description="Pre-packaged prototype-based "
     "machine learning models using ProtoTorch and PyTorch-Lightning.",
     long_description=long_description,

@@ -64,7 +63,7 @@ setup(
     url=PROJECT_URL,
     download_url=DOWNLOAD_URL,
     license="MIT",
-    python_requires=">=3.6",
+    python_requires=">=3.9",
     install_requires=INSTALL_REQUIRES,
     extras_require={
         "dev": DEV,

@@ -81,9 +80,6 @@ setup(
         "License :: OSI Approved :: MIT License",
         "Natural Language :: English",
         "Programming Language :: Python :: 3.9",
-        "Programming Language :: Python :: 3.8",
-        "Programming Language :: Python :: 3.7",
-        "Programming Language :: Python :: 3.6",
         "Operating System :: OS Independent",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
         "Topic :: Software Development :: Libraries",
@@ -4,7 +4,6 @@ import unittest
 
 
 class TestDummy(unittest.TestCase):
-
     def setUp(self):
         pass
 
@@ -1,27 +1,11 @@
 #! /bin/bash
 
-
-# Read Flags
-gpu=0
-while [ -n "$1" ]; do
-  case "$1" in
-    --gpu) gpu=1;;
-    -g) gpu=1;;
-    *) path=$1;;
-  esac
-  shift
-done
-
-python --version
-echo "Using GPU: " $gpu
-
 # Loop
 failed=0
-
-for example in $(find $path -maxdepth 1 -name "*.py")
+for example in $(find $1 -maxdepth 1 -name "*.py")
 do
     echo -n "$x" $example '... '
-    export DISPLAY= && python $example --fast_dev_run 1 --gpus $gpu &> run_log.txt
+    export DISPLAY= && python $example --fast_dev_run 1 &> run_log.txt
     if [[ $? -ne 0 ]]; then
         echo "FAILED!!"
         cat run_log.txt
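For comparison, the two variants of the script are invoked differently (illustrative invocations, not taken from the repository docs): the old version accepted an optional GPU flag plus a search path, e.g. `./tests/test_examples.sh --gpu examples/`, while the new one takes only the search path as its first positional argument, e.g. `./tests/test_examples.sh examples/`.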