6 Commits

Author                  SHA1        Message                                           Date
Alexander Engelsberger  7d4a041df2  build: bump version 0.2.0 → 0.3.0                 2021-08-30 20:50:03 +02:00
Alexander Engelsberger  04c51c00c6  ci: separate build step                           2021-08-30 20:44:16 +02:00
Alexander Engelsberger  62185b38cf  chore: Update prototorch dependency               2021-08-30 20:32:47 +02:00
Alexander Engelsberger  7b93cd4ad5  feat(compatibility): Python3.6 compatibility      2021-08-30 20:32:40 +02:00
Alexander Engelsberger  d7834e2cc0  fix: All examples should work on CPU and GPU now  2021-08-05 11:20:02 +02:00
Alexander Engelsberger  0af8cf36f8  fix: labels were on cpu in forward pass           2021-08-05 09:14:32 +02:00
16 changed files with 78 additions and 293 deletions

.bumpversion.cfg

@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.2.0
+current_version = 0.3.0
 commit = True
 tag = True
 parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)

.travis.yml

@@ -1,7 +1,11 @@
 dist: bionic
 sudo: false
 language: python
-python: 3.9
+python:
+  - 3.9
+  - 3.8
+  - 3.7
+  - 3.6
 cache:
   directories:
     - "$HOME/.cache/pip"
@@ -15,11 +19,26 @@ script:
   - ./tests/test_examples.sh examples/
 after_success:
   - bash <(curl -s https://codecov.io/bash)
-deploy:
-  provider: pypi
-  username: __token__
-  password:
-    secure: PDoASdYdVlt1aIROYilAsCW6XpBs/TDel0CSptDzX0CI7i4+ksEW6Jk0JyL58bQt7V4F8PeGty4A8SODzAUIk2d8sty5RI4VJjvXZFCXlUsW+JGUN3EvWNqJLnwN8TDxgu2ENao37GUh0dC6pL8b6bVDGeOLaY1E/YR1jimmTJuxxjKjBIU8ByqTNBnC3rzybMTPU3nRoOM/WMQUyReHrPoUJj685sLqrLruhAqhiYsPbotP8xY6i8+KBbhp5vgiARV2+LkbeGcYZwozCzrEqPKY7YIfVPh895cw0v4NRyFwK1P2jyyIt22Z9Ni0Uy1J5/Qp9Sv6mBPeGjm3pnpDCQyS+2bNIDaj08KUYTIo1mC/Jcu4jQgppZEF+oey9q1tgGo+/JhsTeERKV9BoPF5HDiRArU1s5aWJjFnCsHfu+W1XqX8bwN3aTYsEIaApT3/irc6XyFJIfMN82+z+lUcZ4Y1yAHT3nH1Vif+pZYZB0UOSGrHwuI/UayjKzbCzHMuHWylWB/9ehd4o4YVp6iubVHc7Sj0KQkwBgwgl6TvwNcUuFsplFabCxmX0mVcavXsWiOBc+ivPmU6574zGj0JcEk5ghVgnKH+QS96aVrKOzegwbl4O13jY8dJp+/zgXl0gJOvRKr4BhuBJKcBaMQHdSKUChVsJJtqDyt59GvWcbg=
-  on:
-    tags: true
-  skip_existing: true
+
+# Publish on PyPI
+jobs:
+  include:
+    - stage: build
+      python: 3.9
+      script: echo "Starting Pypi build"
+      deploy:
+        provider: pypi
+        username: __token__
+        distributions: "sdist bdist_wheel"
+        password:
+          secure: PDoASdYdVlt1aIROYilAsCW6XpBs/TDel0CSptDzX0CI7i4+ksEW6Jk0JyL58bQt7V4F8PeGty4A8SODzAUIk2d8sty5RI4VJjvXZFCXlUsW+JGUN3EvWNqJLnwN8TDxgu2ENao37GUh0dC6pL8b6bVDGeOLaY1E/YR1jimmTJuxxjKjBIU8ByqTNBnC3rzybMTPU3nRoOM/WMQUyReHrPoUJj685sLqrLruhAqhiYsPbotP8xY6i8+KBbhp5vgiARV2+LkbeGcYZwozCzrEqPKY7YIfVPh895cw0v4NRyFwK1P2jyyIt22Z9Ni0Uy1J5/Qp9Sv6mBPeGjm3pnpDCQyS+2bNIDaj08KUYTIo1mC/Jcu4jQgppZEF+oey9q1tgGo+/JhsTeERKV9BoPF5HDiRArU1s5aWJjFnCsHfu+W1XqX8bwN3aTYsEIaApT3/irc6XyFJIfMN82+z+lUcZ4Y1yAHT3nH1Vif+pZYZB0UOSGrHwuI/UayjKzbCzHMuHWylWB/9ehd4o4YVp6iubVHc7Sj0KQkwBgwgl6TvwNcUuFsplFabCxmX0mVcavXsWiOBc+ivPmU6574zGj0JcEk5ghVgnKH+QS96aVrKOzegwbl4O13jY8dJp+/zgXl0gJOvRKr4BhuBJKcBaMQHdSKUChVsJJtqDyt59GvWcbg=
+        on:
+          tags: true
+        skip_existing: true
+
+# The password is encrypted with:
+# `cd prototorch && travis encrypt your-pypi-api-token --add deploy.password`
+# See https://docs.travis-ci.com/user/deployment/pypi and
+# https://github.com/travis-ci/travis.rb#installation
+# for more details
+# Note: The encrypt command does not work well in ZSH.

docs/source/conf.py

@@ -23,7 +23,7 @@ author = "Jensun Ravichandran"
 # The full version, including alpha/beta/rc tags
 #
-release = "0.2.0"
+release = "0.3.0"
 
 # -- General configuration ---------------------------------------------------

examples/nam_tecator.py

@@ -1,81 +0,0 @@
"""Neural Additive Model (NAM) example for binary classification."""

import argparse

import prototorch as pt
import pytorch_lightning as pl
import torch
from matplotlib import pyplot as plt

if __name__ == "__main__":
    # Command-line arguments
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    args = parser.parse_args()

    # Dataset
    train_ds = pt.datasets.Tecator("~/datasets")

    # Dataloaders
    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=64)

    # Hyperparameters
    hparams = dict(lr=0.1)

    # Define the feature extractor
    class FE(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.modules_list = torch.nn.ModuleList([
                torch.nn.Linear(1, 3),
                torch.nn.Sigmoid(),
                torch.nn.Linear(3, 1),
                torch.nn.Sigmoid(),
            ])

        def forward(self, x):
            for m in self.modules_list:
                x = m(x)
            return x

    # Initialize the model
    model = pt.models.BinaryNAM(
        hparams,
        extractors=torch.nn.ModuleList([FE() for _ in range(100)]),
    )

    # Compute intermediate input and output sizes
    model.example_input_array = torch.zeros(4, 100)

    # Callbacks
    es = pl.callbacks.EarlyStopping(
        monitor="train_loss",
        min_delta=0.001,
        patience=20,
        mode="min",
        verbose=True,
        check_on_train_epoch_end=True,
    )

    # Setup trainer
    trainer = pl.Trainer.from_argparse_args(
        args,
        callbacks=[
            es,
        ],
        terminate_on_nan=True,
        weights_summary=None,
        accelerator="ddp",
    )

    # Training loop
    trainer.fit(model, train_loader)

    # Visualize extractor shape functions
    fig, axes = plt.subplots(10, 10)
    for i, ax in enumerate(axes.flat):
        x = torch.linspace(-2, 2, 100)  # TODO use min/max from data
        y = model.extractors[i](x.view(100, 1)).squeeze().detach()
        ax.plot(x, y)
        ax.set(title=f"Feature {i + 1}", xticklabels=[], yticklabels=[])
    plt.show()

examples/nam_xor.py

@@ -1,86 +0,0 @@
"""Neural Additive Model (NAM) example for binary classification."""

import argparse

import prototorch as pt
import pytorch_lightning as pl
import torch
from matplotlib import pyplot as plt

if __name__ == "__main__":
    # Command-line arguments
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    args = parser.parse_args()

    # Dataset
    train_ds = pt.datasets.XOR()

    # Dataloaders
    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=256)

    # Hyperparameters
    hparams = dict(lr=0.001)

    # Define the feature extractor
    class FE(torch.nn.Module):
        def __init__(self, hidden_size=10):
            super().__init__()
            self.modules_list = torch.nn.ModuleList([
                torch.nn.Linear(1, hidden_size),
                torch.nn.ReLU(),
                torch.nn.Linear(hidden_size, 1),
                torch.nn.ReLU(),
            ])

        def forward(self, x):
            for m in self.modules_list:
                x = m(x)
            return x

    # Initialize the model
    model = pt.models.BinaryNAM(
        hparams,
        extractors=torch.nn.ModuleList([FE(20) for _ in range(2)]),
    )

    # Compute intermediate input and output sizes
    model.example_input_array = torch.zeros(4, 2)

    # Summary
    print(model)

    # Callbacks
    vis = pt.models.Vis2D(data=train_ds)
    es = pl.callbacks.EarlyStopping(
        monitor="train_loss",
        min_delta=0.001,
        patience=50,
        mode="min",
        verbose=False,
        check_on_train_epoch_end=True,
    )

    # Setup trainer
    trainer = pl.Trainer.from_argparse_args(
        args,
        callbacks=[
            vis,
            es,
        ],
        terminate_on_nan=True,
        weights_summary="full",
        accelerator="ddp",
    )

    # Training loop
    trainer.fit(model, train_loader)

    # Visualize extractor shape functions
    fig, axes = plt.subplots(2)
    for i, ax in enumerate(axes.flat):
        x = torch.linspace(0, 1, 100)  # TODO use min/max from data
        y = model.extractors[i](x.view(100, 1)).squeeze().detach()
        ax.plot(x, y)
        ax.set(title=f"Feature {i + 1}")
    plt.show()

prototorch/models/__init__.py

@@ -1,7 +1,5 @@
 """`models` plugin for the `prototorch` package."""
 
-from importlib.metadata import PackageNotFoundError, version
-
 from .callbacks import PrototypeConvergence, PruneLoserPrototypes
 from .cbc import CBC, ImageCBC
 from .glvq import (
@@ -19,9 +17,8 @@ from .glvq import (
 )
 from .knn import KNN
 from .lvq import LVQ1, LVQ21, MedianLVQ
-from .nam import BinaryNAM
 from .probabilistic import CELVQ, PLVQ, RSLVQ, SLVQ
 from .unsupervised import GrowingNeuralGas, HeskesSOM, KohonenSOM, NeuralGas
 from .vis import *
 
-__version__ = "0.2.0"
+__version__ = "0.3.0"

prototorch/models/abstract.py

@@ -1,7 +1,5 @@
 """Abstract classes to be inherited by prototorch models."""
 
-from typing import Final, final
-
 import pytorch_lightning as pl
 import torch
 import torchmetrics
@@ -43,7 +41,6 @@ class ProtoTorchBolt(pl.LightningModule):
         else:
             return optimizer
 
-    @final
     def reconfigure_optimizers(self):
         self.trainer.accelerator.setup_optimizers(self.trainer)
@@ -96,7 +93,7 @@ class UnsupervisedPrototypeModel(PrototypeModel):
         )
 
     def compute_distances(self, x):
-        protos = self.proto_layer()
+        protos = self.proto_layer().type_as(x)
         distances = self.distance_layer(x, protos)
         return distances
@@ -136,14 +133,14 @@ class SupervisedPrototypeModel(PrototypeModel):
     def forward(self, x):
         distances = self.compute_distances(x)
-        plabels = self.proto_layer.labels
+        _, plabels = self.proto_layer()
         winning = stratified_min_pooling(distances, plabels)
         y_pred = torch.nn.functional.softmin(winning)
         return y_pred
 
     def predict_from_distances(self, distances):
         with torch.no_grad():
-            plabels = self.proto_layer.labels
+            _, plabels = self.proto_layer()
             y_pred = self.competition_layer(distances, plabels)
             return y_pred
@@ -175,7 +172,7 @@ class NonGradientMixin(ProtoTorchMixin):
     """Mixin for custom non-gradient optimization."""
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self.automatic_optimization: Final = False
+        self.automatic_optimization = False
 
     def training_step(self, train_batch, batch_idx, optimizer_idx=None):
         raise NotImplementedError
@@ -183,7 +180,6 @@
 class ImagePrototypesMixin(ProtoTorchMixin):
     """Mixin for models with image prototypes."""
 
-    @final
     def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
         """Constrain the components to the range [0, 1] by clamping after updates."""
         self.proto_layer.components.data.clamp_(0.0, 1.0)
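The `compute_distances` change above is the heart of the CPU/GPU fix: `.type_as(x)` casts the prototype tensor to the dtype and device of the incoming batch before distances are computed. A minimal sketch of the pattern, with illustrative tensors rather than the actual `proto_layer` output:

import torch

# Illustrative tensors: prototypes initialized on the CPU, while the
# batch may arrive on a GPU once Lightning moves the model.
protos = torch.randn(4, 2)          # CPU float32
x = torch.randn(8, 2)               # could be on cuda:0 during training

# .type_as casts protos to x's dtype *and* device, so the distance
# computation below never mixes CPU and GPU tensors.
protos = protos.type_as(x)
distances = torch.cdist(x, protos)  # (8, 4) pairwise distances
print(distances.shape)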

prototorch/models/callbacks.py

@@ -55,7 +55,7 @@ class PruneLoserPrototypes(pl.Callback):
             distribution = dict(zip(labels.tolist(), counts.tolist()))
             if self.verbose:
                 print(f"Re-adding pruned prototypes...")
-                print(f"{distribution=}")
+                print(f"distribution={distribution}")
             pl_module.add_prototypes(
                 distribution=distribution,
                 components_initializer=self.prototypes_initializer)
@@ -134,4 +134,4 @@ class GNGCallback(pl.Callback):
         pl_module.errors[
             worst_neighbor] = errors[worst_neighbor] * self.reduction
 
-        trainer.accelerator_backend.setup_optimizers(trainer)
+        trainer.accelerator.setup_optimizers(trainer)
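The `{distribution=}` rewrite above is part of the Python 3.6 compatibility work: the self-documenting `=` specifier inside f-strings only exists since Python 3.8. A quick sketch of the two spellings:

distribution = {0: 3, 1: 5}

# Python 3.8+ only: the '=' specifier echoes the expression itself.
# print(f"{distribution=}")        # -> distribution={0: 3, 1: 5}

# Python 3.6-compatible spelling, as used in the hunk above:
print(f"distribution={distribution}")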

prototorch/models/glvq.py

@@ -55,7 +55,7 @@ class GLVQ(SupervisedPrototypeModel):
     def shared_step(self, batch, batch_idx, optimizer_idx=None):
         x, y = batch
         out = self.compute_distances(x)
-        plabels = self.proto_layer.labels
+        _, plabels = self.proto_layer()
         loss = self.loss(out, y, plabels)
         return out, loss
@@ -112,7 +112,8 @@ class SiameseGLVQ(GLVQ):
         proto_opt = self.optimizer(self.proto_layer.parameters(),
                                    lr=self.hparams.proto_lr)
         # Only add a backbone optimizer if backbone has trainable parameters
-        if (bb_params := list(self.backbone.parameters())):
+        bb_params = list(self.backbone.parameters())
+        if (bb_params):
             bb_opt = self.optimizer(bb_params, lr=self.hparams.bb_lr)
             optimizers = [proto_opt, bb_opt]
         else:
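Likewise, the `bb_params` change removes an assignment expression (the walrus operator `:=`), which was only introduced in Python 3.8. A self-contained sketch of the rewrite, using a stand-in backbone rather than the model's own:

import torch

backbone = torch.nn.Identity()  # a backbone with no trainable parameters

# Python 3.8+ form that was removed:
# if (bb_params := list(backbone.parameters())):
#     print("backbone has", len(bb_params), "parameter tensors")

# Python 3.6-compatible equivalent used in the hunk above:
bb_params = list(backbone.parameters())
if bb_params:
    print("backbone has", len(bb_params), "parameter tensors")
else:
    print("backbone has no trainable parameters")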

prototorch/models/lvq.py

@@ -10,9 +10,7 @@ from .glvq import GLVQ
 class LVQ1(NonGradientMixin, GLVQ):
     """Learning Vector Quantization 1."""
 
     def training_step(self, train_batch, batch_idx, optimizer_idx=None):
-        protos = self.proto_layer.components
-        plabels = self.proto_layer.labels
+        protos, plabels = self.proto_layer()
         x, y = train_batch
         dis = self.compute_distances(x)
         # TODO Vectorized implementation
@@ -30,8 +28,8 @@ class LVQ1(NonGradientMixin, GLVQ):
         self.proto_layer.load_state_dict({"_components": updated_protos},
                                          strict=False)
 
-        print(f"{dis=}")
-        print(f"{y=}")
+        print(f"dis={dis}")
+        print(f"y={y}")
         # Logging
         self.log_acc(dis, y, tag="train_acc")
@@ -41,8 +39,7 @@ class LVQ1(NonGradientMixin, GLVQ):
 class LVQ21(NonGradientMixin, GLVQ):
     """Learning Vector Quantization 2.1."""
 
     def training_step(self, train_batch, batch_idx, optimizer_idx=None):
-        protos = self.proto_layer.components
-        plabels = self.proto_layer.labels
+        protos, plabels = self.proto_layer()
         x, y = train_batch
         dis = self.compute_distances(x)
@@ -99,8 +96,7 @@ class MedianLVQ(NonGradientMixin, GLVQ):
         return lower_bound
 
     def training_step(self, train_batch, batch_idx, optimizer_idx=None):
-        protos = self.proto_layer.components
-        plabels = self.proto_layer.labels
+        protos, plabels = self.proto_layer()
         x, y = train_batch
         dis = self.compute_distances(x)

prototorch/models/nam.py

@@ -1,58 +0,0 @@
"""ProtoTorch Neural Additive Model."""

import torch
import torchmetrics

from .abstract import ProtoTorchBolt


class BinaryNAM(ProtoTorchBolt):
    """Neural Additive Model for binary classification.

    Paper: https://arxiv.org/abs/2004.13912
    Official implementation: https://github.com/google-research/google-research/tree/master/neural_additive_models

    """

    def __init__(self, hparams: dict, extractors: torch.nn.ModuleList,
                 **kwargs):
        super().__init__(hparams, **kwargs)

        # Default hparams
        self.hparams.setdefault("threshold", 0.5)

        self.extractors = extractors
        self.linear = torch.nn.Linear(in_features=len(extractors),
                                      out_features=1,
                                      bias=True)

    def extract(self, x):
        """Apply the local extractors batch-wise on features."""
        out = torch.zeros_like(x)
        for j in range(x.shape[1]):
            out[:, j] = self.extractors[j](x[:, j].unsqueeze(1)).squeeze()
        return out

    def forward(self, x):
        x = self.extract(x)
        x = self.linear(x)
        return torch.sigmoid(x)

    def training_step(self, batch, batch_idx, optimizer_idx=None):
        x, y = batch
        preds = self(x).squeeze()
        train_loss = torch.nn.functional.binary_cross_entropy(preds, y.float())
        self.log("train_loss", train_loss)
        accuracy = torchmetrics.functional.accuracy(preds.int(), y.int())
        self.log("train_acc",
                 accuracy,
                 on_step=False,
                 on_epoch=True,
                 prog_bar=True,
                 logger=True)
        return train_loss

    def predict(self, x):
        out = self(x)
        pred = torch.zeros_like(out, device=self.device)
        pred[out > self.hparams.threshold] = 1
        return pred

prototorch/models/probabilistic.py

@@ -1,4 +1,5 @@
 """Probabilistic GLVQ methods"""
+
 import torch
 
 from ..core.losses import nllr_loss, rslvq_loss
@@ -19,7 +20,7 @@ class CELVQ(GLVQ):
     def shared_step(self, batch, batch_idx, optimizer_idx=None):
         x, y = batch
         out = self.compute_distances(x)  # [None, num_protos]
-        plabels = self.proto_layer.labels
+        _, plabels = self.proto_layer()
         winning = stratified_min_pooling(out, plabels)  # [None, num_classes]
         probs = -1.0 * winning
         batch_loss = self.loss(probs, y.long())
@@ -31,7 +32,7 @@ class ProbabilisticLVQ(GLVQ):
     def __init__(self, hparams, rejection_confidence=0.0, **kwargs):
         super().__init__(hparams, **kwargs)
 
-        self.conditional_distribution = GaussianPrior(self.hparams.variance)
+        self.conditional_distribution = None
         self.rejection_confidence = rejection_confidence
 
     def forward(self, x):
@@ -53,11 +54,10 @@
     def training_step(self, batch, batch_idx, optimizer_idx=None):
         x, y = batch
         out = self.forward(x)
-        plabels = self.proto_layer.labels
+        _, plabels = self.proto_layer()
         batch_loss = self.loss(out, y, plabels)
-        train_loss = batch_loss.sum()
-        self.log("train_loss", train_loss)
-        return train_loss
+        loss = batch_loss.sum()
+        return loss
 
 
 class SLVQ(ProbabilisticLVQ):
@@ -65,6 +65,7 @@ class SLVQ(ProbabilisticLVQ):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.loss = LossLayer(nllr_loss)
+        self.conditional_distribution = GaussianPrior(self.hparams.variance)
 
 
 class RSLVQ(ProbabilisticLVQ):
@@ -72,6 +73,7 @@ class RSLVQ(ProbabilisticLVQ):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.loss = LossLayer(rslvq_loss)
+        self.conditional_distribution = GaussianPrior(self.hparams.variance)
 
 
 class PLVQ(ProbabilisticLVQ, SiameseGMLVQ):
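The hunks above move the `GaussianPrior` construction out of the shared `ProbabilisticLVQ.__init__` and into the subclasses that actually define a `variance` hyperparameter, so the base class can be constructed without one. A minimal sketch of the pattern with stand-in classes (the `GaussianPrior` stub here is illustrative, not prototorch's implementation):

class GaussianPrior:
    """Stand-in stub for illustration only."""
    def __init__(self, variance):
        self.variance = variance

class ProbabilisticBase:
    def __init__(self):
        # The base no longer builds the prior; it stays None until a
        # subclass that knows its variance constructs one.
        self.conditional_distribution = None

class SLVQLike(ProbabilisticBase):
    def __init__(self, variance=1.0):
        super().__init__()
        self.conditional_distribution = GaussianPrior(variance)

model = SLVQLike(variance=0.5)
print(model.conditional_distribution.variance)  # 0.5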

prototorch/models/unsupervised.py

@@ -53,7 +53,7 @@ class KohonenSOM(NonGradientMixin, UnsupervisedPrototypeModel):
         grid = self._grid.view(-1, 2)
         gd = squared_euclidean_distance(wp, grid)
         nh = torch.exp(-gd / self._sigma**2)
-        protos = self.proto_layer.components
+        protos = self.proto_layer()
         diff = x.unsqueeze(dim=1) - protos
         delta = self._lr * self.hparams.alpha * nh.unsqueeze(-1) * diff
         updated_protos = protos + delta.sum(dim=0)

prototorch/models/vis.py

@@ -117,24 +117,6 @@ class Vis2DAbstract(pl.Callback):
         plt.close()
 
 
-class Vis2D(Vis2DAbstract):
-
-    def on_epoch_end(self, trainer, pl_module):
-        if not self.precheck(trainer):
-            return True
-
-        x_train, y_train = self.x_train, self.y_train
-        ax = self.setup_ax(xlabel="Data dimension 1",
-                           ylabel="Data dimension 2")
-        self.plot_data(ax, x_train, y_train)
-        mesh_input, xx, yy = mesh2d(x_train, self.border, self.resolution)
-        mesh_input = torch.from_numpy(mesh_input).type_as(x_train)
-        y_pred = pl_module.predict(mesh_input)
-        y_pred = y_pred.cpu().reshape(xx.shape)
-        ax.contourf(xx, yy, y_pred, cmap=self.cmap, alpha=0.35)
-        self.log_and_display(trainer, pl_module)
-
-
 class VisGLVQ2D(Vis2DAbstract):
 
     def on_epoch_end(self, trainer, pl_module):
         if not self.precheck(trainer):
@@ -269,8 +251,6 @@ class VisImgComp(Vis2DAbstract):
                                        size=self.embedding_data,
                                        replace=False)
             data = self.x_train[ind]
-            # print(f"{data.shape=}")
-            # print(f"{self.y_train[ind].shape=}")
             tb.add_embedding(data.view(len(ind), -1),
                              label_img=data,
                              global_step=None,

setup.py

@@ -22,7 +22,7 @@ with open("README.md", "r") as fh:
     long_description = fh.read()
 
 INSTALL_REQUIRES = [
-    "prototorch>=0.6.0",
+    "prototorch>=0.7.0",
     "pytorch_lightning>=1.3.5",
     "torchmetrics",
 ]
@@ -53,7 +53,7 @@ ALL = CLI + DEV + DOCS + EXAMPLES + TESTS
 setup(
     name=safe_name("prototorch_" + PLUGIN_NAME),
-    version="0.2.0",
+    version="0.3.0",
     description="Pre-packaged prototype-based "
     "machine learning models using ProtoTorch and PyTorch-Lightning.",
     long_description=long_description,
@@ -63,7 +63,7 @@ setup(
     url=PROJECT_URL,
     download_url=DOWNLOAD_URL,
     license="MIT",
-    python_requires=">=3.9",
+    python_requires=">=3.6",
     install_requires=INSTALL_REQUIRES,
     extras_require={
         "dev": DEV,
@@ -80,6 +80,9 @@ setup(
         "License :: OSI Approved :: MIT License",
         "Natural Language :: English",
         "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.6",
         "Operating System :: OS Independent",
         "Topic :: Scientific/Engineering :: Artificial Intelligence",
         "Topic :: Software Development :: Libraries",

tests/test_examples.sh

@@ -1,11 +1,27 @@
 #! /bin/bash
 
+# Read Flags
+gpu=0
+while [ -n "$1" ]; do
+  case "$1" in
+  --gpu) gpu=1;;
+  -g) gpu=1;;
+  *) path=$1;;
+  esac
+  shift
+done
+
+python --version
+echo "Using GPU: " $gpu
+
+# Loop
 failed=0
 
-for example in $(find $1 -maxdepth 1 -name "*.py")
+for example in $(find $path -maxdepth 1 -name "*.py")
 do
     echo -n "$x" $example '... '
-    export DISPLAY= && python $example --fast_dev_run 1 &> run_log.txt
+    export DISPLAY= && python $example --fast_dev_run 1 --gpus $gpu &> run_log.txt
     if [[ $? -ne 0 ]]; then
         echo "FAILED!!"
         cat run_log.txt