Compare commits: v0.5.1...feature/tr

1 commit: 17b45249f4
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.5.1
+current_version = 0.5.0
 commit = True
 tag = True
 parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)
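Aside: the `parse` entry above is a plain named-group regular expression. A minimal standalone sketch (not part of the diff) of how a bumpversion-style tool splits a version string with it:

```python
import re

# The `parse` pattern from the config hunk above.
pattern = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
match = re.fullmatch(pattern, "0.5.0")
print(match.groupdict())  # {'major': '0', 'minor': '5', 'patch': '0'}
```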
.gitignore (vendored; 2 changes)
@@ -155,4 +155,4 @@ scratch*
 .vscode/
 
 reports
-artifacts
+artifacts
@@ -1,54 +0,0 @@
-# See https://pre-commit.com for more information
-# See https://pre-commit.com/hooks.html for more hooks
-repos:
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.0.1
-    hooks:
-      - id: trailing-whitespace
-      - id: end-of-file-fixer
-      - id: check-yaml
-      - id: check-added-large-files
-      - id: check-ast
-      - id: check-case-conflict
-
-
-  - repo: https://github.com/myint/autoflake
-    rev: v1.4
-    hooks:
-      - id: autoflake
-
-  - repo: http://github.com/PyCQA/isort
-    rev: 5.8.0
-    hooks:
-      - id: isort
-
-  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: 'v0.902'
-    hooks:
-      - id: mypy
-        files: prototorch
-        additional_dependencies: [types-pkg_resources]
-
-  - repo: https://github.com/pre-commit/mirrors-yapf
-    rev: 'v0.31.0'  # Use the sha / tag you want to point at
-    hooks:
-      - id: yapf
-
-  - repo: https://github.com/pre-commit/pygrep-hooks
-    rev: v1.9.0  # Use the ref you want to point at
-    hooks:
-      - id: python-use-type-annotations
-      - id: python-no-log-warn
-      - id: python-check-blanket-noqa
-
-
-  - repo: https://github.com/asottile/pyupgrade
-    rev: v2.19.4
-    hooks:
-      - id: pyupgrade
-
-  - repo: https://github.com/jorisroovers/gitlint
-    rev: "v0.15.1"
-    hooks:
-      - id: gitlint
-        args: [--contrib=CT1, --ignore=B6, --msg-filename]
README.md (11 changes)
@@ -48,17 +48,6 @@ pip install -e .[all]
 The documentation is available at <https://www.prototorch.ml/en/latest/>. Should
 that link not work try <https://prototorch.readthedocs.io/en/latest/>.
 
-## Contribution
-
-This repository contains definition for [git hooks](https://githooks.com).
-[Pre-commit](https://pre-commit.com) gets installed as development dependency with prototorch.
-Please install the hooks by running
-```bash
-pre-commit install
-pre-commit install --hook-type commit-msg
-```
-before creating the first commit.
-
 ## Bibtex
 
 If you would like to cite the package, please use this:
@@ -23,7 +23,7 @@ author = "Jensun Ravichandran"
 
 # The full version, including alpha/beta/rc tags
 #
-release = "0.5.1"
+release = "0.5.0"
 
 # -- General configuration ---------------------------------------------------
 
examples/glvq_iris.py (new file; 120 lines)
@@ -0,0 +1,120 @@
+"""ProtoTorch GLVQ example using 2D Iris data."""
+
+import numpy as np
+import torch
+from matplotlib import pyplot as plt
+from prototorch.components import LabeledComponents, StratifiedMeanInitializer
+from prototorch.functions.competitions import wtac
+from prototorch.functions.distances import euclidean_distance
+from prototorch.modules.losses import GLVQLoss
+from sklearn.datasets import load_iris
+from sklearn.preprocessing import StandardScaler
+from torchinfo import summary
+
+# Prepare and preprocess the data
+scaler = StandardScaler()
+x_train, y_train = load_iris(return_X_y=True)
+x_train = x_train[:, [0, 2]]
+scaler.fit(x_train)
+x_train = scaler.transform(x_train)
+
+
+# Define the GLVQ model
+class Model(torch.nn.Module):
+    def __init__(self):
+        """GLVQ model for training on 2D Iris data."""
+        super().__init__()
+        prototype_initializer = StratifiedMeanInitializer([x_train, y_train])
+        prototype_distribution = {"num_classes": 3, "prototypes_per_class": 3}
+        self.proto_layer = LabeledComponents(
+            prototype_distribution,
+            prototype_initializer,
+        )
+
+    def forward(self, x):
+        prototypes, prototype_labels = self.proto_layer()
+        distances = euclidean_distance(x, prototypes)
+        return distances, prototype_labels
+
+
+# Build the GLVQ model
+model = Model()
+
+# Print summary using torchinfo (might be buggy/incorrect)
+print(summary(model))
+
+# Optimize using SGD optimizer from `torch.optim`
+optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
+criterion = GLVQLoss(squashing="sigmoid_beta", beta=10)
+
+x_in = torch.Tensor(x_train)
+y_in = torch.Tensor(y_train)
+
+# Training loop
+TITLE = "Prototype Visualization"
+fig = plt.figure(TITLE)
+for epoch in range(70):
+    # Compute loss
+    distances, prototype_labels = model(x_in)
+    loss = criterion([distances, prototype_labels], y_in)
+
+    # Compute Accuracy
+    with torch.no_grad():
+        predictions = wtac(distances, prototype_labels)
+        correct = predictions.eq(y_in.view_as(predictions)).sum().item()
+        acc = 100.0 * correct / len(x_train)
+
+    print(
+        f"Epoch: {epoch + 1:03d} Loss: {loss.item():05.02f} Acc: {acc:05.02f}%"
+    )
+
+    # Optimizer step
+    optimizer.zero_grad()
+    loss.backward()
+    optimizer.step()
+
+    # Get the prototypes form the model
+    prototypes = model.proto_layer.components.numpy()
+    if np.isnan(np.sum(prototypes)):
+        print("Stopping training because of `nan` in prototypes.")
+        break
+
+    # Visualize the data and the prototypes
+    ax = fig.gca()
+    ax.cla()
+    ax.set_title(TITLE)
+    ax.set_xlabel("Data dimension 1")
+    ax.set_ylabel("Data dimension 2")
+    cmap = "viridis"
+    ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, edgecolor="k")
+    ax.scatter(
+        prototypes[:, 0],
+        prototypes[:, 1],
+        c=prototype_labels,
+        cmap=cmap,
+        edgecolor="k",
+        marker="D",
+        s=50,
+    )
+
+    # Paint decision regions
+    x = np.vstack((x_train, prototypes))
+    x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1
+    y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1
+    xx, yy = np.meshgrid(np.arange(x_min, x_max, 1 / 50),
+                         np.arange(y_min, y_max, 1 / 50))
+    mesh_input = np.c_[xx.ravel(), yy.ravel()]
+
+    torch_input = torch.Tensor(mesh_input)
+    d = model(torch_input)[0]
+    w_indices = torch.argmin(d, dim=1)
+    y_pred = torch.index_select(prototype_labels, 0, w_indices)
+    y_pred = y_pred.reshape(xx.shape)
+
+    # Plot voronoi regions
+    ax.contourf(xx, yy, y_pred, cmap=cmap, alpha=0.35)
+
+    ax.set_xlim(left=x_min + 0, right=x_max - 0)
+    ax.set_ylim(bottom=y_min + 0, top=y_max - 0)
+
+    plt.pause(0.1)
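Aside: the example above classifies with `wtac` from `prototorch.functions.competitions`. A minimal standalone sketch of what a winner-takes-all competition computes (`wtac_sketch` is a hypothetical stand-in, not the library function):

```python
import torch

def wtac_sketch(distances, prototype_labels):
    # Winner-takes-all: each sample gets the label of its closest prototype.
    winning_indices = torch.argmin(distances, dim=1)
    return prototype_labels[winning_indices]

# Two samples, three prototypes labeled [0, 1, 2].
d = torch.tensor([[0.1, 0.5, 0.9], [0.7, 0.2, 0.4]])
print(wtac_sketch(d, torch.tensor([0, 1, 2])))  # tensor([0, 1])
```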
examples/gmlvq_tecator.py (new file; 103 lines)
@@ -0,0 +1,103 @@
+"""ProtoTorch "siamese" GMLVQ example using Tecator."""
+
+import matplotlib.pyplot as plt
+import torch
+from prototorch.components import LabeledComponents, StratifiedMeanInitializer
+from prototorch.datasets.tecator import Tecator
+from prototorch.functions.distances import sed
+from prototorch.modules.losses import GLVQLoss
+from prototorch.utils.colors import get_legend_handles
+from torch.utils.data import DataLoader
+
+# Prepare the dataset and dataloader
+train_data = Tecator(root="./artifacts", train=True)
+train_loader = DataLoader(train_data, batch_size=128, shuffle=True)
+
+
+class Model(torch.nn.Module):
+    def __init__(self, **kwargs):
+        """GMLVQ model as a siamese network."""
+        super().__init__()
+        prototype_initializer = StratifiedMeanInitializer(train_loader)
+        prototype_distribution = {"num_classes": 2, "prototypes_per_class": 2}
+
+        self.proto_layer = LabeledComponents(
+            prototype_distribution,
+            prototype_initializer,
+        )
+
+        self.omega = torch.nn.Linear(in_features=100,
+                                     out_features=100,
+                                     bias=False)
+        torch.nn.init.eye_(self.omega.weight)
+
+    def forward(self, x):
+        protos = self.proto_layer.components
+        plabels = self.proto_layer.component_labels
+
+        # Process `x` and `protos` through `omega`
+        x_map = self.omega(x)
+        protos_map = self.omega(protos)
+
+        # Compute distances and output
+        dis = sed(x_map, protos_map)
+        return dis, plabels
+
+
+# Build the GLVQ model
+model = Model()
+
+# Print a summary of the model
+print(model)
+
+# Optimize using Adam optimizer from `torch.optim`
+optimizer = torch.optim.Adam(model.parameters(), lr=0.001_0)
+scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=75, gamma=0.1)
+criterion = GLVQLoss(squashing="identity", beta=10)
+
+# Training loop
+for epoch in range(150):
+    epoch_loss = 0.0  # zero-out epoch loss
+    optimizer.zero_grad()  # zero-out gradients
+    for xb, yb in train_loader:
+        # Compute loss
+        distances, plabels = model(xb)
+        loss = criterion([distances, plabels], yb)
+        epoch_loss += loss.item()
+        # Backprop
+        loss.backward()
+    # Take a gradient descent step
+    optimizer.step()
+    scheduler.step()
+
+    lr = optimizer.param_groups[0]["lr"]
+    print(f"Epoch: {epoch + 1:03d} Loss: {epoch_loss:06.02f} lr: {lr:07.06f}")
+
+# Get the omega matrix form the model
+omega = model.omega.weight.data.numpy().T
+
+# Visualize the lambda matrix
+title = "Lambda Matrix Visualization"
+fig = plt.figure(title)
+ax = fig.gca()
+ax.set_title(title)
+im = ax.imshow(omega.dot(omega.T), cmap="viridis")
+plt.show()
+
+# Get the prototypes form the model
+protos = model.proto_layer.components.numpy()
+plabels = model.proto_layer.component_labels.numpy()
+
+# Visualize the prototypes
+title = "Tecator Prototypes"
+fig = plt.figure(title)
+ax = fig.gca()
+ax.set_title(title)
+ax.set_xlabel("Spectral frequencies")
+ax.set_ylabel("Absorption")
+clabels = ["Class 0 - Low fat", "Class 1 - High fat"]
+handles, colors = get_legend_handles(clabels, marker="line", zero_indexed=True)
+for x, y in zip(protos, plabels):
+    ax.plot(x, c=colors[int(y)])
+ax.legend(handles, clabels)
+plt.show()
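Aside: `sed` in the forward pass above is a squared Euclidean distance between mapped samples and mapped prototypes. A minimal sketch of that computation, assuming 2-D inputs (`sed_sketch` is a hypothetical stand-in, not the library function):

```python
import torch

def sed_sketch(x, protos):
    # d(x, w) = ||x - w||^2 for every sample/prototype pair, via broadcasting.
    diff = x.unsqueeze(1) - protos.unsqueeze(0)  # (num_samples, num_protos, dim)
    return torch.sum(diff**2, dim=-1)            # (num_samples, num_protos)

x = torch.randn(4, 100)       # a batch of 4 spectra
protos = torch.randn(2, 100)  # 2 prototypes (here before the omega mapping)
print(sed_sketch(x, protos).shape)  # torch.Size([4, 2])
```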
examples/gtlvq_mnist.py (new file; 183 lines)
@@ -0,0 +1,183 @@
+"""
+ProtoTorch GTLVQ example using MNIST data.
+The GTLVQ is placed as an classification model on
+top of a CNN, considered as featurer extractor.
+Initialization of subpsace and prototypes in
+Siamnese fashion
+For more info about GTLVQ see:
+DOI:10.1109/IJCNN.2016.7727534
+"""
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torchvision
+from prototorch.functions.helper import calculate_prototype_accuracy
+from prototorch.modules.losses import GLVQLoss
+from prototorch.modules.models import GTLVQ
+from torchvision import transforms
+
+# Parameters and options
+num_epochs = 50
+batch_size_train = 64
+batch_size_test = 1000
+learning_rate = 0.1
+momentum = 0.5
+log_interval = 10
+cuda = "cuda:0"
+random_seed = 1
+device = torch.device(cuda if torch.cuda.is_available() else "cpu")
+
+# Configures reproducability
+torch.manual_seed(random_seed)
+np.random.seed(random_seed)
+
+# Prepare and preprocess the data
+train_loader = torch.utils.data.DataLoader(
+    torchvision.datasets.MNIST(
+        "./files/",
+        train=True,
+        download=True,
+        transform=torchvision.transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize((0.1307, ), (0.3081, ))
+        ]),
+    ),
+    batch_size=batch_size_train,
+    shuffle=True,
+)
+
+test_loader = torch.utils.data.DataLoader(
+    torchvision.datasets.MNIST(
+        "./files/",
+        train=False,
+        download=True,
+        transform=torchvision.transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize((0.1307, ), (0.3081, ))
+        ]),
+    ),
+    batch_size=batch_size_test,
+    shuffle=True,
+)
+
+
+# Define the GLVQ model plus appropriate feature extractor
+class CNNGTLVQ(torch.nn.Module):
+    def __init__(
+        self,
+        num_classes,
+        subspace_data,
+        prototype_data,
+        tangent_projection_type="local",
+        prototypes_per_class=2,
+        bottleneck_dim=128,
+    ):
+        super(CNNGTLVQ, self).__init__()
+
+        # Feature Extractor - Simple CNN
+        self.fe = nn.Sequential(
+            nn.Conv2d(1, 32, 3, 1),
+            nn.ReLU(),
+            nn.Conv2d(32, 64, 3, 1),
+            nn.ReLU(),
+            nn.MaxPool2d(2),
+            nn.Dropout(0.25),
+            nn.Flatten(),
+            nn.Linear(9216, bottleneck_dim),
+            nn.Dropout(0.5),
+            nn.LeakyReLU(),
+            nn.LayerNorm(bottleneck_dim),
+        )
+
+        # Forward pass of subspace and prototype initialization data through feature extractor
+        subspace_data = self.fe(subspace_data)
+        prototype_data[0] = self.fe(prototype_data[0])
+
+        # Initialization of GTLVQ
+        self.gtlvq = GTLVQ(
+            num_classes,
+            subspace_data,
+            prototype_data,
+            tangent_projection_type=tangent_projection_type,
+            feature_dim=bottleneck_dim,
+            prototypes_per_class=prototypes_per_class,
+        )
+
+    def forward(self, x):
+        # Feature Extraction
+        x = self.fe(x)
+
+        # GTLVQ Forward pass
+        dis = self.gtlvq(x)
+        return dis
+
+
+# Get init data
+subspace_data = torch.cat(
+    [next(iter(train_loader))[0],
+     next(iter(test_loader))[0]])
+prototype_data = next(iter(train_loader))
+
+# Build the CNN GTLVQ model
+model = CNNGTLVQ(
+    10,
+    subspace_data,
+    prototype_data,
+    tangent_projection_type="local",
+    bottleneck_dim=128,
+).to(device)
+
+# Optimize using SGD optimizer from `torch.optim`
+optimizer = torch.optim.Adam(
+    [{
+        "params": model.fe.parameters()
+    }, {
+        "params": model.gtlvq.parameters()
+    }],
+    lr=learning_rate,
+)
+criterion = GLVQLoss(squashing="sigmoid_beta", beta=10)
+
+# Training loop
+for epoch in range(num_epochs):
+    for batch_idx, (x_train, y_train) in enumerate(train_loader):
+        model.train()
+        x_train, y_train = x_train.to(device), y_train.to(device)
+        optimizer.zero_grad()
+
+        distances = model(x_train)
+        plabels = model.gtlvq.cls.component_labels.to(device)
+
+        # Compute loss.
+        loss = criterion([distances, plabels], y_train)
+        loss.backward()
+        optimizer.step()
+
+        # GTLVQ uses projected SGD, which means to orthogonalize the subspaces after every gradient update.
+        model.gtlvq.orthogonalize_subspace()
+
+        if batch_idx % log_interval == 0:
+            acc = calculate_prototype_accuracy(distances, y_train, plabels)
+            print(
+                f"Epoch: {epoch + 1:02d}/{num_epochs:02d} Epoch Progress: {100. * batch_idx / len(train_loader):02.02f} % Loss: {loss.item():02.02f} \
+                Train Acc: {acc.item():02.02f}")
+
+    # Test
+    with torch.no_grad():
+        model.eval()
+        correct = 0
+        total = 0
+        for x_test, y_test in test_loader:
+            x_test, y_test = x_test.to(device), y_test.to(device)
+            test_distances = model(torch.tensor(x_test))
+            test_plabels = model.gtlvq.cls.prototype_labels.to(device)
+            i = torch.argmin(test_distances, 1)
+            correct += torch.sum(y_test == test_plabels[i])
+            total += y_test.size(0)
+        print("Accuracy of the network on the test images: %d %%" %
+              (torch.true_divide(correct, total) * 100))
+
+# Save the model
+PATH = "./glvq_mnist_model.pth"
+torch.save(model.state_dict(), PATH)
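Aside: `calculate_prototype_accuracy` above comes from `prototorch.functions.helper`; a plausible minimal sketch of the quantity it reports (a hypothetical reimplementation, not the library code):

```python
import torch

def prototype_accuracy_sketch(distances, targets, plabels):
    # Nearest prototype wins; its label is the prediction.
    predictions = plabels[torch.argmin(distances, dim=1)]
    return 100.0 * (predictions == targets).float().mean()

d = torch.tensor([[0.2, 0.9], [0.8, 0.1]])
print(prototype_accuracy_sketch(d, torch.tensor([0, 1]),
                                torch.tensor([0, 1])))  # tensor(100.)
```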
examples/lgmlvq_iris.py (new file; 108 lines)
@@ -0,0 +1,108 @@
+"""ProtoTorch LGMLVQ example using 2D Iris data."""
+
+import numpy as np
+import torch
+from matplotlib import pyplot as plt
+from prototorch.components import LabeledComponents, StratifiedMeanInitializer
+from prototorch.functions.competitions import stratified_min
+from prototorch.functions.distances import lomega_distance
+from prototorch.modules.losses import GLVQLoss
+from sklearn.datasets import load_iris
+from sklearn.metrics import accuracy_score
+
+# Prepare training data
+x_train, y_train = load_iris(True)
+x_train = x_train[:, [0, 2]]
+
+
+# Define the model
+class Model(torch.nn.Module):
+    def __init__(self):
+        """Local-GMLVQ model."""
+        super().__init__()
+
+        prototype_initializer = StratifiedMeanInitializer([x_train, y_train])
+        prototype_distribution = [1, 2, 2]
+        self.proto_layer = LabeledComponents(
+            prototype_distribution,
+            prototype_initializer,
+        )
+
+        omegas = torch.eye(2, 2).repeat(5, 1, 1)
+        self.omegas = torch.nn.Parameter(omegas)
+
+    def forward(self, x):
+        protos, plabels = self.proto_layer()
+        omegas = self.omegas
+        dis = lomega_distance(x, protos, omegas)
+        return dis, plabels
+
+
+# Build the model
+model = Model()
+
+# Optimize using Adam optimizer from `torch.optim`
+optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
+criterion = GLVQLoss(squashing="sigmoid_beta", beta=10)
+
+x_in = torch.Tensor(x_train)
+y_in = torch.Tensor(y_train)
+
+# Training loop
+title = "Prototype Visualization"
+fig = plt.figure(title)
+for epoch in range(100):
+    # Compute loss
+    dis, plabels = model(x_in)
+    loss = criterion([dis, plabels], y_in)
+    y_pred = np.argmin(stratified_min(dis, plabels).detach().numpy(), axis=1)
+    acc = accuracy_score(y_train, y_pred)
+    log_string = f"Epoch: {epoch + 1:03d} Loss: {loss.item():05.02f} "
+    log_string += f"Acc: {acc * 100:05.02f}%"
+    print(log_string)
+
+    # Take a gradient descent step
+    optimizer.zero_grad()
+    loss.backward()
+    optimizer.step()
+
+    # Get the prototypes form the model
+    protos = model.proto_layer.components.numpy()
+
+    # Visualize the data and the prototypes
+    ax = fig.gca()
+    ax.cla()
+    ax.set_title(title)
+    ax.set_xlabel("Data dimension 1")
+    ax.set_ylabel("Data dimension 2")
+    cmap = "viridis"
+    ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, edgecolor="k")
+    ax.scatter(
+        protos[:, 0],
+        protos[:, 1],
+        c=plabels,
+        cmap=cmap,
+        edgecolor="k",
+        marker="D",
+        s=50,
+    )
+
+    # Paint decision regions
+    x = np.vstack((x_train, protos))
+    x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1
+    y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1
+    xx, yy = np.meshgrid(np.arange(x_min, x_max, 1 / 50),
+                         np.arange(y_min, y_max, 1 / 50))
+    mesh_input = np.c_[xx.ravel(), yy.ravel()]
+
+    d, plabels = model(torch.Tensor(mesh_input))
+    y_pred = np.argmin(stratified_min(d, plabels).detach().numpy(), axis=1)
+    y_pred = y_pred.reshape(xx.shape)
+
+    # Plot voronoi regions
+    ax.contourf(xx, yy, y_pred, cmap=cmap, alpha=0.35)
+
+    ax.set_xlim(left=x_min + 0, right=x_max - 0)
+    ax.set_ylim(bottom=y_min + 0, top=y_max - 0)
+
+    plt.pause(0.1)
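Aside: `lomega_distance` above gives every prototype its own omega matrix. A minimal sketch of the localized matrix distance d_j(x) = ||omega_j (x - w_j)||^2 (`lomega_distance_sketch` is a hypothetical stand-in, not the library function):

```python
import torch

def lomega_distance_sketch(x, protos, omegas):
    # Local-matrix distance: prototype j carries its own omega_j.
    diff = x.unsqueeze(1) - protos.unsqueeze(0)          # (n, p, d)
    mapped = torch.einsum("pod,npd->npo", omegas, diff)  # omega_j @ (x - w_j)
    return (mapped**2).sum(-1)                           # (n, p)

x = torch.randn(6, 2)
protos = torch.randn(5, 2)
omegas = torch.eye(2).repeat(5, 1, 1)  # identity omegas, as in the example
print(lomega_distance_sketch(x, protos, omegas).shape)  # torch.Size([6, 5])
```

With identity omegas this reduces to the plain squared Euclidean distance; training then adapts each omega_j.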
@@ -1,7 +1,6 @@
 """ProtoTorch package."""
 
 import pkgutil
-from typing import List
 
 import pkg_resources
 
@@ -9,7 +8,7 @@ from . import components, datasets, functions, modules, utils
 from .datasets import *
 
 # Core Setup
-__version__ = "0.5.1"
+__version__ = "0.5.0"
 
 __all_core__ = [
     "datasets",
@@ -20,7 +19,7 @@ __all_core__ = [
 ]
 
 # Plugin Loader
-__path__: List[str] = pkgutil.extend_path(__path__, __name__)
+__path__ = pkgutil.extend_path(__path__, __name__)
 
 
 def discover_plugins():
@@ -3,13 +3,13 @@
 import warnings
 
 import torch
-from torch.nn.parameter import Parameter
 
 from prototorch.components.initializers import (ClassAwareInitializer,
                                                 ComponentsInitializer,
+                                                CustomLabelsInitializer,
                                                 EqualLabelsInitializer,
-                                                UnequalLabelsInitializer,
                                                 ZeroReasoningsInitializer)
+from torch.nn.parameter import Parameter
 
 from .initializers import parse_data_arg
 
@@ -21,9 +21,7 @@ def get_labels_object(distribution):
                 distribution["num_classes"],
                 distribution["prototypes_per_class"])
         else:
-            clabels = list(distribution.keys())
-            dist = list(distribution.values())
-            labels = UnequalLabelsInitializer(dist, clabels)
+            labels = CustomLabelsInitializer(distribution)
     elif isinstance(distribution, tuple):
         num_classes, prototypes_per_class = distribution
         labels = EqualLabelsInitializer(num_classes, prototypes_per_class)
@@ -44,45 +42,6 @@ def _precheck_initializer(initializer):
         raise TypeError(emsg)
 
 
-class LinearMapping(torch.nn.Module):
-    """LinearMapping is a learnable Mapping Matrix."""
-    def __init__(self,
-                 mapping_shape=None,
-                 initializer=None,
-                 *,
-                 initialized_linearmapping=None):
-        super().__init__()
-
-        # Ignore all initialization settings if initialized_components is given.
-        if initialized_linearmapping is not None:
-            self._register_mapping(initialized_linearmapping)
-            if num_components is not None or initializer is not None:
-                wmsg = "Arguments ignored while initializing Components"
-                warnings.warn(wmsg)
-        else:
-            self._initialize_mapping(mapping_shape, initializer)
-
-    @property
-    def mapping_shape(self):
-        return self._omega.shape
-
-    def _register_mapping(self, components):
-        self.register_parameter("_omega", Parameter(components))
-
-    def _initialize_mapping(self, mapping_shape, initializer):
-        _precheck_initializer(initializer)
-        _mapping = initializer.generate(mapping_shape)
-        self._register_mapping(_mapping)
-
-    @property
-    def mapping(self):
-        """Tensor containing the component tensors."""
-        return self._omega.detach()
-
-    def forward(self):
-        return self._omega
-
-
 class Components(torch.nn.Module):
     """Components is a set of learnable Tensors."""
     def __init__(self,
@@ -159,7 +118,7 @@ class LabeledComponents(Components):
             components, component_labels = parse_data_arg(
                 initialized_components)
             super().__init__(initialized_components=components)
-            self._register_labels(component_labels)
+            self._labels = component_labels
         else:
             labels = get_labels_object(distribution)
             self.initial_distribution = labels.distribution
@@ -197,7 +156,7 @@ class LabeledComponents(Components):
 
         # Components
        if isinstance(initializer, ClassAwareInitializer):
-            _new = initializer.generate(len(new_labels), distribution)
+            _new = initializer.generate(len(new_labels), labels.distribution)
         else:
             _new = initializer.generate(len(new_labels))
         _components = torch.cat([self._components, _new])
@@ -221,7 +180,7 @@ class LabeledComponents(Components):
 
 
 class ReasoningComponents(Components):
-    r"""ReasoningComponents generate a set of components and a set of reasoning matrices.
+    """ReasoningComponents generate a set of components and a set of reasoning matrices.
 
     Every Component has a reasoning matrix assigned.
 
@@ -1,4 +1,5 @@
-"""ProtoTroch Initializers."""
+"""ProtoTroch Component and Label Initializers."""
 
 import warnings
+from collections.abc import Iterable
 from itertools import chain
@@ -167,14 +168,6 @@ class StratifiedSelectionInitializer(ClassAwareInitializer):
         return samples
 
 
-# Omega matrix
-class PcaInitializer(DataAwareInitializer):
-    def generate(self, shape):
-        (input_dim, latent_dim) = shape
-        (_, eigVal, eigVec) = torch.pca_lowrank(self.data, q=latent_dim)
-        return eigVec
-
-
 # Labels
 class LabelsInitializer:
     def generate(self):
@@ -182,17 +175,19 @@ class LabelsInitializer:
 
 
 class UnequalLabelsInitializer(LabelsInitializer):
-    def __init__(self, dist, clabels=None):
+    def __init__(self, dist):
         self.dist = dist
-        self.clabels = clabels or range(len(self.dist))
 
     @property
     def distribution(self):
        return self.dist
 
-    def generate(self):
-        targets = list(
-            chain(*[[i] * n for i, n in zip(self.clabels, self.dist)]))
+    def generate(self, clabels=None, dist=None):
+        if not clabels:
+            clabels = range(len(self.dist))
+        if not dist:
+            dist = self.dist
+        targets = list(chain(*[[i] * n for i, n in zip(clabels, dist)]))
         return torch.LongTensor(targets)
@@ -209,6 +204,13 @@ class EqualLabelsInitializer(LabelsInitializer):
         return torch.arange(self.classes).repeat(self.per_class, 1).T.flatten()
 
 
+class CustomLabelsInitializer(UnequalLabelsInitializer):
+    def generate(self):
+        clabels = list(self.dist.keys())
+        dist = list(self.dist.values())
+        return super().generate(clabels, dist)
+
+
 # Reasonings
 class ReasoningsInitializer:
     def generate(self, length):
@@ -230,4 +232,3 @@ SMI = StratifiedMeanInitializer
 Random = RandomInitializer = UniformInitializer
 Zeros = ZerosInitializer
 Ones = OnesInitializer
-PCA = PcaInitializer
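Aside: the net effect of the reworked label initializers above, traced by hand as a standalone sketch (the dict below is a hypothetical distribution; `CustomLabelsInitializer` splits it into class labels and counts, and `UnequalLabelsInitializer.generate()` repeats each label count-many times):

```python
import torch
from itertools import chain

distribution = {0: 1, 1: 2, 2: 2}  # class -> number of prototypes
clabels, dist = list(distribution.keys()), list(distribution.values())
targets = list(chain(*[[i] * n for i, n in zip(clabels, dist)]))
print(torch.LongTensor(targets))  # tensor([0, 1, 1, 2, 2])
```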
@@ -8,11 +8,11 @@ URL:
 
 import warnings
 from typing import Sequence, Union
 
-from prototorch.datasets.abstract import NumpyDataset
-
 from sklearn.datasets import (load_iris, make_blobs, make_circles,
                               make_classification, make_moons)
 
+from prototorch.datasets.abstract import NumpyDataset
+
 
 class Iris(NumpyDataset):
     """Iris Dataset by Ronald Fisher introduced in 1936.
@@ -40,9 +40,8 @@ import os
 
 import numpy as np
 import torch
-from torchvision.datasets.utils import download_file_from_google_drive
 
 from prototorch.datasets.abstract import ProtoDataset
+from torchvision.datasets.utils import download_file_from_google_drive
 
 
 class Tecator(ProtoDataset):
@@ -2,7 +2,6 @@
 
 import numpy as np
 import torch
-
 from prototorch.functions.helper import (_check_shapes, _int_and_mixed_shape,
                                          equal_int_shape, get_flat)
 
@@ -89,6 +89,6 @@ def _check_shapes(signal_int_shape, proto_int_shape):
 
 def _int_and_mixed_shape(tensor):
     shape = mixed_shape(tensor)
-    int_shape = tuple(i if isinstance(i, int) else None for i in shape)
+    int_shape = tuple([i if isinstance(i, int) else None for i in shape])
 
     return shape, int_shape
@@ -1,32 +1,5 @@
 import torch
 
 
-# Functions
-def gaussian(distances, variance):
-    return torch.exp(-(distances * distances) / (2 * variance))
-
-
-def rank_scaled_gaussian(distances, lambd):
-    order = torch.argsort(distances, dim=1)
-    ranks = torch.argsort(order, dim=1)
-
-    return torch.exp(-torch.exp(-ranks / lambd) * distances)
-
-
-# Modules
-class GaussianPrior(torch.nn.Module):
-    def __init__(self, variance):
-        super().__init__()
-        self.variance = variance
-
-    def forward(self, distances):
-        return gaussian(distances, self.variance)
-
-
-class RankScaledGaussianPrior(torch.nn.Module):
-    def __init__(self, lambd):
-        super().__init__()
-        self.lambd = lambd
-
-    def forward(self, distances):
-        return rank_scaled_gaussian(distances, self.lambd)
+def gaussian(distance, variance):
+    return torch.exp(-(distance * distance) / (2 * variance))
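Aside: the surviving `gaussian` above converts distances into similarities in (0, 1]; a quick standalone check with unit variance:

```python
import torch

distances = torch.tensor([0.0, 1.0, 2.0])
variance = 1.0
print(torch.exp(-(distances * distances) / (2 * variance)))
# tensor([1.0000, 0.6065, 0.1353]): distance 0 maps to 1, far points decay.
```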
@@ -1,5 +1,7 @@
 """ProtoTorch modules."""
 
 from .competitions import *
+from .initializers import *
 from .pooling import *
+from .transformations import *
 from .wrappers import LambdaLayer, LossLayer
@@ -1,7 +1,6 @@
 """ProtoTorch Competition Modules."""
 
 import torch
-
 from prototorch.functions.competitions import knnc, wtac
 
 
prototorch/modules/initializers.py (new file; 61 lines)
@@ -0,0 +1,61 @@
+"""ProtoTroch Module Initializers."""
+
+import torch
+
+
+# Transformations
+class MatrixInitializer(object):
+    def __init__(self, *args, **kwargs):
+        ...
+
+    def generate(self, shape):
+        raise NotImplementedError("Subclasses should implement this!")
+
+
+class ZerosInitializer(MatrixInitializer):
+    def generate(self, shape):
+        return torch.zeros(shape)
+
+
+class OnesInitializer(MatrixInitializer):
+    def __init__(self, scale=1.0):
+        super().__init__()
+        self.scale = scale
+
+    def generate(self, shape):
+        return torch.ones(shape) * self.scale
+
+
+class UniformInitializer(MatrixInitializer):
+    def __init__(self, minimum=0.0, maximum=1.0, scale=1.0):
+        super().__init__()
+        self.minimum = minimum
+        self.maximum = maximum
+        self.scale = scale
+
+    def generate(self, shape):
+        return torch.ones(shape).uniform_(self.minimum,
+                                          self.maximum) * self.scale
+
+
+class DataAwareInitializer(MatrixInitializer):
+    def __init__(self, data, transform=torch.nn.Identity()):
+        super().__init__()
+        self.data = data
+        self.transform = transform
+
+    def __del__(self):
+        del self.data
+
+
+class EigenVectorInitializer(DataAwareInitializer):
+    def generate(self, shape):
+        # TODO
+        raise NotImplementedError()
+
+
+# Aliases
+EV = EigenVectorInitializer
+Random = RandomInitializer = UniformInitializer
+Zeros = ZerosInitializer
+Ones = OnesInitializer
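Aside: a quick sketch of how these matrix initializers behave, assuming this branch is installed so the new module is importable:

```python
from prototorch.modules.initializers import OnesInitializer, UniformInitializer

print(OnesInitializer(scale=0.5).generate((2, 3)))  # all entries 0.5
print(UniformInitializer(minimum=-1.0, maximum=1.0).generate((2, 3)).shape)
# torch.Size([2, 3]), entries drawn uniformly from [-1, 1]
```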
@@ -1,7 +1,6 @@
 """ProtoTorch losses."""
 
 import torch
-
 from prototorch.functions.activations import get_activation
 from prototorch.functions.losses import glvq_loss
 
@@ -1,9 +1,8 @@
 import torch
-from torch import nn
 
 from prototorch.components import LabeledComponents, StratifiedMeanInitializer
 from prototorch.functions.distances import euclidean_distance_matrix
 from prototorch.functions.normalization import orthogonalization
+from torch import nn
 
 
 class GTLVQ(nn.Module):
@@ -1,7 +1,6 @@
 """ProtoTorch Pooling Modules."""
 
 import torch
-
 from prototorch.functions.pooling import (stratified_max_pooling,
                                           stratified_min_pooling,
                                           stratified_prod_pooling,
prototorch/modules/transformations.py (new file; 49 lines)
@@ -0,0 +1,49 @@
+"""ProtoTorch Transformation Layers."""
+
+import torch
+from torch.nn.parameter import Parameter
+
+from .initializers import MatrixInitializer
+
+
+def _precheck_initializer(initializer):
+    if not isinstance(initializer, MatrixInitializer):
+        emsg = f"`initializer` has to be some subtype of " \
+            f"{MatrixInitializer}. " \
+            f"You have provided: {initializer=} instead."
+        raise TypeError(emsg)
+
+
+class Omega(torch.nn.Module):
+    """The Omega mapping used in GMLVQ."""
+    def __init__(self,
+                 num_replicas=1,
+                 input_dim=None,
+                 latent_dim=None,
+                 initializer=None,
+                 *,
+                 initialized_weights=None):
+        super().__init__()
+
+        if initialized_weights is not None:
+            self._register_weights(initialized_weights)
+        else:
+            if num_replicas == 1:
+                shape = (input_dim, latent_dim)
+            else:
+                shape = (num_replicas, input_dim, latent_dim)
+            self._initialize_weights(shape, initializer)
+
+    def _register_weights(self, weights):
+        self.register_parameter("_omega", Parameter(weights))
+
+    def _initialize_weights(self, shape, initializer):
+        _precheck_initializer(initializer)
+        _omega = initializer.generate(shape)
+        self._register_weights(_omega)
+
+    def forward(self):
+        return self._omega
+
+    def extra_repr(self):
+        return f"(omega): (shape: {tuple(self._omega.shape)})"
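Aside: hypothetical usage of the new `Omega` layer together with an initializer from the new `prototorch.modules.initializers` (again assuming this branch is installed):

```python
from prototorch.modules.initializers import UniformInitializer
from prototorch.modules.transformations import Omega

omega = Omega(input_dim=4, latent_dim=2, initializer=UniformInitializer())
print(omega())             # the learnable (4, 2) mapping matrix
print(omega.extra_repr())  # (omega): (shape: (4, 2))
```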
setup.py (23 changes)
@@ -1,12 +1,10 @@
 """
 
-[ASCII-art banner in block letters; alignment lost in capture]
+[ASCII-art banner spelling "ProtoTorch"; alignment lost in capture]
 
 ProtoTorch Core Package
 """
@@ -20,7 +18,7 @@ with open("README.md", "r") as fh:
 
 INSTALL_REQUIRES = [
     "torch>=1.3.1",
-    "torchvision>=0.5.1",
+    "torchvision>=0.5.0",
     "numpy>=1.9.1",
     "sklearn",
 ]
@@ -28,10 +26,7 @@ DATASETS = [
     "requests",
     "tqdm",
 ]
-DEV = [
-    "bumpversion",
-    "pre-commit",
-]
+DEV = ["bumpversion"]
 DOCS = [
     "recommonmark",
     "sphinx",
@@ -48,7 +43,7 @@ ALL = DATASETS + DEV + DOCS + EXAMPLES + TESTS
 
 setup(
     name="prototorch",
-    version="0.5.1",
+    version="0.5.0",
     description="Highly extensible, GPU-supported "
     "Learning Vector Quantization (LVQ) toolbox "
     "built using PyTorch and its nn API.",
@@ -1,8 +1,7 @@
 """ProtoTorch components test suite."""
 
-import torch
-
 import prototorch as pt
+import torch
 
 
 def test_labcomps_zeros_init():
@@ -4,7 +4,6 @@ import unittest
 
 import numpy as np
 import torch
-
 from prototorch.functions import (activations, competitions, distances,
                                   initializers, losses, pooling)
 
|
Reference in New Issue
Block a user