Automatic Formatting.

Alexander Engelsberger
2021-04-23 17:24:53 +02:00
parent e1d56595c1
commit 7c30ffe2c7
28 changed files with 393 additions and 321 deletions
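The diffs below are a pure reformatting pass over the example scripts. The commit message does not name the tool, but the resulting style (bracket coalescing like `Compose([`, tuples kept as `(0.1307, )`) is consistent with yapf rather than black; treat that attribution as an inference. The dominant change: argument lists that previously hung off the opening parenthesis are exploded to one argument per line, with a trailing comma and a dedented closing parenthesis. A minimal runnable sketch of the before/after shape, with `make_layer` as a hypothetical stand-in for the real ProtoTorch calls:

```python
def make_layer(input_dim, nclasses, data):
    """Hypothetical stand-in for a ProtoTorch layer constructor."""
    return (input_dim, nclasses, data)

# Before the formatting pass: continuation lines aligned under the opener.
layer = make_layer(input_dim=2,
                   nclasses=3,
                   data=[0, 1])

# After: one argument per line, trailing comma, dedented closing paren.
layer = make_layer(
    input_dim=2,
    nclasses=3,
    data=[0, 1],
)
```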

View File

@@ -3,13 +3,14 @@
 import numpy as np
 import torch
 from matplotlib import pyplot as plt
-from sklearn.datasets import load_iris
-from sklearn.preprocessing import StandardScaler
-from torchinfo import summary
 from prototorch.functions.competitions import wtac
 from prototorch.functions.distances import euclidean_distance
 from prototorch.modules.losses import GLVQLoss
 from prototorch.modules.prototypes import Prototypes1D
+from sklearn.datasets import load_iris
+from sklearn.preprocessing import StandardScaler
+from torchinfo import summary
 
+
 # Prepare and preprocess the data
 scaler = StandardScaler()
@@ -29,7 +30,8 @@ class Model(torch.nn.Module):
             prototypes_per_class=3,
             nclasses=3,
             prototype_initializer="stratified_random",
-            data=[x_train, y_train])
+            data=[x_train, y_train],
+        )
 
     def forward(self, x):
         protos = self.proto_layer.prototypes
@@ -61,8 +63,10 @@ for epoch in range(70):
     with torch.no_grad():
         pred = wtac(dis, plabels)
         correct = pred.eq(y_in.view_as(pred)).sum().item()
-        acc = 100. * correct / len(x_train)
-        print(f"Epoch: {epoch + 1:03d} Loss: {loss.item():05.02f} Acc: {acc:05.02f}%")
+        acc = 100.0 * correct / len(x_train)
+        print(
+            f"Epoch: {epoch + 1:03d} Loss: {loss.item():05.02f} Acc: {acc:05.02f}%"
+        )
 
     # Take a gradient descent step
     optimizer.zero_grad()
@@ -83,13 +87,15 @@ for epoch in range(70):
     ax.set_ylabel("Data dimension 2")
     cmap = "viridis"
     ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, edgecolor="k")
-    ax.scatter(protos[:, 0],
-               protos[:, 1],
-               c=plabels,
-               cmap=cmap,
-               edgecolor="k",
-               marker="D",
-               s=50)
+    ax.scatter(
+        protos[:, 0],
+        protos[:, 1],
+        c=plabels,
+        cmap=cmap,
+        edgecolor="k",
+        marker="D",
+        s=50,
+    )
 
     # Paint decision regions
     x = np.vstack((x_train, protos))
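Two smaller normalizations also recur throughout the commit: bare float literals gain an explicit zero (`100.` becomes `100.0`) and string quotes are standardized to double quotes. Both are purely cosmetic, as this little check confirms:

```python
# Quote style and the explicit ".0" change the spelling, not the value.
assert 100. == 100.0
assert 'k' == "k"
```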

View File

@@ -20,11 +20,13 @@ class Model(torch.nn.Module):
         """GMLVQ model as a siamese network."""
         super().__init__()
         x, y = train_data.data, train_data.targets
-        self.p1 = Prototypes1D(input_dim=100,
-                               prototypes_per_class=2,
-                               nclasses=2,
-                               prototype_initializer="stratified_random",
-                               data=[x, y])
+        self.p1 = Prototypes1D(
+            input_dim=100,
+            prototypes_per_class=2,
+            nclasses=2,
+            prototype_initializer="stratified_random",
+            data=[x, y],
+        )
         self.omega = torch.nn.Linear(in_features=100,
                                      out_features=100,
                                      bias=False)
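Note that the `self.omega = torch.nn.Linear(...)` call just below the reformatted block keeps the old aligned-continuation style. A plausible reading, though the formatter configuration is not shown here, is that only argument lists ending in a trailing comma are forced into the one-per-line layout, so calls without one keep the aligned form.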

View File

@@ -13,8 +13,9 @@ import torch
 import torch.nn as nn
 import torchvision
 from torchvision import transforms
-from prototorch.modules.losses import GLVQLoss
 from prototorch.functions.helper import calculate_prototype_accuracy
+from prototorch.modules.losses import GLVQLoss
 from prototorch.modules.models import GTLVQ
 
+
 # Parameters and options
@@ -26,32 +27,40 @@ momentum = 0.5
 log_interval = 10
 cuda = "cuda:1"
 random_seed = 1
-device = torch.device(cuda if torch.cuda.is_available() else 'cpu')
+device = torch.device(cuda if torch.cuda.is_available() else "cpu")
 
 # Configures reproducibility
 torch.manual_seed(random_seed)
 np.random.seed(random_seed)
 
 # Prepare and preprocess the data
-train_loader = torch.utils.data.DataLoader(torchvision.datasets.MNIST(
-    './files/',
-    train=True,
-    download=True,
-    transform=torchvision.transforms.Compose(
-        [transforms.ToTensor(),
-         transforms.Normalize((0.1307, ), (0.3081, ))])),
-                                           batch_size=batch_size_train,
-                                           shuffle=True)
+train_loader = torch.utils.data.DataLoader(
+    torchvision.datasets.MNIST(
+        "./files/",
+        train=True,
+        download=True,
+        transform=torchvision.transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize((0.1307, ), (0.3081, ))
+        ]),
+    ),
+    batch_size=batch_size_train,
+    shuffle=True,
+)
 
-test_loader = torch.utils.data.DataLoader(torchvision.datasets.MNIST(
-    './files/',
-    train=False,
-    download=True,
-    transform=torchvision.transforms.Compose(
-        [transforms.ToTensor(),
-         transforms.Normalize((0.1307, ), (0.3081, ))])),
-                                          batch_size=batch_size_test,
-                                          shuffle=True)
+test_loader = torch.utils.data.DataLoader(
+    torchvision.datasets.MNIST(
+        "./files/",
+        train=False,
+        download=True,
+        transform=torchvision.transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize((0.1307, ), (0.3081, ))
+        ]),
+    ),
+    batch_size=batch_size_test,
+    shuffle=True,
+)
 
 
 # Define the GLVQ model plus appropriate feature extractor
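The two `DataLoader` hunks above also show how nested calls are laid out: each call level gets its own four-space indentation step, while the list handed to `Compose` keeps coalesced brackets, opening with `([` on the call line and closing with `])`. The same shape with plain builtins, as a runnable sketch (yapf-like layout assumed):

```python
# Outer call: one argument per line. Inner list: coalesced brackets,
# i.e. "([" opens on the call line and "])" closes the block.
result = sorted(
    filter(None, [
        "ToTensor",
        "",
        "Normalize",
    ]),
    key=str.lower,
)
```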
@@ -67,25 +76,34 @@ class CNNGTLVQ(torch.nn.Module):
     ):
         super(CNNGTLVQ, self).__init__()
 
-        #Feature Extractor - Simple CNN
-        self.fe = nn.Sequential(nn.Conv2d(1, 32, 3, 1), nn.ReLU(),
-                                nn.Conv2d(32, 64, 3, 1), nn.ReLU(),
-                                nn.MaxPool2d(2), nn.Dropout(0.25),
-                                nn.Flatten(), nn.Linear(9216, bottleneck_dim),
-                                nn.Dropout(0.5), nn.LeakyReLU(),
-                                nn.LayerNorm(bottleneck_dim))
+        # Feature Extractor - Simple CNN
+        self.fe = nn.Sequential(
+            nn.Conv2d(1, 32, 3, 1),
+            nn.ReLU(),
+            nn.Conv2d(32, 64, 3, 1),
+            nn.ReLU(),
+            nn.MaxPool2d(2),
+            nn.Dropout(0.25),
+            nn.Flatten(),
+            nn.Linear(9216, bottleneck_dim),
+            nn.Dropout(0.5),
+            nn.LeakyReLU(),
+            nn.LayerNorm(bottleneck_dim),
+        )
 
         # Forward pass of subspace and prototype initialization data through feature extractor
         subspace_data = self.fe(subspace_data)
         prototype_data[0] = self.fe(prototype_data[0])
 
         # Initialization of GTLVQ
-        self.gtlvq = GTLVQ(num_classes,
-                           subspace_data,
-                           prototype_data,
-                           tangent_projection_type=tangent_projection_type,
-                           feature_dim=bottleneck_dim,
-                           prototypes_per_class=prototypes_per_class)
+        self.gtlvq = GTLVQ(
+            num_classes,
+            subspace_data,
+            prototype_data,
+            tangent_projection_type=tangent_projection_type,
+            feature_dim=bottleneck_dim,
+            prototypes_per_class=prototypes_per_class,
+        )
 
     def forward(self, x):
         # Feature Extraction
@@ -103,20 +121,24 @@ subspace_data = torch.cat(
 prototype_data = next(iter(train_loader))
 
 # Build the CNN GTLVQ model
-model = CNNGTLVQ(10,
-                 subspace_data,
-                 prototype_data,
-                 tangent_projection_type="local",
-                 bottleneck_dim=128).to(device)
+model = CNNGTLVQ(
+    10,
+    subspace_data,
+    prototype_data,
+    tangent_projection_type="local",
+    bottleneck_dim=128,
+).to(device)
 
 # Optimize with the Adam optimizer from `torch.optim`
-optimizer = torch.optim.Adam([{
-    'params': model.fe.parameters()
-}, {
-    'params': model.gtlvq.parameters()
-}],
-                             lr=learning_rate)
-criterion = GLVQLoss(squashing='sigmoid_beta', beta=10)
+optimizer = torch.optim.Adam(
+    [{
+        "params": model.fe.parameters()
+    }, {
+        "params": model.gtlvq.parameters()
+    }],
+    lr=learning_rate,
+)
+criterion = GLVQLoss(squashing="sigmoid_beta", beta=10)
 
 # Training loop
 for epoch in range(n_epochs):
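One detail in the optimizer hunk above is independent of formatting: `torch.optim.Adam` receives two parameter groups, one for the feature extractor and one for the GTLVQ layer, sharing a single learning rate. This list-of-dicts form is the standard `torch.optim` API, and each dict could also carry its own options. A self-contained sketch with stand-in modules:

```python
import torch

fe = torch.nn.Linear(8, 4)    # stand-in for the CNN feature extractor
head = torch.nn.Linear(4, 2)  # stand-in for the GTLVQ layer

# Two parameter groups sharing one learning rate; per-group options
# (e.g. a different "lr") could be added inside either dict.
optimizer = torch.optim.Adam(
    [{
        "params": fe.parameters()
    }, {
        "params": head.parameters()
    }],
    lr=0.01,
)
```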
@@ -139,8 +161,8 @@ for epoch in range(n_epochs):
         if batch_idx % log_interval == 0:
             acc = calculate_prototype_accuracy(distances, y_train, plabels)
             print(
-                f'Epoch: {epoch + 1:02d}/{n_epochs:02d} Epoch Progress: {100. * batch_idx / len(train_loader):02.02f} % Loss: {loss.item():02.02f} \
-            Train Acc: {acc.item():02.02f}')
+                f"Epoch: {epoch + 1:02d}/{n_epochs:02d} Epoch Progress: {100. * batch_idx / len(train_loader):02.02f} % Loss: {loss.item():02.02f} \
+            Train Acc: {acc.item():02.02f}")
 
     # Test
     with torch.no_grad():
@@ -154,9 +176,9 @@ for epoch in range(n_epochs):
         i = torch.argmin(test_distances, 1)
         correct += torch.sum(y_test == test_plabels[i])
         total += y_test.size(0)
-    print('Accuracy of the network on the test images: %d %%' %
+    print("Accuracy of the network on the test images: %d %%" %
           (torch.true_divide(correct, total) * 100))
 
 # Save the model
-PATH = './glvq_mnist_model.pth'
+PATH = "./glvq_mnist_model.pth"
 torch.save(model.state_dict(), PATH)

View File

@@ -22,10 +22,12 @@ class Model(torch.nn.Module):
     def __init__(self):
         """Local-GMLVQ model."""
         super().__init__()
-        self.p1 = Prototypes1D(input_dim=2,
-                               prototype_distribution=[1, 2, 2],
-                               prototype_initializer="stratified_random",
-                               data=[x_train, y_train])
+        self.p1 = Prototypes1D(
+            input_dim=2,
+            prototype_distribution=[1, 2, 2],
+            prototype_initializer="stratified_random",
+            data=[x_train, y_train],
+        )
         omegas = torch.zeros(5, 2, 2)
         self.omegas = torch.nn.Parameter(omegas)
         eye_(self.omegas)
@@ -76,14 +78,16 @@ for epoch in range(100):
     ax.set_xlabel("Data dimension 1")
     ax.set_ylabel("Data dimension 2")
     cmap = "viridis"
-    ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, edgecolor='k')
-    ax.scatter(protos[:, 0],
-               protos[:, 1],
-               c=plabels,
-               cmap=cmap,
-               edgecolor='k',
-               marker='D',
-               s=50)
+    ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, edgecolor="k")
+    ax.scatter(
+        protos[:, 0],
+        protos[:, 1],
+        c=plabels,
+        cmap=cmap,
+        edgecolor="k",
+        marker="D",
+        s=50,
+    )
 
     # Paint decision regions
     x = np.vstack((x_train, protos))
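Since the commit is formatting-only, behavior should be unchanged; a quick sanity check is that the formatter is now idempotent on the tree. A sketch of that check, assuming yapf was the tool (the commit does not say) and that it is installed:

```python
import subprocess

# Recent yapf versions exit non-zero from --diff when any file would
# still be reformatted, so check=True raises if formatting is not clean.
# "examples/" is an assumed path; point it at the reformatted directory.
subprocess.run(["yapf", "--diff", "--recursive", "examples/"], check=True)
```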