Use 'num_' in all variable names
@@ -3,14 +3,13 @@
 import numpy as np
 import torch
 from matplotlib import pyplot as plt
-from sklearn.datasets import load_iris
-from sklearn.preprocessing import StandardScaler
-from torchinfo import summary
-
 from prototorch.functions.competitions import wtac
 from prototorch.functions.distances import euclidean_distance
 from prototorch.modules.losses import GLVQLoss
 from prototorch.modules.prototypes import Prototypes1D
+from sklearn.datasets import load_iris
+from sklearn.preprocessing import StandardScaler
+from torchinfo import summary

 # Prepare and preprocess the data
 scaler = StandardScaler()
@@ -28,7 +27,7 @@ class Model(torch.nn.Module):
         self.proto_layer = Prototypes1D(
             input_dim=2,
             prototypes_per_class=3,
-            nclasses=3,
+            num_classes=3,
             prototype_initializer="stratified_random",
             data=[x_train, y_train],
         )
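The rename above only touches the keyword (`nclasses` becomes `num_classes`); the layer itself still allocates num_classes * prototypes_per_class labelled prototypes, and the Iris example classifies by winner-takes-all over Euclidean distances. For readers unfamiliar with that pattern, here is a minimal, library-free sketch in plain PyTorch. It is an illustration only, not ProtoTorch's Prototypes1D or wtac implementation, and every name in it is made up for the sketch.

import torch

# Plain-PyTorch sketch (not ProtoTorch's Prototypes1D/wtac): a labelled
# prototype tensor plus winner-takes-all classification over Euclidean distances.
num_classes = 3
prototypes_per_class = 3
input_dim = 2

# One label per prototype, e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2].
proto_labels = torch.arange(num_classes).repeat_interleave(prototypes_per_class)
prototypes = torch.nn.Parameter(
    torch.randn(num_classes * prototypes_per_class, input_dim))

def wta_classify(x):
    # Pairwise Euclidean distances, shape (batch, num_prototypes).
    distances = torch.cdist(x, prototypes)
    # Winner takes all: each sample gets the label of its closest prototype.
    return proto_labels[distances.argmin(dim=1)]

print(wta_classify(torch.randn(5, input_dim)))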
@@ -2,13 +2,12 @@

 import matplotlib.pyplot as plt
 import torch
-from torch.utils.data import DataLoader
-
 from prototorch.datasets.tecator import Tecator
 from prototorch.functions.distances import sed
 from prototorch.modules import Prototypes1D
 from prototorch.modules.losses import GLVQLoss
 from prototorch.utils.colors import get_legend_handles
+from torch.utils.data import DataLoader

 # Prepare the dataset and dataloader
 train_data = Tecator(root="./artifacts", train=True)
@@ -23,7 +22,7 @@ class Model(torch.nn.Module):
         self.p1 = Prototypes1D(
             input_dim=100,
             prototypes_per_class=2,
-            nclasses=2,
+            num_classes=2,
             prototype_initializer="stratified_random",
             data=[x, y],
         )
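Both of these prototype layers are trained against GLVQLoss(squashing="sigmoid_beta", beta=10). As a rough guide to what that objective computes, here is a small self-contained sketch of the classical GLVQ cost: for each sample, take the distance to the closest prototype of the correct class (d_plus) and to the closest prototype of any wrong class (d_minus), form mu = (d_plus - d_minus) / (d_plus + d_minus), and squash it with a steep sigmoid. This is an illustration of the idea, not ProtoTorch's GLVQLoss implementation, and the helper name is invented.

import torch

# Sketch of the GLVQ cost (not ProtoTorch's GLVQLoss implementation).
def glvq_loss_sketch(distances, proto_labels, targets, beta=10):
    # distances: (batch, num_prototypes); proto_labels: (num_prototypes,); targets: (batch,)
    matches = proto_labels.unsqueeze(0) == targets.unsqueeze(1)
    inf = torch.tensor(float("inf"))
    # Closest correct-class and closest wrong-class prototype per sample.
    d_plus = torch.where(matches, distances, inf).min(dim=1).values
    d_minus = torch.where(~matches, distances, inf).min(dim=1).values
    mu = (d_plus - d_minus) / (d_plus + d_minus)
    # "sigmoid_beta"-style squashing of the relative distance difference.
    return torch.sigmoid(beta * mu).mean()

distances = torch.rand(4, 6)                     # fake distances: 4 samples, 6 prototypes
proto_labels = torch.tensor([0, 0, 0, 1, 1, 1])  # 3 prototypes per class, 2 classes
targets = torch.tensor([0, 1, 0, 1])
print(glvq_loss_sketch(distances, proto_labels, targets))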
@@ -12,14 +12,13 @@ import numpy as np
 import torch
 import torch.nn as nn
 import torchvision
-from torchvision import transforms
-
 from prototorch.functions.helper import calculate_prototype_accuracy
 from prototorch.modules.losses import GLVQLoss
 from prototorch.modules.models import GTLVQ
+from torchvision import transforms

 # Parameters and options
-n_epochs = 50
+num_epochs = 50
 batch_size_train = 64
 batch_size_test = 1000
 learning_rate = 0.1
@@ -141,7 +140,7 @@ optimizer = torch.optim.Adam(
 criterion = GLVQLoss(squashing="sigmoid_beta", beta=10)

 # Training loop
-for epoch in range(n_epochs):
+for epoch in range(num_epochs):
     for batch_idx, (x_train, y_train) in enumerate(train_loader):
         model.train()
         x_train, y_train = x_train.to(device), y_train.to(device)
@@ -161,7 +160,7 @@ for epoch in range(n_epochs):
         if batch_idx % log_interval == 0:
             acc = calculate_prototype_accuracy(distances, y_train, plabels)
             print(
-                f"Epoch: {epoch + 1:02d}/{n_epochs:02d} Epoch Progress: {100. * batch_idx / len(train_loader):02.02f} % Loss: {loss.item():02.02f} \
+                f"Epoch: {epoch + 1:02d}/{num_epochs:02d} Epoch Progress: {100. * batch_idx / len(train_loader):02.02f} % Loss: {loss.item():02.02f} \
                 Train Acc: {acc.item():02.02f}")

 # Test
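To see the renamed `num_epochs` in a runnable context, here is a self-contained skeleton of the epoch/batch loop above. The dataset, model, loss, and hyperparameter values are stand-ins invented for the sketch, not the example's actual MNIST/GTLVQ pipeline.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Stand-in data, model, loss, and optimizer so the loop runs on its own.
num_epochs = 3
log_interval = 2

train_loader = DataLoader(
    TensorDataset(torch.randn(256, 100), torch.randint(0, 2, (256,))),
    batch_size=64,
    shuffle=True,
)
model = torch.nn.Linear(100, 2)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

# Epoch/batch training loop in the same shape as the example above.
for epoch in range(num_epochs):
    for batch_idx, (x_train, y_train) in enumerate(train_loader):
        model.train()
        optimizer.zero_grad()
        loss = criterion(model(x_train), y_train)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print(f"Epoch: {epoch + 1:02d}/{num_epochs:02d} Loss: {loss.item():02.02f}")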