Use 'num_' in all variable names

This commit is contained in:
Alexander Engelsberger 2021-05-25 15:41:10 +02:00
parent e7e6bf9173
commit 72e064338c
8 changed files with 24 additions and 23 deletions

View File

@@ -24,10 +24,10 @@ if __name__ == "__main__":
batch_size=150) batch_size=150)
# Hyperparameters # Hyperparameters
nclasses = 3 num_classes = 3
prototypes_per_class = 2 prototypes_per_class = 2
hparams = dict( hparams = dict(
distribution=(nclasses, prototypes_per_class), distribution=(num_classes, prototypes_per_class),
lr=0.01, lr=0.01,
) )

View File

@@ -13,7 +13,7 @@ if __name__ == "__main__":
args = parser.parse_args() args = parser.parse_args()
# Dataset # Dataset
train_ds = pt.datasets.Spiral(n_samples=600, noise=0.6) train_ds = pt.datasets.Spiral(num_samples=600, noise=0.6)
# Dataloaders # Dataloaders
train_loader = torch.utils.data.DataLoader(train_ds, train_loader = torch.utils.data.DataLoader(train_ds,
@@ -21,10 +21,10 @@ if __name__ == "__main__":
batch_size=256) batch_size=256)
# Hyperparameters # Hyperparameters
nclasses = 2 num_classes = 2
prototypes_per_class = 20 prototypes_per_class = 20
hparams = dict( hparams = dict(
distribution=(nclasses, prototypes_per_class), distribution=(num_classes, prototypes_per_class),
transfer_function="sigmoid_beta", transfer_function="sigmoid_beta",
transfer_beta=10.0, transfer_beta=10.0,
lr=0.01, lr=0.01,

View File

@@ -22,10 +22,10 @@ if __name__ == "__main__":
num_workers=0, num_workers=0,
batch_size=150) batch_size=150)
# Hyperparameters # Hyperparameters
nclasses = 3 num_classes = 3
prototypes_per_class = 1 prototypes_per_class = 1
hparams = dict( hparams = dict(
distribution=(nclasses, prototypes_per_class), distribution=(num_classes, prototypes_per_class),
input_dim=x_train.shape[1], input_dim=x_train.shape[1],
latent_dim=x_train.shape[1], latent_dim=x_train.shape[1],
proto_lr=0.01, proto_lr=0.01,

View File

@@ -41,12 +41,12 @@ if __name__ == "__main__":
batch_size=256) batch_size=256)
# Hyperparameters # Hyperparameters
nclasses = 10 num_classes = 10
prototypes_per_class = 2 prototypes_per_class = 2
hparams = dict( hparams = dict(
input_dim=28 * 28, input_dim=28 * 28,
latent_dim=28 * 28, latent_dim=28 * 28,
distribution=(nclasses, prototypes_per_class), distribution=(num_classes, prototypes_per_class),
proto_lr=0.01, proto_lr=0.01,
bb_lr=0.01, bb_lr=0.01,
) )
@@ -61,7 +61,7 @@ if __name__ == "__main__":
# Callbacks # Callbacks
vis = pt.models.VisImgComp( vis = pt.models.VisImgComp(
data=train_ds, data=train_ds,
nrow=5, num_columns=5,
show=False, show=False,
tensorboard=True, tensorboard=True,
random_data=20, random_data=20,

View File

@@ -24,10 +24,10 @@ if __name__ == "__main__":
test_loader = torch.utils.data.DataLoader(test_ds, batch_size=32) test_loader = torch.utils.data.DataLoader(test_ds, batch_size=32)
# Hyperparameters # Hyperparameters
nclasses = 2 num_classes = 2
prototypes_per_class = 2 prototypes_per_class = 2
hparams = dict( hparams = dict(
distribution=(nclasses, prototypes_per_class), distribution=(num_classes, prototypes_per_class),
input_dim=100, input_dim=100,
latent_dim=2, latent_dim=2,
proto_lr=0.001, proto_lr=0.001,

View File

@@ -31,9 +31,9 @@ class PrototypeImageModel(pl.LightningModule):
def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx): def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
self.proto_layer.components.data.clamp_(0.0, 1.0) self.proto_layer.components.data.clamp_(0.0, 1.0)
def get_prototype_grid(self, nrow=2, return_channels_last=True): def get_prototype_grid(self, num_columns=2, return_channels_last=True):
from torchvision.utils import make_grid from torchvision.utils import make_grid
grid = make_grid(self.components, nrow=nrow) grid = make_grid(self.components, nrow=num_columns)
if return_channels_last: if return_channels_last:
grid = grid.permute((1, 2, 0)) grid = grid.permute((1, 2, 0))
return grid.cpu() return grid.cpu()

View File

@@ -58,9 +58,9 @@ class MarginLoss(torch.nn.modules.loss._Loss):
class ReasoningLayer(torch.nn.Module): class ReasoningLayer(torch.nn.Module):
def __init__(self, num_components, num_classes, n_replicas=1): def __init__(self, num_components, num_classes, num_replicas=1):
super().__init__() super().__init__()
self.n_replicas = n_replicas self.num_replicas = num_replicas
self.num_classes = num_classes self.num_classes = num_classes
probabilities_init = torch.zeros(2, 1, num_components, probabilities_init = torch.zeros(2, 1, num_components,
self.num_classes) self.num_classes)
@@ -122,8 +122,8 @@ class CBC(SiameseGLVQ):
x, y = batch x, y = batch
# x = x.view(x.size(0), -1) # x = x.view(x.size(0), -1)
y_pred = self(x) y_pred = self(x)
nclasses = self.reasoning_layer.num_classes num_classes = self.reasoning_layer.num_classes
y_true = torch.nn.functional.one_hot(y.long(), num_classes=nclasses) y_true = torch.nn.functional.one_hot(y.long(), num_classes=num_classes)
loss = MarginLoss(self.margin)(y_pred, y_true).mean(dim=0) loss = MarginLoss(self.margin)(y_pred, y_true).mean(dim=0)
return y_pred, loss return y_pred, loss

View File

@@ -246,14 +246,14 @@ class VisImgComp(Vis2DAbstract):
*args, *args,
random_data=0, random_data=0,
dataformats="CHW", dataformats="CHW",
nrow=2, num_columns=2,
add_embedding=False, add_embedding=False,
embedding_data=100, embedding_data=100,
**kwargs): **kwargs):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self.random_data = random_data self.random_data = random_data
self.dataformats = dataformats self.dataformats = dataformats
self.nrow = nrow self.num_columns = num_columns
self.add_embedding = add_embedding self.add_embedding = add_embedding
self.embedding_data = embedding_data self.embedding_data = embedding_data
@@ -278,7 +278,7 @@ class VisImgComp(Vis2DAbstract):
size=self.random_data, size=self.random_data,
replace=False) replace=False)
data = self.x_train[ind] data = self.x_train[ind]
grid = torchvision.utils.make_grid(data, nrow=self.nrow) grid = torchvision.utils.make_grid(data, nrow=self.num_columns)
tb.add_image(tag="Data", tb.add_image(tag="Data",
img_tensor=grid, img_tensor=grid,
global_step=None, global_step=None,
@@ -288,7 +288,7 @@ class VisImgComp(Vis2DAbstract):
tb = pl_module.logger.experiment tb = pl_module.logger.experiment
components = pl_module.components components = pl_module.components
grid = torchvision.utils.make_grid(components, nrow=self.nrow) grid = torchvision.utils.make_grid(components, nrow=self.num_columns)
tb.add_image( tb.add_image(
tag="Components", tag="Components",
img_tensor=grid, img_tensor=grid,
@@ -302,7 +302,8 @@ class VisImgComp(Vis2DAbstract):
if self.show: if self.show:
components = pl_module.components components = pl_module.components
grid = torchvision.utils.make_grid(components, nrow=self.nrow) grid = torchvision.utils.make_grid(components,
nrow=self.num_columns)
plt.imshow(grid.permute((1, 2, 0)).cpu(), cmap=self.cmap) plt.imshow(grid.permute((1, 2, 0)).cpu(), cmap=self.cmap)
self.log_and_display(trainer, pl_module) self.log_and_display(trainer, pl_module)