feat: add binnam_xor.py
This commit is contained in:
parent
823b05e390
commit
cb7fb91c95
86
examples/binnam_xor.py
Normal file
86
examples/binnam_xor.py
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
"""Neural Additive Model (NAM) example for binary classification."""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
|
||||||
|
import prototorch as pt
|
||||||
|
import pytorch_lightning as pl
|
||||||
|
import torch
|
||||||
|
from matplotlib import pyplot as plt
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# Command-line arguments
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser = pl.Trainer.add_argparse_args(parser)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
# Dataset
|
||||||
|
train_ds = pt.datasets.XOR()
|
||||||
|
|
||||||
|
# Dataloaders
|
||||||
|
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=256)
|
||||||
|
|
||||||
|
# Hyperparameters
|
||||||
|
hparams = dict(lr=0.001)
|
||||||
|
|
||||||
|
# Define the feature extractor
|
||||||
|
class FE(torch.nn.Module):
|
||||||
|
def __init__(self, hidden_size=10):
|
||||||
|
super().__init__()
|
||||||
|
self.modules_list = torch.nn.ModuleList([
|
||||||
|
torch.nn.Linear(1, hidden_size),
|
||||||
|
torch.nn.ReLU(),
|
||||||
|
torch.nn.Linear(hidden_size, 1),
|
||||||
|
torch.nn.ReLU(),
|
||||||
|
])
|
||||||
|
|
||||||
|
def forward(self, x):
|
||||||
|
for m in self.modules_list:
|
||||||
|
x = m(x)
|
||||||
|
return x
|
||||||
|
|
||||||
|
# Initialize the model
|
||||||
|
model = pt.models.BinaryNAM(
|
||||||
|
hparams,
|
||||||
|
extractors=torch.nn.ModuleList([FE(20) for _ in range(2)]),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Compute intermediate input and output sizes
|
||||||
|
model.example_input_array = torch.zeros(4, 2)
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
print(model)
|
||||||
|
|
||||||
|
# Callbacks
|
||||||
|
vis = pt.models.Vis2D(data=train_ds)
|
||||||
|
es = pl.callbacks.EarlyStopping(
|
||||||
|
monitor="train_loss",
|
||||||
|
min_delta=0.001,
|
||||||
|
patience=50,
|
||||||
|
mode="min",
|
||||||
|
verbose=False,
|
||||||
|
check_on_train_epoch_end=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Setup trainer
|
||||||
|
trainer = pl.Trainer.from_argparse_args(
|
||||||
|
args,
|
||||||
|
callbacks=[
|
||||||
|
vis,
|
||||||
|
es,
|
||||||
|
],
|
||||||
|
terminate_on_nan=True,
|
||||||
|
weights_summary="full",
|
||||||
|
accelerator="ddp",
|
||||||
|
)
|
||||||
|
|
||||||
|
# Training loop
|
||||||
|
trainer.fit(model, train_loader)
|
||||||
|
|
||||||
|
# Visualize extractor shape functions
|
||||||
|
fig, axes = plt.subplots(2)
|
||||||
|
for i, ax in enumerate(axes.flat):
|
||||||
|
x = torch.linspace(0, 1, 100) # TODO use min/max from data
|
||||||
|
y = model.extractors[i](x.view(100, 1)).squeeze().detach()
|
||||||
|
ax.plot(x, y)
|
||||||
|
ax.set(title=f"Feature {i + 1}")
|
||||||
|
plt.show()
|
@ -16,7 +16,14 @@ class BinaryNAM(ProtoTorchBolt):
|
|||||||
def __init__(self, hparams: dict, extractors: torch.nn.ModuleList,
             **kwargs):
    """Build a binary NAM from per-feature extractors.

    Args:
        hparams: hyperparameter dict, forwarded to the parent class;
            a "threshold" entry (used for hard predictions) defaults
            to 0.5 if absent.
        extractors: one extractor module per input feature; the number
            of extractors fixes the input width of the final linear
            combination layer.
        **kwargs: passed through to the parent constructor.
    """
    super().__init__(hparams, **kwargs)

    # Default hparams
    self.hparams.setdefault("threshold", 0.5)

    self.extractors = extractors
    # Combine the per-feature outputs into a single logit.
    self.linear = torch.nn.Linear(in_features=len(extractors),
                                  out_features=1,
                                  bias=True)
||||||
|
|
||||||
def extract(self, x):
|
def extract(self, x):
|
||||||
"""Apply the local extractors batch-wise on features."""
|
"""Apply the local extractors batch-wise on features."""
|
||||||
@ -26,12 +33,13 @@ class BinaryNAM(ProtoTorchBolt):
|
|||||||
return out
|
return out
|
||||||
|
|
||||||
def forward(self, x):
    """Return the predicted positive-class probability for each input.

    Per-feature extractor outputs are combined by the linear layer and
    squashed with a sigmoid into (0, 1).
    """
    x = self.extract(x)
    x = self.linear(x)
    return torch.sigmoid(x)
|
||||||
|
|
||||||
def training_step(self, batch, batch_idx, optimizer_idx=None):
|
def training_step(self, batch, batch_idx, optimizer_idx=None):
|
||||||
x, y = batch
|
x, y = batch
|
||||||
preds = self(x)
|
preds = self(x).squeeze()
|
||||||
train_loss = torch.nn.functional.binary_cross_entropy(preds, y.float())
|
train_loss = torch.nn.functional.binary_cross_entropy(preds, y.float())
|
||||||
self.log("train_loss", train_loss)
|
self.log("train_loss", train_loss)
|
||||||
accuracy = torchmetrics.functional.accuracy(preds.int(), y.int())
|
accuracy = torchmetrics.functional.accuracy(preds.int(), y.int())
|
||||||
@ -42,3 +50,9 @@ class BinaryNAM(ProtoTorchBolt):
|
|||||||
prog_bar=True,
|
prog_bar=True,
|
||||||
logger=True)
|
logger=True)
|
||||||
return train_loss
|
return train_loss
|
||||||
|
|
||||||
|
def predict(self, x):
    """Return hard 0/1 predictions by thresholding the forward output.

    Entries whose predicted probability exceeds ``hparams.threshold``
    (default 0.5, set in ``__init__``) are labeled 1, all others 0.
    The result has the same shape as the forward output and lives on
    ``self.device``.
    """
    out = self(x)
    pred = torch.zeros_like(out, device=self.device)
    pred[out > self.hparams.threshold] = 1
    return pred
|
||||||
|
@ -117,6 +117,24 @@ class Vis2DAbstract(pl.Callback):
|
|||||||
plt.close()
|
plt.close()
|
||||||
|
|
||||||
|
|
||||||
|
class Vis2D(Vis2DAbstract):
    """Callback that draws 2D training data and the model's decision regions."""

    def on_epoch_end(self, trainer, pl_module):
        # Skip epochs filtered out by the base-class precheck.
        # NOTE(review): returning True here mirrors a skip, but other
        # callbacks in this file appear to fall through — confirm the
        # return value is actually consumed by the caller.
        if not self.precheck(trainer):
            return True

        x_train, y_train = self.x_train, self.y_train
        ax = self.setup_ax(xlabel="Data dimension 1",
                           ylabel="Data dimension 2")
        self.plot_data(ax, x_train, y_train)
        # Evaluate the model on a dense 2D mesh covering the data
        # (plus border) to shade the predicted-class regions.
        mesh_input, xx, yy = mesh2d(x_train, self.border, self.resolution)
        mesh_input = torch.from_numpy(mesh_input).type_as(x_train)
        y_pred = pl_module.predict(mesh_input)
        y_pred = y_pred.cpu().reshape(xx.shape)
        ax.contourf(xx, yy, y_pred, cmap=self.cmap, alpha=0.35)

        self.log_and_display(trainer, pl_module)
|
||||||
|
|
||||||
|
|
||||||
class VisGLVQ2D(Vis2DAbstract):
|
class VisGLVQ2D(Vis2DAbstract):
|
||||||
def on_epoch_end(self, trainer, pl_module):
|
def on_epoch_end(self, trainer, pl_module):
|
||||||
if not self.precheck(trainer):
|
if not self.precheck(trainer):
|
||||||
|
Loading…
Reference in New Issue
Block a user