[FEATURE] Update pruning callback to re-add pruned prototypes
parent 42d974e08c
commit 20471bfb1c
@@ -30,7 +30,7 @@ if __name__ == "__main__":
     prototypes_per_class = num_clusters * 5
     hparams = dict(
         distribution=(num_classes, prototypes_per_class),
-        lr=0.3,
+        lr=0.1,
     )
 
     # Initialize the model
@@ -39,24 +39,21 @@ if __name__ == "__main__":
         prototype_initializer=pt.components.Ones(2, scale=3),
     )
 
-    # Summary
-    print(model)
-
     # Callbacks
     vis = pt.models.VisGLVQ2D(train_ds)
     pruning = pt.models.PruneLoserPrototypes(
         threshold=0.01,  # prune prototype if it wins less than 1%
-        idle_epochs=10,  # pruning too early may cause problems
-        prune_quota_per_epoch=5,  # prune at most 5 prototypes per epoch
-        frequency=2,  # prune every second epoch
+        idle_epochs=20,  # pruning too early may cause problems
+        prune_quota_per_epoch=2,  # prune at most 2 prototypes per epoch
+        frequency=1,  # prune every epoch
         verbose=True,
     )
 
     es = pl.callbacks.EarlyStopping(
         monitor="train_loss",
         min_delta=0.001,
-        patience=15,
+        patience=20,
         mode="min",
         verbose=True,
         check_on_train_epoch_end=True,
     )
@@ -68,6 +65,7 @@ if __name__ == "__main__":
             pruning,
             es,
         ],
         progress_bar_refresh_rate=0,
         terminate_on_nan=True,
         weights_summary=None,
         accelerator="ddp",
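Note that the example hunks above only retune the pruning and early-stopping hyperparameters; they do not yet exercise the new replace / initializer options added to the callback below. A minimal sketch of how the example could opt in, reusing the pt.components.Ones(2, scale=3) initializer already shown in this diff (the keyword values here are illustrative, not part of this commit):

    pruning = pt.models.PruneLoserPrototypes(
        threshold=0.01,  # prune prototypes that win less than 1% of the time
        idle_epochs=20,  # let training settle before the first pruning pass
        prune_quota_per_epoch=2,  # remove at most 2 prototypes per epoch
        frequency=1,  # check for loser prototypes every epoch
        replace=True,  # re-add as many prototypes per class as were pruned
        initializer=pt.components.Ones(2, scale=3),  # used for the re-added prototypes
        verbose=True,
    )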
@@ -10,12 +10,16 @@ class PruneLoserPrototypes(pl.Callback):
                  idle_epochs=10,
                  prune_quota_per_epoch=-1,
                  frequency=1,
+                 replace=False,
+                 initializer=None,
                  verbose=False):
         self.threshold = threshold  # minimum win ratio
         self.idle_epochs = idle_epochs  # epochs to wait before pruning
         self.prune_quota_per_epoch = prune_quota_per_epoch
         self.frequency = frequency
+        self.replace = replace
         self.verbose = verbose
+        self.initializer = initializer
 
     def on_epoch_end(self, trainer, pl_module):
         if (trainer.current_epoch + 1) < self.idle_epochs:
@@ -24,17 +28,29 @@ class PruneLoserPrototypes(pl.Callback):
             return None
         ratios = pl_module.prototype_win_ratios.mean(dim=0)
         to_prune = torch.arange(len(ratios))[ratios < self.threshold]
+        prune_labels = pl_module.prototype_labels[to_prune.tolist()]
         if self.prune_quota_per_epoch > 0:
             to_prune = to_prune[:self.prune_quota_per_epoch]
+            prune_labels = prune_labels[:self.prune_quota_per_epoch]
         if len(to_prune) > 0:
             if self.verbose:
                 print(f"\nPrototype win ratios: {ratios}")
                 print(f"Pruning prototypes at: {to_prune.tolist()}")
             cur_num_protos = pl_module.num_prototypes
             pl_module.remove_prototypes(indices=to_prune)
+            if self.replace:
+                if self.verbose:
+                    print(f"Re-adding prototypes at: {to_prune.tolist()}")
+                labels, counts = torch.unique(prune_labels,
+                                              sorted=True,
+                                              return_counts=True)
+                distribution = dict(zip(labels.tolist(), counts.tolist()))
+                print(f"{distribution=}")
+                pl_module.add_prototypes(distribution=distribution,
+                                         initializer=self.initializer)
             new_num_protos = pl_module.num_prototypes
             if self.verbose:
-                print(f"`num_prototypes` reduced from {cur_num_protos} "
+                print(f"`num_prototypes` changed from {cur_num_protos} "
                       f"to {new_num_protos}.")
         return True
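For reference, the re-add step introduced above boils down to rebuilding a per-class distribution from the labels of the pruned prototypes and handing it back to the module via add_prototypes. A standalone sketch of that bookkeeping, using made-up label values (only torch is required):

    import torch

    # Labels of the prototypes selected for pruning (made-up values).
    prune_labels = torch.tensor([2, 0, 2, 2, 1])

    # Count how many prototypes of each class were removed ...
    labels, counts = torch.unique(prune_labels, sorted=True, return_counts=True)

    # ... so the same number per class can be re-added afterwards.
    distribution = dict(zip(labels.tolist(), counts.tolist()))
    print(distribution)  # {0: 1, 1: 1, 2: 3}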