Compare commits
No commits in common. "master" and "refactor/springcleaning2021" have entirely different histories.
master...refactor/springcleaning2021
@@ -1,10 +1,10 @@
 [bumpversion]
-current_version = 0.7.6
+current_version = 0.5.0
 commit = True
 tag = True
 parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)
-serialize = {major}.{minor}.{patch}
-message = build: bump version {current_version} → {new_version}
+serialize =
+	{major}.{minor}.{patch}
 
 [bumpversion:file:setup.py]
 
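As a reading aid for the hunk above: bump2version uses the `parse` regex to split the current version string into named parts and the `serialize` template to reassemble them after a bump. A minimal sketch of that round trip using only the standard library (the minor bump itself is illustrative, not part of this diff):

```python
import re

# The `parse` pattern from the config above, applied to the head branch's version.
PARSE = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
parts = {k: int(v) for k, v in re.match(PARSE, "0.5.0").groupdict().items()}

# The `serialize` template reassembles the parts after a bump.
bumped = "{major}.{minor}.{patch}".format(major=parts["major"],
                                          minor=parts["minor"] + 1,
                                          patch=0)
print(bumped)  # -> 0.6.0 (a minor bump)
```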
.codacy.yml (new file, 15 lines)

# To validate the contents of your configuration file
# run the following command in the folder where the configuration file is located:
# codacy-analysis-cli validate-configuration --directory `pwd`
# To analyse, run:
# codacy-analysis-cli analyse --tool remark-lint --directory `pwd`
---
engines:
  pylintpython3:
    exclude_paths:
      - config/engines.yml
  remark-lint:
    exclude_paths:
      - config/engines.yml
exclude_paths:
  - 'tests/**'
.codecov.yml (new file, 2 lines)

comment:
  require_changes: yes
.github/ISSUE_TEMPLATE/bug_report.md

@@ -10,28 +10,21 @@ assignees: ''
 **Describe the bug**
 A clear and concise description of what the bug is.
 
-**Steps to reproduce the behavior**
-1. ...
-2. Run script '...' or this snippet:
-```python
-import prototorch as pt
-
-...
-```
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Install Prototorch by running '...'
+2. Run script '...'
 3. See errors
 
 **Expected behavior**
 A clear and concise description of what you expected to happen.
 
-**Observed behavior**
-A clear and concise description of what actually happened.
-
 **Screenshots**
 If applicable, add screenshots to help explain your problem.
 
-**System and version information**
+**Desktop (please complete the following information):**
 - OS: [e.g. Ubuntu 20.10]
-- ProtoTorch Version: [e.g. 0.4.0]
+- Prototorch Version: [e.g. v0.4.0]
 - Python Version: [e.g. 3.9.5]
 
 **Additional context**
.github/workflows/pythonapp.yml

@@ -5,71 +5,33 @@ name: tests
 
 on:
   push:
+    branches: [ master, dev ]
   pull_request:
-    branches: [master]
+    branches: [ master ]
 
 jobs:
-  style:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python 3.11
-        uses: actions/setup-python@v4
-        with:
-          python-version: "3.11"
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install .[all]
-      - uses: pre-commit/action@v3.0.0
-  compatibility:
-    needs: style
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
-        os: [ubuntu-latest, windows-latest]
-        exclude:
-          - os: windows-latest
-            python-version: "3.8"
-          - os: windows-latest
-            python-version: "3.9"
-          - os: windows-latest
-            python-version: "3.10"
-
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
-        with:
-          python-version: ${{ matrix.python-version }}
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install .[all]
-      - name: Test with pytest
-        run: |
-          pytest
-  publish_pypi:
-    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
-    needs: compatibility
+  build:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python 3.10
-        uses: actions/setup-python@v4
-        with:
-          python-version: "3.11"
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install .[all]
-          pip install wheel
-      - name: Build package
-        run: python setup.py sdist bdist_wheel
-      - name: Publish a Python distribution to PyPI
-        uses: pypa/gh-action-pypi-publish@release/v1
-        with:
-          user: __token__
-          password: ${{ secrets.PYPI_API_TOKEN }}
+    - uses: actions/checkout@v2
+    - name: Set up Python 3.8
+      uses: actions/setup-python@v1
+      with:
+        python-version: 3.8
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install .[all]
+    - name: Lint with flake8
+      run: |
+        pip install flake8
+        # stop the build if there are Python syntax errors or undefined names
+        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+    - name: Test with pytest
+      run: |
+        pip install pytest
+        pytest
.gitignore

@@ -155,4 +155,4 @@ dmypy.json
 reports
 artifacts
 examples/_*.py
 examples/_*.ipynb
(file removed on refactor/springcleaning2021, 53 lines)

# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
      - id: check-ast
      - id: check-case-conflict

  - repo: https://github.com/myint/autoflake
    rev: v2.1.1
    hooks:
      - id: autoflake

  - repo: http://github.com/PyCQA/isort
    rev: 5.12.0
    hooks:
      - id: isort

  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.3.0
    hooks:
      - id: mypy
        files: prototorch
        additional_dependencies: [types-pkg_resources]

  - repo: https://github.com/pre-commit/mirrors-yapf
    rev: v0.32.0
    hooks:
      - id: yapf

  - repo: https://github.com/pre-commit/pygrep-hooks
    rev: v1.10.0
    hooks:
      - id: python-use-type-annotations
      - id: python-no-log-warn
      - id: python-check-blanket-noqa

  - repo: https://github.com/asottile/pyupgrade
    rev: v3.7.0
    hooks:
      - id: pyupgrade

  - repo: https://github.com/si-cim/gitlint
    rev: v0.15.2-unofficial
    hooks:
      - id: gitlint
        args: [--contrib=CT1, --ignore=B6, --msg-filename]
@@ -19,7 +19,7 @@ formats: all
 
 # Optionally set the version of Python and requirements required to build your docs
 python:
-  version: 3.9
+  version: 3.8
 install:
   - method: pip
     path: .
.travis.yml (new file, 36 lines)

dist: bionic
sudo: false
language: python
python: 3.9
cache:
  directories:
    - "$HOME/.cache/pip"
    - "./tests/artifacts"
    - "$HOME/datasets"
install:
  - pip install .[all] --progress-bar off

# Generate code coverage report
script:
  - coverage run -m pytest

# Push the results to codecov
after_success:
  - bash <(curl -s https://codecov.io/bash)

# Publish on PyPI
deploy:
  provider: pypi
  username: __token__
  password:
    secure: rVQNCxKIuiEtMz4zLSsjdt6spG7cf3miKN5eqjxZfcELALHxAV4w/+CideQObOn3u9emmxb87R9XWKcogqK2MXqnuIcY4mWg7HUqaip1bhz/4YiVXjFILcG6itjX9IUF1DrtjKKRk6xryucSZcEB7yTcXz1hQTb768KWlLlKOVTRNwr7j07eyeafexz/L2ANQCqfOZgS4b0k2AMeDBRPykPULtyeneEFlb6MJZ2MxeqtTNVK4b/6VsQSZwQ9jGJNGWonn5Y287gHmzvEcymSJogTe2taxGBWawPnOsibws9v88DEAHdsEvYdnqEE3hFl0R5La2Lkjd8CjNUYegxioQ57i3WNS3iksq10ZLMCbH29lb9YPG7r6Y8z9H85735kV2gKLdf+o7SPS03TRgjSZKN6pn4pLG0VWkxC6l8VfLuJnRNTHX4g6oLQwOWIBbxybn9Zw/yLjAXAJNgBHt5v86H6Jfi1Va4AhEV6itkoH9IM3/uDhrE/mmorqyVled/CPNtBWNTyoDevLNxMUDnbuhH0JzLki+VOjKnTxEfq12JB8X9faFG5BjvU9oGjPPewrp5DGGzg6KDra7dikciWUxE1eTFFDhMyG1CFGcjKlDvlAGHyI6Kih35egGUeq+N/pitr2330ftM9Dm4rWpOTxPyCI89bXKssx/MgmLG7kSM=
  on:
    tags: true
    skip_existing: true

# The password is encrypted with:
# `cd prototorch && travis encrypt your-pypi-api-token --add deploy.password`
# See https://docs.travis-ci.com/user/deployment/pypi and
# https://github.com/travis-ci/travis.rb#installation
# for more details
# Note: The encrypt command does not work well in ZSH.
LICENSE

@@ -1,7 +1,6 @@
 MIT License
 
-Copyright (c) 2020 Saxon Institute for Computational Intelligence and Machine
-Learning (SICIM)
+Copyright (c) 2020 si-cim
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
README.md

@@ -2,9 +2,13 @@
 
 ![ProtoTorch Logo](https://prototorch.readthedocs.io/en/latest/_static/horizontal-lockup.png)
 
+[![Build Status](https://travis-ci.org/si-cim/prototorch.svg?branch=master)](https://travis-ci.org/si-cim/prototorch)
 ![tests](https://github.com/si-cim/prototorch/workflows/tests/badge.svg)
 [![GitHub tag (latest by date)](https://img.shields.io/github/v/tag/si-cim/prototorch?color=yellow&label=version)](https://github.com/si-cim/prototorch/releases)
 [![PyPI](https://img.shields.io/pypi/v/prototorch)](https://pypi.org/project/prototorch/)
+[![codecov](https://codecov.io/gh/si-cim/prototorch/branch/master/graph/badge.svg)](https://codecov.io/gh/si-cim/prototorch)
+[![Codacy Badge](https://api.codacy.com/project/badge/Grade/76273904bf9343f0a8b29cd8aca242e7)](https://www.codacy.com/gh/si-cim/prototorch?utm_source=github.com&utm_medium=referral&utm_content=si-cim/prototorch&utm_campaign=Badge_Grade)
+![PyPI - Downloads](https://img.shields.io/pypi/dm/prototorch?color=blue)
 [![GitHub license](https://img.shields.io/github/license/si-cim/prototorch)](https://github.com/si-cim/prototorch/blob/master/LICENSE)
 
 *Tensorflow users, see:* [ProtoFlow](https://github.com/si-cim/protoflow)

@@ -44,23 +48,6 @@ pip install -e .[all]
 The documentation is available at <https://www.prototorch.ml/en/latest/>. Should
 that link not work try <https://prototorch.readthedocs.io/en/latest/>.
 
-## Contribution
-
-This repository contains definition for [git hooks](https://githooks.com).
-[Pre-commit](https://pre-commit.com) is automatically installed as development
-dependency with prototorch or you can install it manually with `pip install
-pre-commit`.
-
-Please install the hooks by running:
-```bash
-pre-commit install
-pre-commit install --hook-type commit-msg
-```
-before creating the first commit.
-
-The commit will fail if the commit message does not follow the specification
-provided [here](https://www.conventionalcommits.org/en/v1.0.0/#specification).
-
 ## Bibtex
 
 If you would like to cite the package, please use this:
@@ -23,7 +23,7 @@ author = "Jensun Ravichandran"
 
 # The full version, including alpha/beta/rc tags
 #
-release = "0.7.6"
+release = "0.5.0"
 
 # -- General configuration ---------------------------------------------------
 

@@ -120,7 +120,7 @@ html_css_files = [
 # -- Options for HTMLHelp output ------------------------------------------
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = "prototorchdoc"
+htmlhelp_basename = "protoflowdoc"
 
 # -- Options for LaTeX output ---------------------------------------------
 
(file removed on refactor/springcleaning2021, 100 lines)

"""ProtoTorch CBC example using 2D Iris data."""

import logging

import torch
from matplotlib import pyplot as plt

import prototorch as pt


class CBC(torch.nn.Module):

    def __init__(self, data, **kwargs):
        super().__init__(**kwargs)
        self.components_layer = pt.components.ReasoningComponents(
            distribution=[2, 1, 2],
            components_initializer=pt.initializers.SSCI(data, noise=0.1),
            reasonings_initializer=pt.initializers.PPRI(components_first=True),
        )

    def forward(self, x):
        components, reasonings = self.components_layer()
        sims = pt.similarities.euclidean_similarity(x, components)
        probs = pt.competitions.cbcc(sims, reasonings)
        return probs


class VisCBC2D():

    def __init__(self, model, data):
        self.model = model
        self.x_train, self.y_train = pt.utils.parse_data_arg(data)
        self.title = "Components Visualization"
        self.fig = plt.figure(self.title)
        self.border = 0.1
        self.resolution = 100
        self.cmap = "viridis"

    def on_train_epoch_end(self):
        x_train, y_train = self.x_train, self.y_train
        _components = self.model.components_layer._components.detach()
        ax = self.fig.gca()
        ax.cla()
        ax.set_title(self.title)
        ax.axis("off")
        ax.scatter(
            x_train[:, 0],
            x_train[:, 1],
            c=y_train,
            cmap=self.cmap,
            edgecolor="k",
            marker="o",
            s=30,
        )
        ax.scatter(
            _components[:, 0],
            _components[:, 1],
            c="w",
            cmap=self.cmap,
            edgecolor="k",
            marker="D",
            s=50,
        )
        x = torch.vstack((x_train, _components))
        mesh_input, xx, yy = pt.utils.mesh2d(x, self.border, self.resolution)
        with torch.no_grad():
            y_pred = self.model(
                torch.Tensor(mesh_input).type_as(_components)).argmax(1)
        y_pred = y_pred.cpu().reshape(xx.shape)
        ax.contourf(xx, yy, y_pred, cmap=self.cmap, alpha=0.35)
        plt.pause(0.2)


if __name__ == "__main__":
    train_ds = pt.datasets.Iris(dims=[0, 2])

    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=32)

    model = CBC(train_ds)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    criterion = pt.losses.MarginLoss(margin=0.1)
    vis = VisCBC2D(model, train_ds)

    for epoch in range(200):
        correct = 0.0
        for x, y in train_loader:
            y_oh = torch.eye(3)[y]
            y_pred = model(x)
            loss = criterion(y_pred, y_oh).mean(0)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            correct += (y_pred.argmax(1) == y).float().sum(0)

        acc = 100 * correct / len(train_ds)
        logging.info(f"Epoch: {epoch} Accuracy: {acc:05.02f}%")
        vis.on_train_epoch_end()
examples/glvq_iris.py (new file, 120 lines)

"""ProtoTorch GLVQ example using 2D Iris data."""

import numpy as np
import torch
from matplotlib import pyplot as plt
from prototorch.components import LabeledComponents, StratifiedMeanInitializer
from prototorch.functions.competitions import wtac
from prototorch.functions.distances import euclidean_distance
from prototorch.modules.losses import GLVQLoss
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from torchinfo import summary

# Prepare and preprocess the data
scaler = StandardScaler()
x_train, y_train = load_iris(return_X_y=True)
x_train = x_train[:, [0, 2]]
scaler.fit(x_train)
x_train = scaler.transform(x_train)


# Define the GLVQ model
class Model(torch.nn.Module):
    def __init__(self):
        """GLVQ model for training on 2D Iris data."""
        super().__init__()
        prototype_initializer = StratifiedMeanInitializer([x_train, y_train])
        prototype_distribution = {"num_classes": 3, "prototypes_per_class": 3}
        self.proto_layer = LabeledComponents(
            prototype_distribution,
            prototype_initializer,
        )

    def forward(self, x):
        prototypes, prototype_labels = self.proto_layer()
        distances = euclidean_distance(x, prototypes)
        return distances, prototype_labels


# Build the GLVQ model
model = Model()

# Print summary using torchinfo (might be buggy/incorrect)
print(summary(model))

# Optimize using SGD optimizer from `torch.optim`
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = GLVQLoss(squashing="sigmoid_beta", beta=10)

x_in = torch.Tensor(x_train)
y_in = torch.Tensor(y_train)

# Training loop
TITLE = "Prototype Visualization"
fig = plt.figure(TITLE)
for epoch in range(70):
    # Compute loss
    distances, prototype_labels = model(x_in)
    loss = criterion([distances, prototype_labels], y_in)

    # Compute Accuracy
    with torch.no_grad():
        predictions = wtac(distances, prototype_labels)
        correct = predictions.eq(y_in.view_as(predictions)).sum().item()
        acc = 100.0 * correct / len(x_train)

    print(
        f"Epoch: {epoch + 1:03d} Loss: {loss.item():05.02f} Acc: {acc:05.02f}%"
    )

    # Optimizer step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Get the prototypes form the model
    prototypes = model.proto_layer.components.numpy()
    if np.isnan(np.sum(prototypes)):
        print("Stopping training because of `nan` in prototypes.")
        break

    # Visualize the data and the prototypes
    ax = fig.gca()
    ax.cla()
    ax.set_title(TITLE)
    ax.set_xlabel("Data dimension 1")
    ax.set_ylabel("Data dimension 2")
    cmap = "viridis"
    ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, edgecolor="k")
    ax.scatter(
        prototypes[:, 0],
        prototypes[:, 1],
        c=prototype_labels,
        cmap=cmap,
        edgecolor="k",
        marker="D",
        s=50,
    )

    # Paint decision regions
    x = np.vstack((x_train, prototypes))
    x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1
    y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 1 / 50),
                         np.arange(y_min, y_max, 1 / 50))
    mesh_input = np.c_[xx.ravel(), yy.ravel()]

    torch_input = torch.Tensor(mesh_input)
    d = model(torch_input)[0]
    w_indices = torch.argmin(d, dim=1)
    y_pred = torch.index_select(prototype_labels, 0, w_indices)
    y_pred = y_pred.reshape(xx.shape)

    # Plot voronoi regions
    ax.contourf(xx, yy, y_pred, cmap=cmap, alpha=0.35)

    ax.set_xlim(left=x_min + 0, right=x_max - 0)
    ax.set_ylim(bottom=y_min + 0, top=y_max - 0)

    plt.pause(0.1)
(file removed on refactor/springcleaning2021, 76 lines)

"""ProtoTorch GMLVQ example using Iris data."""

import torch

import prototorch as pt


class GMLVQ(torch.nn.Module):
    """
    Implementation of Generalized Matrix Learning Vector Quantization.
    """

    def __init__(self, data, **kwargs):
        super().__init__(**kwargs)

        self.components_layer = pt.components.LabeledComponents(
            distribution=[1, 1, 1],
            components_initializer=pt.initializers.SMCI(data, noise=0.1),
        )

        self.backbone = pt.transforms.Omega(
            len(data[0][0]),
            len(data[0][0]),
            pt.initializers.RandomLinearTransformInitializer(),
        )

    def forward(self, data):
        """
        Forward function that returns a tuple of dissimilarities and label information.
        Feed into GLVQLoss to get a complete GMLVQ model.
        """
        components, label = self.components_layer()

        latent_x = self.backbone(data)
        latent_components = self.backbone(components)

        distance = pt.distances.squared_euclidean_distance(
            latent_x, latent_components)

        return distance, label

    def predict(self, data):
        """
        The GMLVQ has a modified prediction step, where a competition layer is applied.
        """
        components, label = self.components_layer()
        distance = pt.distances.squared_euclidean_distance(data, components)
        winning_label = pt.competitions.wtac(distance, label)
        return winning_label


if __name__ == "__main__":
    train_ds = pt.datasets.Iris()

    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=32)

    model = GMLVQ(train_ds)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.05)
    criterion = pt.losses.GLVQLoss()

    for epoch in range(200):
        correct = 0.0
        for x, y in train_loader:
            d, labels = model(x)
            loss = criterion(d, y, labels).mean(0)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            with torch.no_grad():
                y_pred = model.predict(x)
                correct += (y_pred == y).float().sum(0)

        acc = 100 * correct / len(train_ds)
        print(f"Epoch: {epoch} Accuracy: {acc:05.02f}%")
examples/gmlvq_tecator.py (new file, 103 lines)

"""ProtoTorch "siamese" GMLVQ example using Tecator."""

import matplotlib.pyplot as plt
import torch
from prototorch.components import LabeledComponents, StratifiedMeanInitializer
from prototorch.datasets.tecator import Tecator
from prototorch.functions.distances import sed
from prototorch.modules.losses import GLVQLoss
from prototorch.utils.colors import get_legend_handles
from torch.utils.data import DataLoader

# Prepare the dataset and dataloader
train_data = Tecator(root="./artifacts", train=True)
train_loader = DataLoader(train_data, batch_size=128, shuffle=True)


class Model(torch.nn.Module):
    def __init__(self, **kwargs):
        """GMLVQ model as a siamese network."""
        super().__init__()
        prototype_initializer = StratifiedMeanInitializer(train_loader)
        prototype_distribution = {"num_classes": 2, "prototypes_per_class": 2}

        self.proto_layer = LabeledComponents(
            prototype_distribution,
            prototype_initializer,
        )

        self.omega = torch.nn.Linear(in_features=100,
                                     out_features=100,
                                     bias=False)
        torch.nn.init.eye_(self.omega.weight)

    def forward(self, x):
        protos = self.proto_layer.components
        plabels = self.proto_layer.component_labels

        # Process `x` and `protos` through `omega`
        x_map = self.omega(x)
        protos_map = self.omega(protos)

        # Compute distances and output
        dis = sed(x_map, protos_map)
        return dis, plabels


# Build the GLVQ model
model = Model()

# Print a summary of the model
print(model)

# Optimize using Adam optimizer from `torch.optim`
optimizer = torch.optim.Adam(model.parameters(), lr=0.001_0)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=75, gamma=0.1)
criterion = GLVQLoss(squashing="identity", beta=10)

# Training loop
for epoch in range(150):
    epoch_loss = 0.0  # zero-out epoch loss
    optimizer.zero_grad()  # zero-out gradients
    for xb, yb in train_loader:
        # Compute loss
        distances, plabels = model(xb)
        loss = criterion([distances, plabels], yb)
        epoch_loss += loss.item()
        # Backprop
        loss.backward()
    # Take a gradient descent step
    optimizer.step()
    scheduler.step()

    lr = optimizer.param_groups[0]["lr"]
    print(f"Epoch: {epoch + 1:03d} Loss: {epoch_loss:06.02f} lr: {lr:07.06f}")

# Get the omega matrix form the model
omega = model.omega.weight.data.numpy().T

# Visualize the lambda matrix
title = "Lambda Matrix Visualization"
fig = plt.figure(title)
ax = fig.gca()
ax.set_title(title)
im = ax.imshow(omega.dot(omega.T), cmap="viridis")
plt.show()

# Get the prototypes form the model
protos = model.proto_layer.components.numpy()
plabels = model.proto_layer.component_labels.numpy()

# Visualize the prototypes
title = "Tecator Prototypes"
fig = plt.figure(title)
ax = fig.gca()
ax.set_title(title)
ax.set_xlabel("Spectral frequencies")
ax.set_ylabel("Absorption")
clabels = ["Class 0 - Low fat", "Class 1 - High fat"]
handles, colors = get_legend_handles(clabels, marker="line", zero_indexed=True)
for x, y in zip(protos, plabels):
    ax.plot(x, c=colors[int(y)])
ax.legend(handles, clabels)
plt.show()
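A note on the "lambda matrix" plotted by the script above: it is the product of the learned omega transform with its own transpose, and in the GMLVQ literature the diagonal of that product is commonly read as per-feature relevances. A minimal sketch of that computation (the identity matrix is a stand-in for the trained `model.omega` weights, matching the `eye_` initialization above):

```python
import numpy as np

omega = np.eye(100)                # stand-in for model.omega.weight.data.numpy().T
lam = omega.dot(omega.T)           # the matrix passed to ax.imshow(...) above
relevances = np.diag(lam)          # diagonal entries: per-feature relevances
print(relevances.sum())            # 100.0 for the identity initialization
```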
examples/gtlvq_mnist.py (new file, 183 lines)

"""
ProtoTorch GTLVQ example using MNIST data.
The GTLVQ is placed as an classification model on
top of a CNN, considered as featurer extractor.
Initialization of subpsace and prototypes in
Siamnese fashion
For more info about GTLVQ see:
DOI:10.1109/IJCNN.2016.7727534
"""

import numpy as np
import torch
import torch.nn as nn
import torchvision
from prototorch.functions.helper import calculate_prototype_accuracy
from prototorch.modules.losses import GLVQLoss
from prototorch.modules.models import GTLVQ
from torchvision import transforms

# Parameters and options
num_epochs = 50
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.1
momentum = 0.5
log_interval = 10
cuda = "cuda:0"
random_seed = 1
device = torch.device(cuda if torch.cuda.is_available() else "cpu")

# Configures reproducability
torch.manual_seed(random_seed)
np.random.seed(random_seed)

# Prepare and preprocess the data
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST(
        "./files/",
        train=True,
        download=True,
        transform=torchvision.transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ]),
    ),
    batch_size=batch_size_train,
    shuffle=True,
)

test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST(
        "./files/",
        train=False,
        download=True,
        transform=torchvision.transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ]),
    ),
    batch_size=batch_size_test,
    shuffle=True,
)


# Define the GLVQ model plus appropriate feature extractor
class CNNGTLVQ(torch.nn.Module):
    def __init__(
        self,
        num_classes,
        subspace_data,
        prototype_data,
        tangent_projection_type="local",
        prototypes_per_class=2,
        bottleneck_dim=128,
    ):
        super(CNNGTLVQ, self).__init__()

        # Feature Extractor - Simple CNN
        self.fe = nn.Sequential(
            nn.Conv2d(1, 32, 3, 1),
            nn.ReLU(),
            nn.Conv2d(32, 64, 3, 1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout(0.25),
            nn.Flatten(),
            nn.Linear(9216, bottleneck_dim),
            nn.Dropout(0.5),
            nn.LeakyReLU(),
            nn.LayerNorm(bottleneck_dim),
        )

        # Forward pass of subspace and prototype initialization data through feature extractor
        subspace_data = self.fe(subspace_data)
        prototype_data[0] = self.fe(prototype_data[0])

        # Initialization of GTLVQ
        self.gtlvq = GTLVQ(
            num_classes,
            subspace_data,
            prototype_data,
            tangent_projection_type=tangent_projection_type,
            feature_dim=bottleneck_dim,
            prototypes_per_class=prototypes_per_class,
        )

    def forward(self, x):
        # Feature Extraction
        x = self.fe(x)

        # GTLVQ Forward pass
        dis = self.gtlvq(x)
        return dis


# Get init data
subspace_data = torch.cat(
    [next(iter(train_loader))[0],
     next(iter(test_loader))[0]])
prototype_data = next(iter(train_loader))

# Build the CNN GTLVQ model
model = CNNGTLVQ(
    10,
    subspace_data,
    prototype_data,
    tangent_projection_type="local",
    bottleneck_dim=128,
).to(device)

# Optimize using SGD optimizer from `torch.optim`
optimizer = torch.optim.Adam(
    [{
        "params": model.fe.parameters()
    }, {
        "params": model.gtlvq.parameters()
    }],
    lr=learning_rate,
)
criterion = GLVQLoss(squashing="sigmoid_beta", beta=10)

# Training loop
for epoch in range(num_epochs):
    for batch_idx, (x_train, y_train) in enumerate(train_loader):
        model.train()
        x_train, y_train = x_train.to(device), y_train.to(device)
        optimizer.zero_grad()

        distances = model(x_train)
        plabels = model.gtlvq.cls.component_labels.to(device)

        # Compute loss.
        loss = criterion([distances, plabels], y_train)
        loss.backward()
        optimizer.step()

        # GTLVQ uses projected SGD, which means to orthogonalize the subspaces after every gradient update.
        model.gtlvq.orthogonalize_subspace()

        if batch_idx % log_interval == 0:
            acc = calculate_prototype_accuracy(distances, y_train, plabels)
            print(
                f"Epoch: {epoch + 1:02d}/{num_epochs:02d} Epoch Progress: {100. * batch_idx / len(train_loader):02.02f} % Loss: {loss.item():02.02f} \
            Train Acc: {acc.item():02.02f}")

    # Test
    with torch.no_grad():
        model.eval()
        correct = 0
        total = 0
        for x_test, y_test in test_loader:
            x_test, y_test = x_test.to(device), y_test.to(device)
            test_distances = model(torch.tensor(x_test))
            test_plabels = model.gtlvq.cls.prototype_labels.to(device)
            i = torch.argmin(test_distances, 1)
            correct += torch.sum(y_test == test_plabels[i])
            total += y_test.size(0)
        print("Accuracy of the network on the test images: %d %%" %
              (torch.true_divide(correct, total) * 100))

# Save the model
PATH = "./glvq_mnist_model.pth"
torch.save(model.state_dict(), PATH)
examples/lgmlvq_iris.py (new file, 108 lines)

"""ProtoTorch LGMLVQ example using 2D Iris data."""

import numpy as np
import torch
from matplotlib import pyplot as plt
from prototorch.components import LabeledComponents, StratifiedMeanInitializer
from prototorch.functions.competitions import stratified_min
from prototorch.functions.distances import lomega_distance
from prototorch.modules.losses import GLVQLoss
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score

# Prepare training data
x_train, y_train = load_iris(True)
x_train = x_train[:, [0, 2]]


# Define the model
class Model(torch.nn.Module):
    def __init__(self):
        """Local-GMLVQ model."""
        super().__init__()

        prototype_initializer = StratifiedMeanInitializer([x_train, y_train])
        prototype_distribution = [1, 2, 2]
        self.proto_layer = LabeledComponents(
            prototype_distribution,
            prototype_initializer,
        )

        omegas = torch.eye(2, 2).repeat(5, 1, 1)
        self.omegas = torch.nn.Parameter(omegas)

    def forward(self, x):
        protos, plabels = self.proto_layer()
        omegas = self.omegas
        dis = lomega_distance(x, protos, omegas)
        return dis, plabels


# Build the model
model = Model()

# Optimize using Adam optimizer from `torch.optim`
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
criterion = GLVQLoss(squashing="sigmoid_beta", beta=10)

x_in = torch.Tensor(x_train)
y_in = torch.Tensor(y_train)

# Training loop
title = "Prototype Visualization"
fig = plt.figure(title)
for epoch in range(100):
    # Compute loss
    dis, plabels = model(x_in)
    loss = criterion([dis, plabels], y_in)
    y_pred = np.argmin(stratified_min(dis, plabels).detach().numpy(), axis=1)
    acc = accuracy_score(y_train, y_pred)
    log_string = f"Epoch: {epoch + 1:03d} Loss: {loss.item():05.02f} "
    log_string += f"Acc: {acc * 100:05.02f}%"
    print(log_string)

    # Take a gradient descent step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Get the prototypes form the model
    protos = model.proto_layer.components.numpy()

    # Visualize the data and the prototypes
    ax = fig.gca()
    ax.cla()
    ax.set_title(title)
    ax.set_xlabel("Data dimension 1")
    ax.set_ylabel("Data dimension 2")
    cmap = "viridis"
    ax.scatter(x_train[:, 0], x_train[:, 1], c=y_train, edgecolor="k")
    ax.scatter(
        protos[:, 0],
        protos[:, 1],
        c=plabels,
        cmap=cmap,
        edgecolor="k",
        marker="D",
        s=50,
    )

    # Paint decision regions
    x = np.vstack((x_train, protos))
    x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1
    y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 1 / 50),
                         np.arange(y_min, y_max, 1 / 50))
    mesh_input = np.c_[xx.ravel(), yy.ravel()]

    d, plabels = model(torch.Tensor(mesh_input))
    y_pred = np.argmin(stratified_min(d, plabels).detach().numpy(), axis=1)
    y_pred = y_pred.reshape(xx.shape)

    # Plot voronoi regions
    ax.contourf(xx, yy, y_pred, cmap=cmap, alpha=0.35)

    ax.set_xlim(left=x_min + 0, right=x_max - 0)
    ax.set_ylim(bottom=y_min + 0, top=y_max - 0)

    plt.pause(0.1)
@@ -4,20 +4,24 @@ import pkgutil
 
 import pkg_resources
 
-from . import datasets  # noqa: F401
-from . import nn  # noqa: F401
-from . import utils  # noqa: F401
-from .core import competitions  # noqa: F401
-from .core import components  # noqa: F401
-from .core import distances  # noqa: F401
-from .core import initializers  # noqa: F401
-from .core import losses  # noqa: F401
-from .core import pooling  # noqa: F401
-from .core import similarities  # noqa: F401
-from .core import transforms  # noqa: F401
+from . import (
+    datasets,
+    nn,
+    utils,
+)
+from .core import (
+    competitions,
+    components,
+    distances,
+    initializers,
+    losses,
+    pooling,
+    similarities,
+    transforms,
+)
 
 # Core Setup
-__version__ = "0.7.6"
+__version__ = "0.5.0"
 
 __all_core__ = [
     "competitions",
@@ -3,7 +3,8 @@
 import torch
 
 
-def wtac(distances: torch.Tensor, labels: torch.LongTensor):
+def wtac(distances: torch.Tensor,
+         labels: torch.LongTensor) -> (torch.LongTensor):
     """Winner-Takes-All-Competition.
 
     Returns the labels corresponding to the winners.

@@ -14,7 +15,9 @@ def wtac(distances: torch.Tensor, labels: torch.LongTensor):
     return winning_labels
 
 
-def knnc(distances: torch.Tensor, labels: torch.LongTensor, k: int = 1):
+def knnc(distances: torch.Tensor,
+         labels: torch.LongTensor,
+         k: int = 1) -> (torch.LongTensor):
     """K-Nearest-Neighbors-Competition.
 
     Returns the labels corresponding to the winners.

@@ -38,7 +41,7 @@ def cbcc(detections: torch.Tensor, reasonings: torch.Tensor):
     pk = A
     nk = (1 - A) * B
     numerator = (detections @ (pk - nk).T) + nk.sum(1)
-    probs = numerator / ((pk + nk).sum(1) + 1e-8)
+    probs = numerator / (pk + nk).sum(1)
     return probs
 
 

@@ -48,8 +51,7 @@ class WTAC(torch.nn.Module):
     Thin wrapper over the `wtac` function.
 
     """
-
-    def forward(self, distances, labels):  # pylint: disable=no-self-use
+    def forward(self, distances, labels):
         return wtac(distances, labels)
 
 

@@ -59,8 +61,7 @@ class LTAC(torch.nn.Module):
     Thin wrapper over the `wtac` function.
 
     """
-
-    def forward(self, probs, labels):  # pylint: disable=no-self-use
+    def forward(self, probs, labels):
         return wtac(-1.0 * probs, labels)
 
 

@@ -70,7 +71,6 @@ class KNNC(torch.nn.Module):
     Thin wrapper over the `knnc` function.
 
     """
-
     def __init__(self, k=1, **kwargs):
         super().__init__(**kwargs)
         self.k = k

@@ -88,6 +88,5 @@ class CBCC(torch.nn.Module):
     Thin wrapper over the `cbcc` function.
 
     """
-
-    def forward(self, detections, reasonings):  # pylint: disable=no-self-use
+    def forward(self, detections, reasonings):
        return cbcc(detections, reasonings)
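Both sides of these hunks agree on what `wtac` computes and only disagree on its signature (and on the `1e-8` stabilizer that master adds to the `cbcc` denominator). A minimal standalone sketch of the winner-takes-all competition that both branches implement (a hypothetical re-implementation for illustration, not the library function itself):

```python
import torch

def wtac_sketch(distances: torch.Tensor,
                labels: torch.LongTensor) -> torch.LongTensor:
    # Index of the nearest prototype per sample, mapped to its class label.
    winning_indices = torch.min(distances, dim=1).indices
    return labels[winning_indices]

distances = torch.tensor([[0.2, 1.5, 0.9],
                          [2.0, 0.1, 0.4]])
labels = torch.LongTensor([0, 1, 2])
print(wtac_sketch(distances, labels))  # tensor([0, 1])
```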
@@ -6,8 +6,7 @@ from typing import Union
 import torch
 from torch.nn.parameter import Parameter
 
-from prototorch.utils import parse_distribution
-
+from ..utils import parse_distribution
 from .initializers import (
     AbstractClassAwareCompInitializer,
     AbstractComponentsInitializer,

@@ -64,7 +63,6 @@ def get_cikwargs(init, distribution):
 
 class AbstractComponents(torch.nn.Module):
     """Abstract class for all components modules."""
-
     @property
     def num_components(self):
         """Current number of components."""

@@ -87,10 +85,9 @@ class AbstractComponents(torch.nn.Module):
 
 class Components(AbstractComponents):
     """A set of adaptable Tensors."""
-
     def __init__(self, num_components: int,
-                 initializer: AbstractComponentsInitializer):
-        super().__init__()
+                 initializer: AbstractComponentsInitializer, **kwargs):
+        super().__init__(**kwargs)
         self.add_components(num_components, initializer)
 
     def add_components(self, num_components: int,

@@ -115,7 +112,6 @@ class Components(AbstractComponents):
 
 class AbstractLabels(torch.nn.Module):
     """Abstract class for all labels modules."""
-
     @property
     def labels(self):
         return self._labels.cpu()

@@ -156,11 +152,11 @@ class AbstractLabels(torch.nn.Module):
 
 class Labels(AbstractLabels):
     """A set of standalone labels."""
-
     def __init__(self,
                  distribution: Union[dict, list, tuple],
-                 initializer: AbstractLabelsInitializer = LabelsInitializer()):
-        super().__init__()
+                 initializer: AbstractLabelsInitializer = LabelsInitializer(),
+                 **kwargs):
+        super().__init__(**kwargs)
         self.add_labels(distribution, initializer)
 
     def add_labels(

@@ -187,13 +183,14 @@ class Labels(AbstractLabels):
 
 class LabeledComponents(AbstractComponents):
     """A set of adaptable components and corresponding unadaptable labels."""
-
     def __init__(
             self,
             distribution: Union[dict, list, tuple],
             components_initializer: AbstractComponentsInitializer,
-            labels_initializer: AbstractLabelsInitializer = LabelsInitializer()):
-        super().__init__()
+            labels_initializer: AbstractLabelsInitializer = LabelsInitializer(
+            ),
+            **kwargs):
+        super().__init__(**kwargs)
         self.add_components(distribution, components_initializer,
                             labels_initializer)
 

@@ -255,15 +252,12 @@ class Reasonings(torch.nn.Module):
     The `reasonings` tensor is of shape [num_components, num_classes, 2].
 
     """
-
-    def __init__(
-        self,
-        distribution: Union[dict, list, tuple],
-        initializer:
-        AbstractReasoningsInitializer = RandomReasoningsInitializer(),
-    ):
-        super().__init__()
-        self.add_reasonings(distribution, initializer)
+    def __init__(self,
+                 distribution: Union[dict, list, tuple],
+                 initializer:
+                 AbstractReasoningsInitializer = RandomReasoningsInitializer(),
+                 **kwargs):
+        super().__init__(**kwargs)
 
     @property
     def num_classes(self):

@@ -301,7 +295,7 @@ class Reasonings(torch.nn.Module):
 
 
 class ReasoningComponents(AbstractComponents):
-    r"""A set of components and a corresponding adapatable reasoning matrices.
+    """A set of components and a corresponding adapatable reasoning matrices.
 
     Every component has its own reasoning matrix.
 

@@ -315,14 +309,14 @@ class ReasoningComponents(AbstractComponents):
     three element probability distribution.
 
     """
-
     def __init__(
             self,
             distribution: Union[dict, list, tuple],
             components_initializer: AbstractComponentsInitializer,
             reasonings_initializer:
-            AbstractReasoningsInitializer = PurePositiveReasoningsInitializer()):
-        super().__init__()
+            AbstractReasoningsInitializer = PurePositiveReasoningsInitializer(),
+            **kwargs):
+        super().__init__(**kwargs)
         self.add_components(distribution, components_initializer,
                             reasonings_initializer)
 
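The recurring change across these hunks is the head side threading `**kwargs` through every `__init__` into `super().__init__(**kwargs)`. This looks like the cooperative-initialization idiom: forwarding keyword arguments lets these modules sit in multiple-inheritance chains without swallowing arguments meant for sibling classes. A minimal sketch of why that matters (class names here are illustrative, not from prototorch):

```python
import torch

class Base(torch.nn.Module):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

class TagMixin:
    def __init__(self, tag="untagged", **kwargs):
        super().__init__(**kwargs)  # keep the cooperative chain going
        self.tag = tag

class Combined(TagMixin, Base):
    pass

# TagMixin consumes `tag` and forwards the rest down the MRO.
print(Combined(tag="labeled").tag)  # 'labeled'
```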
@@ -11,7 +11,7 @@ def squared_euclidean_distance(x, y):
     **Alias:**
     ``prototorch.functions.distances.sed``
     """
-    x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
+    x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
     expanded_x = x.unsqueeze(dim=1)
     batchwise_difference = y - expanded_x
     differences_raised = torch.pow(batchwise_difference, 2)

@@ -27,20 +27,23 @@ def euclidean_distance(x, y):
     :returns: Distance Tensor of shape :math:`X \times Y`
     :rtype: `torch.tensor`
     """
-    x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
+    x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
     distances_raised = squared_euclidean_distance(x, y)
     distances = torch.sqrt(distances_raised)
     return distances
 
 
 def euclidean_distance_v2(x, y):
-    x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
+    x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
     diff = y - x.unsqueeze(1)
     pairwise_distances = (diff @ diff.permute((0, 2, 1))).sqrt()
     # Passing `dim1=-2` and `dim2=-1` to `diagonal()` takes the
     # batch diagonal. See:
     # https://pytorch.org/docs/stable/generated/torch.diagonal.html
     distances = torch.diagonal(pairwise_distances, dim1=-2, dim2=-1)
+    # print(f"{diff.shape=}")  # (nx, ny, ndim)
+    # print(f"{pairwise_distances.shape=}")  # (nx, ny, ny)
+    # print(f"{distances.shape=}")  # (nx, ny)
     return distances
 
 

@@ -54,7 +57,7 @@ def lpnorm_distance(x, y, p):
 
     :param p: p parameter of the lp norm
     """
-    x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
+    x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
     distances = torch.cdist(x, y, p=p)
     return distances
 

@@ -66,7 +69,7 @@ def omega_distance(x, y, omega):
 
     :param `torch.tensor` omega: Two dimensional matrix
     """
-    x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
+    x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
     projected_x = x @ omega
     projected_y = y @ omega
     distances = squared_euclidean_distance(projected_x, projected_y)

@@ -80,7 +83,7 @@ def lomega_distance(x, y, omegas):
 
     :param `torch.tensor` omegas: Three dimensional matrix
     """
-    x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
+    x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
     projected_x = x @ omegas
     projected_y = torch.diagonal(y @ omegas).T
     expanded_y = torch.unsqueeze(projected_y, dim=1)
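Every hunk above swaps a generator expression for a list comprehension in the reshaping step; the distance math itself is unchanged, and it hinges on the broadcasting trick used in `squared_euclidean_distance`. A minimal sketch of that trick, assuming `x` has shape (nx, dim) and `y` has shape (ny, dim):

```python
import torch

x = torch.randn(4, 2)                     # (nx, dim)
y = torch.randn(3, 2)                     # (ny, dim)
expanded_x = x.unsqueeze(dim=1)           # (nx, 1, dim)
batchwise_difference = y - expanded_x     # broadcasts to (nx, ny, dim)
distances = torch.pow(batchwise_difference, 2).sum(dim=2)  # (nx, ny)

# Cross-check against the cdist-based lpnorm path above (p=2, then squared).
assert torch.allclose(distances, torch.cdist(x, y, p=2)**2, atol=1e-5)
```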
|
@ -3,15 +3,11 @@
|
|||||||
import warnings
|
import warnings
|
||||||
from abc import ABC, abstractmethod
|
from abc import ABC, abstractmethod
|
||||||
from collections.abc import Iterable
|
from collections.abc import Iterable
|
||||||
from typing import (
|
from typing import Union
|
||||||
Callable,
|
|
||||||
Type,
|
|
||||||
Union,
|
|
||||||
)
|
|
||||||
|
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
from prototorch.utils import parse_data_arg, parse_distribution
|
from ..utils import parse_data_arg, parse_distribution
|
||||||
|
|
||||||
|
|
||||||
# Components
|
# Components
|
||||||
@@ -26,18 +22,11 @@ class LiteralCompInitializer(AbstractComponentsInitializer):
     Use this to 'generate' pre-initialized components elsewhere.

     """

     def __init__(self, components):
         self.components = components

     def generate(self, num_components: int = 0):
         """Ignore `num_components` and simply return `self.components`."""
-        provided_num_components = len(self.components)
-        if provided_num_components != num_components:
-            wmsg = f"The number of components ({provided_num_components}) " \
-                f"provided to {self.__class__.__name__} " \
-                f"does not match the expected number ({num_components})."
-            warnings.warn(wmsg)
         if not isinstance(self.components, torch.Tensor):
             wmsg = f"Converting components to {torch.Tensor}..."
             warnings.warn(wmsg)
@@ -47,7 +36,6 @@ class LiteralCompInitializer(AbstractComponentsInitializer):

 class ShapeAwareCompInitializer(AbstractComponentsInitializer):
     """Abstract class for all dimension-aware components initializers."""

     def __init__(self, shape: Union[Iterable, int]):
         if isinstance(shape, Iterable):
             self.component_shape = tuple(shape)

@@ -61,7 +49,6 @@ class ShapeAwareCompInitializer(AbstractComponentsInitializer):

 class ZerosCompInitializer(ShapeAwareCompInitializer):
     """Generate zeros corresponding to the components shape."""

     def generate(self, num_components: int):
         components = torch.zeros((num_components, ) + self.component_shape)
         return components

@@ -69,7 +56,6 @@ class ZerosCompInitializer(ShapeAwareCompInitializer):

 class OnesCompInitializer(ShapeAwareCompInitializer):
     """Generate ones corresponding to the components shape."""

     def generate(self, num_components: int):
         components = torch.ones((num_components, ) + self.component_shape)
         return components

@@ -77,7 +63,6 @@ class OnesCompInitializer(ShapeAwareCompInitializer):

 class FillValueCompInitializer(OnesCompInitializer):
     """Generate components with the provided `fill_value`."""

     def __init__(self, shape, fill_value: float = 1.0):
         super().__init__(shape)
         self.fill_value = fill_value

@@ -90,7 +75,6 @@ class FillValueCompInitializer(OnesCompInitializer):

 class UniformCompInitializer(OnesCompInitializer):
     """Generate components by sampling from a continuous uniform distribution."""

     def __init__(self, shape, minimum=0.0, maximum=1.0, scale=1.0):
         super().__init__(shape)
         self.minimum = minimum

@@ -105,7 +89,6 @@ class UniformCompInitializer(OnesCompInitializer):

 class RandomNormalCompInitializer(OnesCompInitializer):
     """Generate components by sampling from a standard normal distribution."""

     def __init__(self, shape, shift=0.0, scale=1.0):
         super().__init__(shape)
         self.shift = shift
@@ -126,11 +109,10 @@ class AbstractDataAwareCompInitializer(AbstractComponentsInitializer):
     `data` has to be a torch tensor.

     """

     def __init__(self,
-                 data: torch.Tensor,
+                 data: torch.TensorType,
                  noise: float = 0.0,
-                 transform: Callable = torch.nn.Identity()):
+                 transform: callable = torch.nn.Identity()):
         self.data = data
         self.noise = noise
         self.transform = transform
@@ -151,7 +133,6 @@ class AbstractDataAwareCompInitializer(AbstractComponentsInitializer):

 class DataAwareCompInitializer(AbstractDataAwareCompInitializer):
     """'Generate' the components from the provided data."""

     def generate(self, num_components: int = 0):
         """Ignore `num_components` and simply return transformed `self.data`."""
         components = self.generate_end_hook(self.data)

@@ -160,7 +141,6 @@ class DataAwareCompInitializer(AbstractDataAwareCompInitializer):

 class SelectionCompInitializer(AbstractDataAwareCompInitializer):
     """Generate components by uniformly sampling from the provided data."""

     def generate(self, num_components: int):
         indices = torch.LongTensor(num_components).random_(0, len(self.data))
         samples = self.data[indices]
@@ -170,16 +150,15 @@ class SelectionCompInitializer(AbstractDataAwareCompInitializer):

 class MeanCompInitializer(AbstractDataAwareCompInitializer):
     """Generate components by computing the mean of the provided data."""

     def generate(self, num_components: int):
-        mean = self.data.mean(dim=0)
+        mean = torch.mean(self.data, dim=0)
         repeat_dim = [num_components] + [1] * len(mean.shape)
         samples = mean.repeat(repeat_dim)
         components = self.generate_end_hook(samples)
         return components


-class AbstractClassAwareCompInitializer(AbstractComponentsInitializer):
+class AbstractClassAwareCompInitializer(AbstractDataAwareCompInitializer):
     """Abstract class for all class-aware components initializers.

     Components generated by class-aware components initializers inherit the shape

@@ -189,22 +168,16 @@ class AbstractClassAwareCompInitializer(AbstractComponentsInitializer):
     target tensors.

     """

     def __init__(self,
                  data,
                  noise: float = 0.0,
-                 transform: Callable = torch.nn.Identity()):
+                 transform: callable = torch.nn.Identity()):
         self.data, self.targets = parse_data_arg(data)
         self.noise = noise
         self.transform = transform
         self.clabels = torch.unique(self.targets).int().tolist()
         self.num_classes = len(self.clabels)

-    def generate_end_hook(self, samples):
-        drift = torch.rand_like(samples) * self.noise
-        components = self.transform(samples + drift)
-        return components
-
     @abstractmethod
     def generate(self, distribution: Union[dict, list, tuple]):
         ...
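The removed `generate_end_hook` is not lost on the other side: by switching the base class to `AbstractDataAwareCompInitializer`, the refactor inherits the same hook instead of redefining it. What the hook does, as a minimal standalone sketch with plain tensors:

```python
import torch

samples = torch.ones(5, 2)
noise = 0.1
transform = torch.nn.Identity()

drift = torch.rand_like(samples) * noise  # uniform noise in [0, noise)
components = transform(samples + drift)   # optional post-transform
```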
@@ -217,7 +190,6 @@ class AbstractClassAwareCompInitializer(AbstractComponentsInitializer):

 class ClassAwareCompInitializer(AbstractClassAwareCompInitializer):
     """'Generate' components from provided data and requested distribution."""

     def generate(self, distribution: Union[dict, list, tuple]):
         """Ignore `distribution` and simply return transformed `self.data`."""
         components = self.generate_end_hook(self.data)
@@ -226,10 +198,9 @@ class ClassAwareCompInitializer(AbstractClassAwareCompInitializer):

 class AbstractStratifiedCompInitializer(AbstractClassAwareCompInitializer):
     """Abstract class for all stratified components initializers."""

     @property
     @abstractmethod
-    def subinit_type(self) -> Type[AbstractDataAwareCompInitializer]:
+    def subinit_type(self) -> AbstractDataAwareCompInitializer:
         ...

     def generate(self, distribution: Union[dict, list, tuple]):
@@ -237,8 +208,6 @@ class AbstractStratifiedCompInitializer(AbstractClassAwareCompInitializer):
         components = torch.tensor([])
         for k, v in distribution.items():
             stratified_data = self.data[self.targets == k]
-            if len(stratified_data) == 0:
-                raise ValueError(f"No data available for class {k}.")
             initializer = self.subinit_type(
                 stratified_data,
                 noise=self.noise,
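The stratified pattern shared by both sides: slice the data per class label and delegate each slice to a per-class sub-initializer. Reduced to its essence with toy data (this is a sketch, not the diff's API):

```python
import torch

data = torch.tensor([[0., 0.], [1., 1.], [10., 10.], [12., 12.]])
targets = torch.tensor([0, 0, 1, 1])

# one mean prototype per class, the core of StratifiedMeanCompInitializer
means = {int(k): data[targets == k].mean(dim=0) for k in torch.unique(targets)}
# {0: tensor([0.5000, 0.5000]), 1: tensor([11., 11.])}
```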
@@ -251,7 +220,6 @@ class AbstractStratifiedCompInitializer(AbstractClassAwareCompInitializer):

 class StratifiedSelectionCompInitializer(AbstractStratifiedCompInitializer):
     """Generate components using stratified sampling from the provided data."""

     @property
     def subinit_type(self):
         return SelectionCompInitializer

@@ -259,7 +227,6 @@ class StratifiedSelectionCompInitializer(AbstractStratifiedCompInitializer):

 class StratifiedMeanCompInitializer(AbstractStratifiedCompInitializer):
     """Generate components at stratified means of the provided data."""

     @property
     def subinit_type(self):
         return MeanCompInitializer
@@ -268,7 +235,6 @@ class StratifiedMeanCompInitializer(AbstractStratifiedCompInitializer):

 # Labels
 class AbstractLabelsInitializer(ABC):
     """Abstract class for all labels initializers."""

     @abstractmethod
     def generate(self, distribution: Union[dict, list, tuple]):
         ...

@@ -280,7 +246,6 @@ class LiteralLabelsInitializer(AbstractLabelsInitializer):
     Use this to 'generate' pre-initialized labels elsewhere.

     """

     def __init__(self, labels):
         self.labels = labels

@@ -299,7 +264,6 @@ class LiteralLabelsInitializer(AbstractLabelsInitializer):

 class DataAwareLabelsInitializer(AbstractLabelsInitializer):
     """'Generate' the labels from a torch Dataset."""

     def __init__(self, data):
         self.data, self.targets = parse_data_arg(data)
@@ -310,19 +274,17 @@ class DataAwareLabelsInitializer(AbstractLabelsInitializer):

 class LabelsInitializer(AbstractLabelsInitializer):
     """Generate labels from `distribution`."""

     def generate(self, distribution: Union[dict, list, tuple]):
         distribution = parse_distribution(distribution)
-        labels_list = []
+        labels = []
         for k, v in distribution.items():
-            labels_list.extend([k] * v)
-        labels = torch.LongTensor(labels_list)
+            labels.extend([k] * v)
+        labels = torch.LongTensor(labels)
         return labels


 class OneHotLabelsInitializer(LabelsInitializer):
     """Generate one-hot-encoded labels from `distribution`."""

     def generate(self, distribution: Union[dict, list, tuple]):
         distribution = parse_distribution(distribution)
         num_classes = len(distribution.keys())
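The label-generation logic both versions share, in isolation: a distribution such as `{0: 2, 1: 3}` (two prototypes for class 0, three for class 1) is unrolled into a flat label tensor.

```python
import torch

distribution = {0: 2, 1: 3}
labels = []
for k, v in distribution.items():
    labels.extend([k] * v)          # repeat each class label v times
labels = torch.LongTensor(labels)   # tensor([0, 0, 1, 1, 1])
```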
@@ -332,19 +294,17 @@ class OneHotLabelsInitializer(LabelsInitializer):


 # Reasonings
-def compute_distribution_shape(distribution):
-    distribution = parse_distribution(distribution)
-    num_components = sum(distribution.values())
-    num_classes = len(distribution.keys())
-    return (num_components, num_classes, 2)
-
-
 class AbstractReasoningsInitializer(ABC):
     """Abstract class for all reasonings initializers."""

     def __init__(self, components_first: bool = True):
         self.components_first = components_first

+    def compute_shape(self, distribution):
+        distribution = parse_distribution(distribution)
+        num_components = sum(distribution.values())
+        num_classes = len(distribution.keys())
+        return (num_components, num_classes, 2)
+
     def generate_end_hook(self, reasonings):
         if not self.components_first:
             reasonings = reasonings.permute(2, 1, 0)
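Only where the shape helper lives changes here (a free function on one side, a method on the other); the computation is identical. Worked through for a concrete distribution:

```python
# For {0: 2, 1: 3} there are 5 components, 2 classes, and 2 reasoning
# channels (positive/negative), giving a (5, 2, 2) reasoning tensor.
distribution = {0: 2, 1: 3}
num_components = sum(distribution.values())  # 5
num_classes = len(distribution.keys())       # 2
shape = (num_components, num_classes, 2)     # (5, 2, 2)
```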
@@ -362,7 +322,6 @@ class LiteralReasoningsInitializer(AbstractReasoningsInitializer):
     Use this to 'generate' pre-initialized reasonings elsewhere.

     """

     def __init__(self, reasonings, **kwargs):
         super().__init__(**kwargs)
         self.reasonings = reasonings

@@ -380,9 +339,8 @@ class LiteralReasoningsInitializer(AbstractReasoningsInitializer):

 class ZerosReasoningsInitializer(AbstractReasoningsInitializer):
     """Reasonings are all initialized with zeros."""

     def generate(self, distribution: Union[dict, list, tuple]):
-        shape = compute_distribution_shape(distribution)
+        shape = self.compute_shape(distribution)
         reasonings = torch.zeros(*shape)
         reasonings = self.generate_end_hook(reasonings)
         return reasonings

@@ -390,9 +348,8 @@ class ZerosReasoningsInitializer(AbstractReasoningsInitializer):

 class OnesReasoningsInitializer(AbstractReasoningsInitializer):
     """Reasonings are all initialized with ones."""

     def generate(self, distribution: Union[dict, list, tuple]):
-        shape = compute_distribution_shape(distribution)
+        shape = self.compute_shape(distribution)
         reasonings = torch.ones(*shape)
         reasonings = self.generate_end_hook(reasonings)
         return reasonings

@@ -400,14 +357,13 @@ class OnesReasoningsInitializer(AbstractReasoningsInitializer):

 class RandomReasoningsInitializer(AbstractReasoningsInitializer):
     """Reasonings are randomly initialized."""

     def __init__(self, minimum=0.4, maximum=0.6, **kwargs):
         super().__init__(**kwargs)
         self.minimum = minimum
         self.maximum = maximum

     def generate(self, distribution: Union[dict, list, tuple]):
-        shape = compute_distribution_shape(distribution)
+        shape = self.compute_shape(distribution)
         reasonings = torch.ones(*shape).uniform_(self.minimum, self.maximum)
         reasonings = self.generate_end_hook(reasonings)
         return reasonings
@@ -415,10 +371,8 @@ class RandomReasoningsInitializer(AbstractReasoningsInitializer):

 class PurePositiveReasoningsInitializer(AbstractReasoningsInitializer):
     """Each component reasons positively for exactly one class."""

     def generate(self, distribution: Union[dict, list, tuple]):
-        num_components, num_classes, _ = compute_distribution_shape(
-            distribution)
+        num_components, num_classes, _ = self.compute_shape(distribution)
         A = OneHotLabelsInitializer().generate(distribution)
         B = torch.zeros(num_components, num_classes)
         reasonings = torch.stack([A, B], dim=-1)
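A sketch of the pure-positive construction both sides share: the positive channel is a one-hot matrix (each component endorses exactly its own class), the negative channel is all zeros. With one component per class the one-hot matrix is simply the identity:

```python
import torch

num_components, num_classes = 3, 3
A = torch.eye(num_classes)                    # one-hot positive channel
B = torch.zeros(num_components, num_classes)  # empty negative channel
reasonings = torch.stack([A, B], dim=-1)      # shape (3, 3, 2)
```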
@@ -434,7 +388,6 @@ class AbstractTransformInitializer(ABC):

 class AbstractLinearTransformInitializer(AbstractTransformInitializer):
     """Abstract class for all linear transform initializers."""

     def __init__(self, out_dim_first: bool = False):
         self.out_dim_first = out_dim_first

@@ -451,7 +404,6 @@ class AbstractLinearTransformInitializer(AbstractTransformInitializer):

 class ZerosLinearTransformInitializer(AbstractLinearTransformInitializer):
     """Initialize a matrix with zeros."""

     def generate(self, in_dim: int, out_dim: int):
         weights = torch.zeros(in_dim, out_dim)
         return self.generate_end_hook(weights)
@@ -459,23 +411,13 @@ class ZerosLinearTransformInitializer(AbstractLinearTransformInitializer):

 class OnesLinearTransformInitializer(AbstractLinearTransformInitializer):
     """Initialize a matrix with ones."""

     def generate(self, in_dim: int, out_dim: int):
         weights = torch.ones(in_dim, out_dim)
         return self.generate_end_hook(weights)


-class RandomLinearTransformInitializer(AbstractLinearTransformInitializer):
-    """Initialize a matrix with random values."""
-
-    def generate(self, in_dim: int, out_dim: int):
-        weights = torch.rand(in_dim, out_dim)
-        return self.generate_end_hook(weights)
-
-
-class EyeLinearTransformInitializer(AbstractLinearTransformInitializer):
+class EyeTransformInitializer(AbstractLinearTransformInitializer):
     """Initialize a matrix with the largest possible identity matrix."""

     def generate(self, in_dim: int, out_dim: int):
         weights = torch.zeros(in_dim, out_dim)
         I = torch.eye(min(in_dim, out_dim))
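The "largest possible identity" trick used by both versions of the eye initializer, shown standalone: embed an identity block into a zero matrix when `in_dim != out_dim`.

```python
import torch

in_dim, out_dim = 3, 2
weights = torch.zeros(in_dim, out_dim)
d = min(in_dim, out_dim)
weights[:d, :d] = torch.eye(d)
# tensor([[1., 0.],
#         [0., 1.],
#         [0., 0.]])
```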
@@ -483,42 +425,6 @@ class EyeLinearTransformInitializer(AbstractLinearTransformInitializer):
         return self.generate_end_hook(weights)


-class AbstractDataAwareLTInitializer(AbstractLinearTransformInitializer):
-    """Abstract class for all data-aware linear transform initializers."""
-
-    def __init__(self,
-                 data: torch.Tensor,
-                 noise: float = 0.0,
-                 transform: Callable = torch.nn.Identity(),
-                 out_dim_first: bool = False):
-        super().__init__(out_dim_first)
-        self.data = data
-        self.noise = noise
-        self.transform = transform
-
-    def generate_end_hook(self, weights: torch.Tensor):
-        drift = torch.rand_like(weights) * self.noise
-        weights = self.transform(weights + drift)
-        if self.out_dim_first:
-            weights = weights.permute(1, 0)
-        return weights
-
-
-class PCALinearTransformInitializer(AbstractDataAwareLTInitializer):
-    """Initialize a matrix with Eigenvectors from the data."""
-
-    def generate(self, in_dim: int, out_dim: int):
-        _, _, weights = torch.pca_lowrank(self.data, q=out_dim)
-        return self.generate_end_hook(weights)
-
-
-class LiteralLinearTransformInitializer(AbstractDataAwareLTInitializer):
-    """'Generate' the provided weights."""
-
-    def generate(self, in_dim: int, out_dim: int):
-        return self.generate_end_hook(self.data)
-
-
 # Aliases - Components
 CACI = ClassAwareCompInitializer
 DACI = DataAwareCompInitializer
@@ -547,9 +453,6 @@ RRI = RandomReasoningsInitializer
 ZRI = ZerosReasoningsInitializer

 # Aliases - Transforms
-ELTI = Eye = EyeLinearTransformInitializer
+Eye = EyeTransformInitializer
 OLTI = OnesLinearTransformInitializer
-RLTI = RandomLinearTransformInitializer
 ZLTI = ZerosLinearTransformInitializer
-PCALTI = PCALinearTransformInitializer
-LLTI = LiteralLinearTransformInitializer
@@ -2,7 +2,7 @@

 import torch

-from prototorch.nn.activations import get_activation
+from ..nn.activations import get_activation


 # Helpers
@@ -106,31 +106,20 @@ def margin_loss(y_pred, y_true, margin=0.3):


 class GLVQLoss(torch.nn.Module):

-    def __init__(self,
-                 margin=0.0,
-                 transfer_fn="identity",
-                 beta=10,
-                 add_dp=False,
-                 **kwargs):
+    def __init__(self, margin=0.0, squashing="identity", beta=10, **kwargs):
         super().__init__(**kwargs)
         self.margin = margin
-        self.transfer_fn = get_activation(transfer_fn)
+        self.squashing = get_activation(squashing)
         self.beta = torch.tensor(beta)
-        self.add_dp = add_dp

-    def forward(self, outputs, targets, plabels):
-        # mu = glvq_loss(outputs, targets, plabels)
-        dp, dm = _get_dp_dm(outputs, targets, plabels)
-        mu = (dp - dm) / (dp + dm)
-        if self.add_dp:
-            mu = mu + dp
-        batch_loss = self.transfer_fn(mu + self.margin, beta=self.beta)
-        return batch_loss.sum()
+    def forward(self, outputs, targets):
+        distances, plabels = outputs
+        mu = glvq_loss(distances, targets, prototype_labels=plabels)
+        batch_loss = self.squashing(mu + self.margin, beta=self.beta)
+        return torch.sum(batch_loss, dim=0)
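Both loss variants are built on the same classifier-independent quantity: given the distance d+ to the closest prototype of the correct class and d- to the closest prototype of any other class, mu = (d+ - d-) / (d+ + d-) lies in (-1, 1) and is negative exactly when the sample is classified correctly. A toy computation (values are illustrative):

```python
import torch

d_plus = torch.tensor([0.2, 1.5])   # distances to correct-class prototypes
d_minus = torch.tensor([1.0, 0.5])  # distances to closest wrong-class prototypes
mu = (d_plus - d_minus) / (d_plus + d_minus)
# tensor([-0.6667,  0.5000]) -> first sample correct, second misclassified
```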
 class MarginLoss(torch.nn.modules.loss._Loss):

     def __init__(self,
                  margin=0.3,
                  size_average=None,
@@ -144,7 +133,6 @@ class MarginLoss(torch.nn.modules.loss._Loss):


 class NeuralGasEnergy(torch.nn.Module):

     def __init__(self, lm, **kwargs):
         super().__init__(**kwargs)
         self.lm = lm

@@ -165,7 +153,6 @@ class NeuralGasEnergy(torch.nn.Module):


 class GrowingNeuralGasEnergy(NeuralGasEnergy):

     def __init__(self, topology_layer, **kwargs):
         super().__init__(**kwargs)
         self.topology_layer = topology_layer
@@ -82,27 +82,23 @@ def stratified_prod_pooling(values: torch.Tensor,


 class StratifiedSumPooling(torch.nn.Module):
     """Thin wrapper over the `stratified_sum_pooling` function."""

-    def forward(self, values, labels):  # pylint: disable=no-self-use
+    def forward(self, values, labels):
         return stratified_sum_pooling(values, labels)


 class StratifiedProdPooling(torch.nn.Module):
     """Thin wrapper over the `stratified_prod_pooling` function."""

-    def forward(self, values, labels):  # pylint: disable=no-self-use
+    def forward(self, values, labels):
         return stratified_prod_pooling(values, labels)


 class StratifiedMinPooling(torch.nn.Module):
     """Thin wrapper over the `stratified_min_pooling` function."""

-    def forward(self, values, labels):  # pylint: disable=no-self-use
+    def forward(self, values, labels):
         return stratified_min_pooling(values, labels)


 class StratifiedMaxPooling(torch.nn.Module):
     """Thin wrapper over the `stratified_max_pooling` function."""

-    def forward(self, values, labels):  # pylint: disable=no-self-use
+    def forward(self, values, labels):
         return stratified_max_pooling(values, labels)
@@ -21,7 +21,7 @@ def cosine_similarity(x, y):
     Expected dimension of x is 2.
     Expected dimension of y is 2.
     """
-    x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
+    x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
     norm_x = x.pow(2).sum(1).sqrt()
     norm_y = y.pow(2).sum(1).sqrt()
     norm_mat = norm_x.unsqueeze(-1) @ norm_y.unsqueeze(-1).T
@@ -5,19 +5,19 @@ from torch.nn.parameter import Parameter

 from .initializers import (
     AbstractLinearTransformInitializer,
-    EyeLinearTransformInitializer,
+    EyeTransformInitializer,
 )


 class LinearTransform(torch.nn.Module):

     def __init__(
             self,
             in_dim: int,
             out_dim: int,
             initializer:
-            AbstractLinearTransformInitializer = EyeLinearTransformInitializer()):
-        super().__init__()
+            AbstractLinearTransformInitializer = EyeTransformInitializer(),
+            **kwargs):
+        super().__init__(**kwargs)
         self.set_weights(in_dim, out_dim, initializer)

     @property
@@ -32,15 +32,12 @@ class LinearTransform(torch.nn.Module):
             in_dim: int,
             out_dim: int,
             initializer:
-            AbstractLinearTransformInitializer = EyeLinearTransformInitializer()):
+            AbstractLinearTransformInitializer = EyeTransformInitializer()):
         weights = initializer.generate(in_dim, out_dim)
         self._register_weights(weights)

     def forward(self, x):
-        return x @ self._weights
-
-    def extra_repr(self):
-        return f"weights: (shape: {tuple(self._weights.shape)})"
+        return x @ self.weights.T


 # Aliases
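The two `forward` bodies imply different weight layouts: `x @ self._weights` expects an `(in_dim, out_dim)` matrix, while `x @ self.weights.T` expects `(out_dim, in_dim)`, as `torch.nn.Linear` stores it. A standalone sketch of the equivalence (the layout reading is my inference from the diff, not stated in it):

```python
import torch

x = torch.randn(5, 4)                 # batch of 5 inputs, in_dim = 4
w_in_out = torch.rand(4, 2)           # (in_dim, out_dim) layout
w_out_in = w_in_out.T.contiguous()    # (out_dim, in_dim) layout

# both orientations express the same linear projection
assert torch.allclose(x @ w_in_out, x @ w_out_in.T)
```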
@@ -1,6 +1,6 @@
 """ProtoTorch datasets"""

-from .abstract import CSVDataset, NumpyDataset
+from .abstract import NumpyDataset
 from .sklearn import (
     Blobs,
     Circles,

@@ -10,4 +10,3 @@ from .sklearn import (
 )
 from .spiral import Spiral
 from .tecator import Tecator
-from .xor import XOR
@@ -10,7 +10,6 @@ https://github.com/pytorch/vision/blob/master/torchvision/datasets/mnist.py

 import os

-import numpy as np
 import torch


@@ -20,7 +19,7 @@ class Dataset(torch.utils.data.Dataset):
     _repr_indent = 2

     def __init__(self, root):
-        if isinstance(root, str):
+        if isinstance(root, torch._six.string_classes):
             root = os.path.expanduser(root)
         self.root = root
@@ -93,23 +92,8 @@ class ProtoDataset(Dataset):

 class NumpyDataset(torch.utils.data.TensorDataset):
     """Create a PyTorch TensorDataset from NumPy arrays."""

     def __init__(self, data, targets):
         self.data = torch.Tensor(data)
         self.targets = torch.LongTensor(targets)
         tensors = [self.data, self.targets]
         super().__init__(*tensors)
-
-
-class CSVDataset(NumpyDataset):
-    """Create a Dataset from a CSV file."""
-
-    def __init__(self, filepath, target_col=-1, delimiter=',', skip_header=0):
-        raw = np.genfromtxt(
-            filepath,
-            delimiter=delimiter,
-            skip_header=skip_header,
-        )
-        data = np.delete(raw, 1, target_col)
-        targets = raw[:, target_col]
-        super().__init__(data, targets)
@@ -5,21 +5,14 @@ URL:

 """

-from __future__ import annotations
-
 import warnings
-from typing import Sequence
+from typing import Sequence, Union

-from sklearn.datasets import (
-    load_iris,
-    make_blobs,
-    make_circles,
-    make_classification,
-    make_moons,
-)
-
 from prototorch.datasets.abstract import NumpyDataset
+from sklearn.datasets import (load_iris, make_blobs, make_circles,
+                              make_classification, make_moons)


 class Iris(NumpyDataset):
     """Iris Dataset by Ronald Fisher introduced in 1936.
@@ -42,10 +35,9 @@ class Iris(NumpyDataset):
     :param dims: select a subset of dimensions
     """

-    def __init__(self, dims: Sequence[int] | None = None):
+    def __init__(self, dims: Sequence[int] = None):
         x, y = load_iris(return_X_y=True)
-        if dims is not None:
+        if dims:
             x = x[:, dims]
         super().__init__(x, y)
@@ -57,20 +49,15 @@ class Blobs(NumpyDataset):
     https://scikit-learn.org/stable/datasets/sample_generators.html#sample-generators.

     """

-    def __init__(
-            self,
-            num_samples: int = 300,
-            num_features: int = 2,
-            seed: None | int = 0,
-    ):
-        x, y = make_blobs(
-            num_samples,
-            num_features,
-            centers=None,
-            random_state=seed,
-            shuffle=False,
-        )
+    def __init__(self,
+                 num_samples: int = 300,
+                 num_features: int = 2,
+                 seed: Union[None, int] = 0):
+        x, y = make_blobs(num_samples,
+                          num_features,
+                          centers=None,
+                          random_state=seed,
+                          shuffle=False)
         super().__init__(x, y)
@@ -82,34 +69,29 @@ class Random(NumpyDataset):

     Note: n_classes * n_clusters_per_class <= 2**n_informative must satisfy.
     """

-    def __init__(
-            self,
-            num_samples: int = 300,
-            num_features: int = 2,
-            num_classes: int = 2,
-            num_clusters: int = 2,
-            num_informative: None | int = None,
-            separation: float = 1.0,
-            seed: None | int = 0,
-    ):
+    def __init__(self,
+                 num_samples: int = 300,
+                 num_features: int = 2,
+                 num_classes: int = 2,
+                 num_clusters: int = 2,
+                 num_informative: Union[None, int] = None,
+                 separation: float = 1.0,
+                 seed: Union[None, int] = 0):
         if not num_informative:
             import math
             num_informative = math.ceil(math.log2(num_classes * num_clusters))
         if num_features < num_informative:
             warnings.warn("Generating more features than requested.")
             num_features = num_informative
-        x, y = make_classification(
-            num_samples,
-            num_features,
-            n_informative=num_informative,
-            n_redundant=0,
-            n_classes=num_classes,
-            n_clusters_per_class=num_clusters,
-            class_sep=separation,
-            random_state=seed,
-            shuffle=False,
-        )
+        x, y = make_classification(num_samples,
+                                   num_features,
+                                   n_informative=num_informative,
+                                   n_redundant=0,
+                                   n_classes=num_classes,
+                                   n_clusters_per_class=num_clusters,
+                                   class_sep=separation,
+                                   random_state=seed,
+                                   shuffle=False)
         super().__init__(x, y)
@@ -122,21 +104,16 @@ class Circles(NumpyDataset):
     https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_circles.html

     """

-    def __init__(
-            self,
-            num_samples: int = 300,
-            noise: float = 0.3,
-            factor: float = 0.8,
-            seed: None | int = 0,
-    ):
-        x, y = make_circles(
-            num_samples,
-            noise=noise,
-            factor=factor,
-            random_state=seed,
-            shuffle=False,
-        )
+    def __init__(self,
+                 num_samples: int = 300,
+                 noise: float = 0.3,
+                 factor: float = 0.8,
+                 seed: Union[None, int] = 0):
+        x, y = make_circles(num_samples,
+                            noise=noise,
+                            factor=factor,
+                            random_state=seed,
+                            shuffle=False)
         super().__init__(x, y)

@@ -149,17 +126,12 @@ class Moons(NumpyDataset):
     https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html

     """

-    def __init__(
-            self,
-            num_samples: int = 300,
-            noise: float = 0.3,
-            seed: None | int = 0,
-    ):
-        x, y = make_moons(
-            num_samples,
-            noise=noise,
-            random_state=seed,
-            shuffle=False,
-        )
+    def __init__(self,
+                 num_samples: int = 300,
+                 noise: float = 0.3,
+                 seed: Union[None, int] = 0):
+        x, y = make_moons(num_samples,
+                          noise=noise,
+                          random_state=seed,
+                          shuffle=False)
         super().__init__(x, y)
@@ -9,7 +9,6 @@ def make_spiral(num_samples=500, noise=0.3):

     For use in Prototorch use `prototorch.datasets.Spiral` instead.
     """

     def get_samples(n, delta_t):
         points = []
         for i in range(n):

@@ -53,7 +52,6 @@ class Spiral(torch.utils.data.TensorDataset):
     :param num_samples: number of random samples
     :param noise: noise added to the spirals
     """

     def __init__(self, num_samples: int = 500, noise: float = 0.3):
         x, y = make_spiral(num_samples, noise)
         super().__init__(torch.Tensor(x), torch.LongTensor(y))
@@ -36,14 +36,12 @@ Description:
     are determined by analytic chemistry.
 """

-import logging
 import os

 import numpy as np
 import torch
-from torchvision.datasets.utils import download_file_from_google_drive

 from prototorch.datasets.abstract import ProtoDataset
+from torchvision.datasets.utils import download_file_from_google_drive


 class Tecator(ProtoDataset):

@@ -82,11 +80,13 @@ class Tecator(ProtoDataset):
         if self._check_exists():
             return

-        logging.debug("Making directories...")
+        if self.verbose:
+            print("Making directories...")
         os.makedirs(self.raw_folder, exist_ok=True)
         os.makedirs(self.processed_folder, exist_ok=True)

-        logging.debug("Downloading...")
+        if self.verbose:
+            print("Downloading...")
         for fileid, md5 in self._resources:
             filename = "tecator.npz"
             download_file_from_google_drive(fileid,

@@ -94,7 +94,8 @@ class Tecator(ProtoDataset):
                                             filename=filename,
                                             md5=md5)

-        logging.debug("Processing...")
+        if self.verbose:
+            print("Processing...")
         with np.load(os.path.join(self.raw_folder, "tecator.npz"),
                      allow_pickle=False) as f:
             x_train, y_train = f["x_train"], f["y_train"]

@@ -115,4 +116,5 @@ class Tecator(ProtoDataset):
                   "wb") as f:
             torch.save(test_set, f)

-        logging.debug("Done!")
+        if self.verbose:
+            print("Done!")
@@ -1,19 +0,0 @@
-"""Exclusive-or (XOR) dataset for binary classification."""
-
-import torch
-
-
-def make_xor(num_samples=500):
-    x = torch.rand(num_samples, 2)
-    y = torch.zeros(num_samples)
-    y[torch.logical_and(x[:, 0] > 0.5, x[:, 1] < 0.5)] = 1
-    y[torch.logical_and(x[:, 1] > 0.5, x[:, 0] < 0.5)] = 1
-    return x, y
-
-
-class XOR(torch.utils.data.TensorDataset):
-    """Exclusive-or (XOR) dataset for binary classification."""
-
-    def __init__(self, num_samples: int = 500):
-        x, y = make_xor(num_samples)
-        super().__init__(x, y)
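The labeling rule of the removed XOR dataset, shown standalone: points in the off-diagonal quadrants of the unit square get label 1, everything else stays 0.

```python
import torch

x = torch.rand(500, 2)
y = torch.zeros(500)
y[(x[:, 0] > 0.5) & (x[:, 1] < 0.5)] = 1  # lower-right quadrant
y[(x[:, 1] > 0.5) & (x[:, 0] < 0.5)] = 1  # upper-left quadrant
```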
@@ -4,7 +4,6 @@ import torch


 class LambdaLayer(torch.nn.Module):

     def __init__(self, fn, name=None):
         super().__init__()
         self.fn = fn

@@ -18,7 +17,6 @@ class LambdaLayer(torch.nn.Module):


 class LossLayer(torch.nn.modules.loss._Loss):

     def __init__(self,
                  fn,
                  name=None,
@@ -1,11 +1,6 @@
-"""ProtoTorch utils module"""
+"""ProtoFlow utils module"""

-from .colors import (
-    get_colors,
-    get_legend_handles,
-    hex_to_rgb,
-    rgb_to_hex,
-)
+from .colors import hex_to_rgb, rgb_to_hex
 from .utils import (
     mesh2d,
     parse_data_arg,
@@ -1,13 +1,4 @@
-"""ProtoTorch color utilities"""
+"""ProtoFlow color utilities"""

-import matplotlib.lines as mlines
-import torch
-from matplotlib import cm
-from matplotlib.colors import (
-    Normalize,
-    to_hex,
-    to_rgb,
-)


 def hex_to_rgb(hex_values):
@@ -22,39 +13,3 @@ def rgb_to_hex(rgb_values):
     for v in rgb_values:
         c = "%02x%02x%02x" % tuple(v)
         yield c
-
-
-def get_colors(vmax, vmin=0, cmap="viridis"):
-    cmap = cm.get_cmap(cmap)
-    colornorm = Normalize(vmin=vmin, vmax=vmax)
-    colors = dict()
-    for c in range(vmin, vmax + 1):
-        colors[c] = to_hex(cmap(colornorm(c)))
-    return colors
-
-
-def get_legend_handles(colors, labels, marker="dots", zero_indexed=False):
-    handles = list()
-    for color, label in zip(colors.values(), labels):
-        if marker == "dots":
-            handle = mlines.Line2D(
-                xdata=[],
-                ydata=[],
-                label=label,
-                color="white",
-                markerfacecolor=color,
-                marker="o",
-                markersize=10,
-                markeredgecolor="k",
-            )
-        else:
-            handle = mlines.Line2D(
-                xdata=[],
-                ydata=[],
-                label=label,
-                color=color,
-                marker="",
-                markersize=15,
-            )
-        handles.append(handle)
-    return handles
@@ -1,45 +1,13 @@
-"""ProtoTorch utilities"""
+"""ProtoFlow utilities"""

 import warnings
-from typing import (
-    Dict,
-    Iterable,
-    List,
-    Optional,
-    Union,
-)
+from typing import Union

 import numpy as np
 import torch
 from torch.utils.data import DataLoader, Dataset


-def generate_mesh(
-    minima: torch.TensorType,
-    maxima: torch.TensorType,
-    border: float = 1.0,
-    resolution: int = 100,
-    device: Optional[torch.device] = None,
-):
-    # Apply Border
-    ptp = maxima - minima
-    shift = border * ptp
-    minima -= shift
-    maxima += shift
-
-    # Generate Mesh
-    minima = minima.to(device).unsqueeze(1)
-    maxima = maxima.to(device).unsqueeze(1)
-
-    factors = torch.linspace(0, 1, resolution, device=device)
-    marginals = factors * maxima + ((1 - factors) * minima)
-
-    single_dimensions = torch.meshgrid(*marginals)
-    mesh_input = torch.stack([dim.ravel() for dim in single_dimensions], dim=1)
-
-    return mesh_input, single_dimensions
-
-
 def mesh2d(x=None, border: float = 1.0, resolution: int = 100):
     if x is not None:
         x_shift = border * np.ptp(x[:, 0])
|
|||||||
return mesh, xx, yy
|
return mesh, xx, yy
|
||||||
|
|
||||||
|
|
||||||
def distribution_from_list(list_dist: List[int],
|
def distribution_from_list(list_dist: list[int], clabels: list[int] = []):
|
||||||
clabels: Optional[Iterable[int]] = None):
|
|
||||||
clabels = clabels or list(range(len(list_dist)))
|
clabels = clabels or list(range(len(list_dist)))
|
||||||
distribution = dict(zip(clabels, list_dist))
|
distribution = dict(zip(clabels, list_dist))
|
||||||
return distribution
|
return distribution
|
||||||
|
|
||||||
|
|
||||||
def parse_distribution(
|
def parse_distribution(user_distribution: Union[dict[int, int], dict[str, str],
|
||||||
user_distribution,
|
list[int], tuple[int]],
|
||||||
clabels: Optional[Iterable[int]] = None) -> Dict[int, int]:
|
clabels: list[int] = []) -> dict[int, int]:
|
||||||
"""Parse user-provided distribution.
|
"""Parse user-provided distribution.
|
||||||
|
|
||||||
Return a dictionary with integer keys that represent the class labels and
|
Return a dictionary with integer keys that represent the class labels and
|
||||||
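The distribution convention both signatures parse, illustrated in isolation: a list is paired with class labels positionally, while a dict is taken as-is.

```python
list_dist = [2, 2, 3]                         # prototypes per class, by position
clabels = list(range(len(list_dist)))         # [0, 1, 2]
distribution = dict(zip(clabels, list_dist))  # {0: 2, 1: 2, 2: 3}
```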
@@ -108,13 +75,9 @@ def parse_distribution

 def parse_data_arg(data_arg: Union[Dataset, DataLoader, list, tuple]):
     """Return data and target as torch tensors."""
     if isinstance(data_arg, Dataset):
-        if hasattr(data_arg, "__len__"):
-            ds_size = len(data_arg)  # type: ignore
-            loader = DataLoader(data_arg, batch_size=ds_size)
-            data, targets = next(iter(loader))
-        else:
-            emsg = f"Dataset {data_arg} is not sized (`__len__` unimplemented)."
-            raise TypeError(emsg)
-
+        ds_size = len(data_arg)
+        loader = DataLoader(data_arg, batch_size=ds_size)
+        data, targets = next(iter(loader))
     elif isinstance(data_arg, DataLoader):
         data = torch.tensor([])
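A sketch of the `Dataset` branch both versions rely on: load the entire sized dataset in a single batch to recover plain data and target tensors.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.randn(10, 2), torch.arange(10))
loader = DataLoader(ds, batch_size=len(ds))  # one batch holding everything
data, targets = next(iter(loader))           # shapes: (10, 2) and (10,)
```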
@@ -1,9 +1,8 @@
 [pylint]
 disable =
     too-many-arguments,
     too-few-public-methods,
     fixme,


 [pycodestyle]
 max-line-length = 79

@@ -13,4 +12,4 @@ multi_line_output = 3
 include_trailing_comma = True
 force_grid_wrap = 3
 use_parentheses = True
 line_length = 79
59 setup.py
@@ -1,12 +1,10 @@
 """
-[ASCII-art banner spelling "ProtoTorch" in block letters]
+[ASCII-art banner spelling "ProtoTorch" in figlet-style outline letters]

 ProtoTorch Core Package
 """
@@ -15,24 +13,20 @@ from setuptools import find_packages, setup
 PROJECT_URL = "https://github.com/si-cim/prototorch"
 DOWNLOAD_URL = "https://github.com/si-cim/prototorch.git"

-with open("README.md", encoding="utf-8") as fh:
+with open("README.md", "r") as fh:
     long_description = fh.read()

 INSTALL_REQUIRES = [
-    "torch>=2.0.0",
-    "torchvision",
-    "numpy",
-    "scikit-learn",
-    "matplotlib",
+    "torch>=1.3.1",
+    "torchvision>=0.5.0",
+    "numpy>=1.9.1",
+    "sklearn",
 ]
 DATASETS = [
     "requests",
     "tqdm",
 ]
-DEV = [
-    "bump2version",
-    "pre-commit",
-]
+DEV = ["bumpversion"]
 DOCS = [
     "recommonmark",
     "sphinx",
@@ -41,17 +35,15 @@ DOCS = [
     "sphinx-autodoc-typehints",
 ]
 EXAMPLES = [
+    "matplotlib",
     "torchinfo",
 ]
-TESTS = [
-    "flake8",
-    "pytest",
-]
+TESTS = ["codecov", "pytest"]
 ALL = DATASETS + DEV + DOCS + EXAMPLES + TESTS

 setup(
     name="prototorch",
-    version="0.7.6",
+    version="0.5.0",
     description="Highly extensible, GPU-supported "
     "Learning Vector Quantization (LVQ) toolbox "
     "built using PyTorch and its nn API.",
|
|||||||
url=PROJECT_URL,
|
url=PROJECT_URL,
|
||||||
download_url=DOWNLOAD_URL,
|
download_url=DOWNLOAD_URL,
|
||||||
license="MIT",
|
license="MIT",
|
||||||
python_requires=">=3.8",
|
|
||||||
install_requires=INSTALL_REQUIRES,
|
install_requires=INSTALL_REQUIRES,
|
||||||
extras_require={
|
extras_require={
|
||||||
"datasets": DATASETS,
|
|
||||||
"dev": DEV,
|
|
||||||
"docs": DOCS,
|
"docs": DOCS,
|
||||||
|
"datasets": DATASETS,
|
||||||
"examples": EXAMPLES,
|
"examples": EXAMPLES,
|
||||||
"tests": TESTS,
|
"tests": TESTS,
|
||||||
"all": ALL,
|
"all": ALL,
|
||||||
},
|
},
|
||||||
classifiers=[
|
classifiers=[
|
||||||
|
"Development Status :: 2 - Pre-Alpha",
|
||||||
"Environment :: Console",
|
"Environment :: Console",
|
||||||
"Natural Language :: English",
|
|
||||||
"Development Status :: 4 - Beta",
|
|
||||||
"Intended Audience :: Developers",
|
"Intended Audience :: Developers",
|
||||||
"Intended Audience :: Education",
|
"Intended Audience :: Education",
|
||||||
"Intended Audience :: Science/Research",
|
"Intended Audience :: Science/Research",
|
||||||
|
"License :: OSI Approved :: MIT License",
|
||||||
|
"Natural Language :: English",
|
||||||
|
"Programming Language :: Python :: 3.6",
|
||||||
|
"Programming Language :: Python :: 3.7",
|
||||||
|
"Programming Language :: Python :: 3.8",
|
||||||
|
"Programming Language :: Python :: 3.9",
|
||||||
|
"Operating System :: OS Independent",
|
||||||
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
||||||
"Topic :: Software Development :: Libraries",
|
"Topic :: Software Development :: Libraries",
|
||||||
"Topic :: Software Development :: Libraries :: Python Modules",
|
"Topic :: Software Development :: Libraries :: Python Modules",
|
||||||
"License :: OSI Approved :: MIT License",
|
|
||||||
"Operating System :: OS Independent",
|
|
||||||
"Programming Language :: Python :: 3",
|
|
||||||
"Programming Language :: Python :: 3.8",
|
|
||||||
"Programming Language :: Python :: 3.9",
|
|
||||||
"Programming Language :: Python :: 3.10",
|
|
||||||
"Programming Language :: Python :: 3.11",
|
|
||||||
],
|
],
|
||||||
packages=find_packages(),
|
packages=find_packages(),
|
||||||
zip_safe=False,
|
zip_safe=False,
|
||||||
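The `extras_require` groups above become installable extras, e.g. `pip install "prototorch[all]"`. As a minimal sketch (assuming a built and installed prototorch), the registered extras can be read back from the package metadata:

```python
# List the extras declared via extras_require in setup().
from importlib.metadata import metadata

md = metadata("prototorch")
print(md.get_all("Provides-Extra"))  # e.g. ['all', 'datasets', 'dev', 'docs', ...]
```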
@@ -245,45 +245,33 @@ def test_random_reasonings_init_channels_not_first():
 
 # Transform initializers
 def test_eye_transform_init_square():
-    t = pt.initializers.EyeLinearTransformInitializer()
+    t = pt.initializers.EyeTransformInitializer()
     I = t.generate(3, 3)
     assert torch.allclose(I, torch.eye(3))
 
 
 def test_eye_transform_init_narrow():
-    t = pt.initializers.EyeLinearTransformInitializer()
+    t = pt.initializers.EyeTransformInitializer()
     actual = t.generate(3, 2)
     desired = torch.Tensor([[1, 0], [0, 1], [0, 0]])
     assert torch.allclose(actual, desired)
 
 
 def test_eye_transform_init_wide():
-    t = pt.initializers.EyeLinearTransformInitializer()
+    t = pt.initializers.EyeTransformInitializer()
     actual = t.generate(2, 3)
     desired = torch.Tensor([[1, 0, 0], [0, 1, 0]])
     assert torch.allclose(actual, desired)
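Only the initializer's class name changes in these three tests; the contract they pin down is the same on both branches: `generate(in_dim, out_dim)` yields a rectangular identity matrix. A standalone sketch of that contract (`eye_like` is an illustrative name, not the library API):

```python
import torch

def eye_like(in_dim: int, out_dim: int) -> torch.Tensor:
    # torch.eye supports rectangular shapes: ones on the main diagonal,
    # zeros everywhere else.
    return torch.eye(in_dim, out_dim)

assert torch.allclose(eye_like(3, 3), torch.eye(3))                            # square
assert torch.allclose(eye_like(3, 2), torch.Tensor([[1, 0], [0, 1], [0, 0]]))  # narrow
assert torch.allclose(eye_like(2, 3), torch.Tensor([[1, 0, 0], [0, 1, 0]]))    # wide
```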
 
 
 # Transforms
-def test_linear_transform_default_eye_init():
+def test_linear_transform():
     l = pt.transforms.LinearTransform(2, 4)
     actual = l.weights
     desired = torch.Tensor([[1, 0, 0, 0], [0, 1, 0, 0]])
     assert torch.allclose(actual, desired)
 
 
-def test_linear_transform_forward():
-    l = pt.transforms.LinearTransform(4, 2)
-    actual_weights = l.weights
-    desired_weights = torch.Tensor([[1, 0], [0, 1], [0, 0], [0, 0]])
-    assert torch.allclose(actual_weights, desired_weights)
-    actual_outputs = l(torch.Tensor([[1.1, 2.2, 3.3, 4.4], \
-                                     [1.1, 2.2, 3.3, 4.4], \
-                                     [5.5, 6.6, 7.7, 8.8]]))
-    desired_outputs = torch.Tensor([[1.1, 2.2], [1.1, 2.2], [5.5, 6.6]])
-    assert torch.allclose(actual_outputs, desired_outputs)
 
 
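The forward test above (present only on master) fixes the semantics of `LinearTransform`: with a weight matrix of shape `(in_dim, out_dim)`, applying the layer is a plain matrix product, so the eye-initialized 4-to-2 transform keeps exactly the first two input columns. The expected values can be reproduced without the library:

```python
import torch

# Eye-initialized weights for in_dim=4, out_dim=2.
weights = torch.Tensor([[1, 0], [0, 1], [0, 0], [0, 0]])
x = torch.Tensor([[1.1, 2.2, 3.3, 4.4],
                  [1.1, 2.2, 3.3, 4.4],
                  [5.5, 6.6, 7.7, 8.8]])
print(x @ weights)  # tensor([[1.1, 2.2], [1.1, 2.2], [5.5, 6.6]])
```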
 def test_linear_transform_zeros_init():
     l = pt.transforms.LinearTransform(
         in_dim=2,
@@ -404,7 +392,6 @@ def test_glvq_loss_one_hot_unequal():
 
 # Activations
 class TestActivations(unittest.TestCase):
 
     def setUp(self):
         self.flist = ["identity", "sigmoid_beta", "swish_beta"]
         self.x = torch.randn(1024, 1)
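For orientation, `self.flist` names three parameterized activations. Their textbook forms are sketched below; the exact signatures and default `beta` in prototorch are not visible in this diff, so treat these definitions as illustrative only:

```python
import torch

def identity(x):
    return x

def sigmoid_beta(x, beta=1.0):
    # Logistic function with a steepness parameter beta.
    return 1.0 / (1.0 + torch.exp(-beta * x))

def swish_beta(x, beta=1.0):
    # Swish: the input scaled by a beta-steepened sigmoid.
    return x * sigmoid_beta(x, beta)
```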
@@ -419,7 +406,6 @@ class TestActivations(unittest.TestCase):
         self.assertTrue(iscallable)
 
     def test_callable_deserialization(self):
 
         def dummy(x, **kwargs):
             return x
 
@@ -464,7 +450,6 @@ class TestActivations(unittest.TestCase):
 
 # Competitions
 class TestCompetitions(unittest.TestCase):
 
     def setUp(self):
         pass
 
@@ -518,7 +503,6 @@ class TestCompetitions(unittest.TestCase):
 
 # Pooling
 class TestPooling(unittest.TestCase):
 
     def setUp(self):
         pass
 
@@ -619,7 +603,6 @@ class TestPooling(unittest.TestCase):
 
 # Distances
 class TestDistances(unittest.TestCase):
 
     def setUp(self):
         self.nx, self.mx = 32, 2048
         self.ny, self.my = 8, 2048
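The fixture shapes make the intent of this suite concrete: `x` holds 32 vectors and `y` holds 8 vectors, both 2048-dimensional, so any pairwise distance under test should map them to a 32 x 8 matrix. Illustrated with `torch.cdist` as a stand-in, not necessarily the implementation being tested:

```python
import torch

x = torch.randn(32, 2048)  # (nx, mx)
y = torch.randn(8, 2048)   # (ny, my)
d = torch.cdist(x, y)      # pairwise Euclidean distances
print(d.shape)             # torch.Size([32, 8])
```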
 
@@ -1,6 +1,7 @@
 """ProtoTorch datasets test suite"""
 
 import os
+import shutil
 import unittest
 
 import numpy as np
@@ -11,7 +12,6 @@ from prototorch.datasets.abstract import Dataset, ProtoDataset
 
 
 class TestAbstract(unittest.TestCase):
 
     def setUp(self):
         self.ds = Dataset("./artifacts")
 
@@ -28,7 +28,6 @@ class TestAbstract(unittest.TestCase):
 
 
 class TestProtoDataset(unittest.TestCase):
 
     def test_download(self):
         with self.assertRaises(NotImplementedError):
             _ = ProtoDataset("./artifacts", download=True)
@@ -39,7 +38,6 @@ class TestProtoDataset(unittest.TestCase):
 
 
 class TestNumpyDataset(unittest.TestCase):
 
     def test_list_init(self):
         ds = pt.datasets.NumpyDataset([1], [1])
         self.assertEqual(len(ds), 1)
@@ -51,33 +49,13 @@ class TestNumpyDataset(unittest.TestCase):
         self.assertEqual(len(ds), 3)
 
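Taken together, the two NumpyDataset tests (a list input of length 1, an array input of length 3) describe a thin wrapper that makes array-likes consumable by a torch `DataLoader`. A hypothetical usage sketch along those lines:

```python
import numpy as np
import torch

import prototorch as pt

data = np.random.rand(3, 2)    # three 2-dimensional samples
targets = np.array([0, 1, 0])
ds = pt.datasets.NumpyDataset(data, targets)

loader = torch.utils.data.DataLoader(ds, batch_size=2, shuffle=True)
x, y = next(iter(loader))      # one batch of at most two samples
```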
 
-class TestCSVDataset(unittest.TestCase):
-
-    def setUp(self):
-        data = np.random.rand(100, 4)
-        targets = np.random.randint(2, size=(100, 1))
-        arr = np.hstack([data, targets])
-        if not os.path.exists("./artifacts"):
-            os.mkdir("./artifacts")
-        np.savetxt("./artifacts/test.csv", arr, delimiter=",")
-
-    def test_len(self):
-        ds = pt.datasets.CSVDataset("./artifacts/test.csv")
-        self.assertEqual(len(ds), 100)
-
-    def tearDown(self):
-        os.remove("./artifacts/test.csv")
 
 
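The `TestCSVDataset` block above (present only on master) is worth unpacking: its fixture writes a 100 x 5 CSV (four feature columns plus one target column), which `CSVDataset` then reads back as 100 samples. The same round trip in plain NumPy, as a reference point (the column split is an assumption):

```python
import os

import numpy as np

os.makedirs("./artifacts", exist_ok=True)
arr = np.hstack([np.random.rand(100, 4),
                 np.random.randint(2, size=(100, 1))])
np.savetxt("./artifacts/test.csv", arr, delimiter=",")

loaded = np.genfromtxt("./artifacts/test.csv", delimiter=",")
data, targets = loaded[:, :-1], loaded[:, -1]
print(data.shape, targets.shape)  # (100, 4) (100,)
```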
 class TestSpiral(unittest.TestCase):
 
     def test_init(self):
         ds = pt.datasets.Spiral(num_samples=10)
         self.assertEqual(len(ds), 10)
 
 
 class TestIris(unittest.TestCase):
 
     def setUp(self):
         self.ds = pt.datasets.Iris()
 
@@ -93,94 +71,90 @@ class TestIris(unittest.TestCase):
 
 
 class TestBlobs(unittest.TestCase):
 
     def test_size(self):
         ds = pt.datasets.Blobs(num_samples=10)
         self.assertEqual(len(ds), 10)
 
 
 class TestRandom(unittest.TestCase):
 
     def test_size(self):
         ds = pt.datasets.Random(num_samples=10)
         self.assertEqual(len(ds), 10)
 
 
 class TestCircles(unittest.TestCase):
 
     def test_size(self):
         ds = pt.datasets.Circles(num_samples=10)
         self.assertEqual(len(ds), 10)
 
 
 class TestMoons(unittest.TestCase):
 
     def test_size(self):
         ds = pt.datasets.Moons(num_samples=10)
         self.assertEqual(len(ds), 10)
 
 
-# class TestTecator(unittest.TestCase):
-#     def setUp(self):
-#         self.artifacts_dir = "./artifacts/Tecator"
-#         self._remove_artifacts()
+class TestTecator(unittest.TestCase):
+    def setUp(self):
+        self.artifacts_dir = "./artifacts/Tecator"
+        self._remove_artifacts()
 
-#     def _remove_artifacts(self):
-#         if os.path.exists(self.artifacts_dir):
-#             shutil.rmtree(self.artifacts_dir)
+    def _remove_artifacts(self):
+        if os.path.exists(self.artifacts_dir):
+            shutil.rmtree(self.artifacts_dir)
 
-#     def test_download_false(self):
-#         rootdir = self.artifacts_dir.rpartition("/")[0]
-#         self._remove_artifacts()
-#         with self.assertRaises(RuntimeError):
-#             _ = pt.datasets.Tecator(rootdir, download=False)
+    def test_download_false(self):
+        rootdir = self.artifacts_dir.rpartition("/")[0]
+        self._remove_artifacts()
+        with self.assertRaises(RuntimeError):
+            _ = pt.datasets.Tecator(rootdir, download=False)
 
-#     def test_download_caching(self):
-#         rootdir = self.artifacts_dir.rpartition("/")[0]
-#         _ = pt.datasets.Tecator(rootdir, download=True, verbose=False)
-#         _ = pt.datasets.Tecator(rootdir, download=False, verbose=False)
+    def test_download_caching(self):
+        rootdir = self.artifacts_dir.rpartition("/")[0]
+        _ = pt.datasets.Tecator(rootdir, download=True, verbose=False)
+        _ = pt.datasets.Tecator(rootdir, download=False, verbose=False)
 
-#     def test_repr(self):
-#         rootdir = self.artifacts_dir.rpartition("/")[0]
-#         train = pt.datasets.Tecator(rootdir, download=True, verbose=True)
-#         self.assertTrue("Split: Train" in train.__repr__())
+    def test_repr(self):
+        rootdir = self.artifacts_dir.rpartition("/")[0]
+        train = pt.datasets.Tecator(rootdir, download=True, verbose=True)
+        self.assertTrue("Split: Train" in train.__repr__())
 
-#     def test_download_train(self):
-#         rootdir = self.artifacts_dir.rpartition("/")[0]
-#         train = pt.datasets.Tecator(root=rootdir,
-#                                     train=True,
-#                                     download=True,
-#                                     verbose=False)
-#         train = pt.datasets.Tecator(root=rootdir, download=True, verbose=False)
-#         x_train, y_train = train.data, train.targets
-#         self.assertEqual(x_train.shape[0], 144)
-#         self.assertEqual(y_train.shape[0], 144)
-#         self.assertEqual(x_train.shape[1], 100)
+    def test_download_train(self):
+        rootdir = self.artifacts_dir.rpartition("/")[0]
+        train = pt.datasets.Tecator(root=rootdir,
+                                    train=True,
+                                    download=True,
+                                    verbose=False)
+        train = pt.datasets.Tecator(root=rootdir, download=True, verbose=False)
+        x_train, y_train = train.data, train.targets
+        self.assertEqual(x_train.shape[0], 144)
+        self.assertEqual(y_train.shape[0], 144)
+        self.assertEqual(x_train.shape[1], 100)
 
-#     def test_download_test(self):
-#         rootdir = self.artifacts_dir.rpartition("/")[0]
-#         test = pt.datasets.Tecator(root=rootdir, train=False, verbose=False)
-#         x_test, y_test = test.data, test.targets
-#         self.assertEqual(x_test.shape[0], 71)
-#         self.assertEqual(y_test.shape[0], 71)
-#         self.assertEqual(x_test.shape[1], 100)
+    def test_download_test(self):
+        rootdir = self.artifacts_dir.rpartition("/")[0]
+        test = pt.datasets.Tecator(root=rootdir, train=False, verbose=False)
+        x_test, y_test = test.data, test.targets
+        self.assertEqual(x_test.shape[0], 71)
+        self.assertEqual(y_test.shape[0], 71)
+        self.assertEqual(x_test.shape[1], 100)
 
-#     def test_class_to_idx(self):
-#         rootdir = self.artifacts_dir.rpartition("/")[0]
-#         test = pt.datasets.Tecator(root=rootdir, train=False, verbose=False)
-#         _ = test.class_to_idx
+    def test_class_to_idx(self):
+        rootdir = self.artifacts_dir.rpartition("/")[0]
+        test = pt.datasets.Tecator(root=rootdir, train=False, verbose=False)
+        _ = test.class_to_idx
 
-#     def test_getitem(self):
-#         rootdir = self.artifacts_dir.rpartition("/")[0]
-#         test = pt.datasets.Tecator(root=rootdir, train=False, verbose=False)
-#         x, y = test[0]
-#         self.assertEqual(x.shape[0], 100)
-#         self.assertIsInstance(y, int)
+    def test_getitem(self):
+        rootdir = self.artifacts_dir.rpartition("/")[0]
+        test = pt.datasets.Tecator(root=rootdir, train=False, verbose=False)
+        x, y = test[0]
+        self.assertEqual(x.shape[0], 100)
+        self.assertIsInstance(y, int)
 
-#     def test_loadable_with_dataloader(self):
-#         rootdir = self.artifacts_dir.rpartition("/")[0]
-#         test = pt.datasets.Tecator(root=rootdir, train=False, verbose=False)
-#         _ = torch.utils.data.DataLoader(test, batch_size=64, shuffle=True)
+    def test_loadable_with_dataloader(self):
+        rootdir = self.artifacts_dir.rpartition("/")[0]
+        test = pt.datasets.Tecator(root=rootdir, train=False, verbose=False)
+        _ = torch.utils.data.DataLoader(test, batch_size=64, shuffle=True)
 
-#     def tearDown(self):
-#         self._remove_artifacts()
+    def tearDown(self):
+        self._remove_artifacts()
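Master comments the entire Tecator suite out rather than deleting it, presumably because every test requires a network download. The assertions still document the dataset: 144 training and 71 test spectra with 100 features each. If the suite were re-enabled, the basic flow it exercises would look roughly like this (requires the download server to be reachable):

```python
import torch

import prototorch as pt

# Download (or reuse the cached copy of) the training split.
train = pt.datasets.Tecator(root="./artifacts", download=True, verbose=False)
print(len(train))  # 144 training samples, per the old assertions

loader = torch.utils.data.DataLoader(train, batch_size=64, shuffle=True)
x, y = next(iter(loader))
print(x.shape)     # torch.Size([64, 100])
```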