Compare commits


12 Commits

Author                 | SHA1       | Message                                 | Date
Alexander Engelsberger | 0788718c31 | ci: cache test IV                       | 2021-11-05 15:14:59 +01:00
Alexander Engelsberger | 4f5c4ebe8f | ci: cache test III                      | 2021-11-05 15:09:21 +01:00
Alexander Engelsberger | ae2a8e54ef | ci: cache test II                       | 2021-11-05 14:59:01 +01:00
Alexander Engelsberger | d9be100c1f | ci: use pip cache in jenkins            | 2021-11-05 14:55:12 +01:00
Alexander Engelsberger | 9d1dc7320f | ci: fix jenkinsfile                     | 2021-11-05 14:32:37 +01:00
Alexander Engelsberger | d11ab71b7e | ci: unit tests in jenkins               | 2021-11-05 14:30:08 +01:00
Alexander Engelsberger | 59037e1a50 | ci: upgrade pip before install          | 2021-11-04 10:53:51 +01:00
Alexander Engelsberger | a19b99be82 | ci: container debugging III             | 2021-11-04 10:50:44 +01:00
Alexander Engelsberger | f7e7558338 | ci: container debugging II              | 2021-11-04 10:42:53 +01:00
Alexander Engelsberger | d57648f9d6 | ci: container debugging                 | 2021-11-04 10:41:28 +01:00
Alexander Engelsberger | d24f580bf0 | ci: install dependencies with user flag | 2021-11-04 09:55:58 +01:00
Jensun Ravichandran    | 916973c3e8 | ci: migrate to jenkins                  | 2021-11-03 16:26:32 +01:00
36 changed files with 281 additions and 491 deletions

.bumpversion.cfg

@@ -1,5 +1,5 @@
[bumpversion]
current_version = 0.7.6
current_version = 0.7.1
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)
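
For reference, the `parse` entry above is an ordinary Python regular expression with named groups. A minimal sketch of how bump2version-style tooling splits a version string with it (illustrative only, not part of the diff):

import re

# Named-group pattern copied from the .bumpversion.cfg hunk above.
VERSION_PARSE = re.compile(r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)")

match = VERSION_PARSE.fullmatch("0.7.1")
assert match is not None
print(match.group("major"), match.group("minor"), match.group("patch"))  # -> 0 7 1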

.ci/python310.Dockerfile Normal file

@@ -0,0 +1,7 @@
FROM python:3.10
RUN adduser --uid 1000 jenkins
USER jenkins
RUN mkdir -p /home/jenkins/.cache/pip

.ci/python36.Dockerfile Normal file

@@ -0,0 +1,7 @@
FROM python:3.6
RUN adduser --uid 1000 jenkins
USER jenkins
RUN mkdir -p /home/jenkins/.cache/pip

.codacy.yml Normal file

@@ -0,0 +1,15 @@
# To validate the contents of your configuration file
# run the following command in the folder where the configuration file is located:
# codacy-analysis-cli validate-configuration --directory `pwd`
# To analyse, run:
# codacy-analysis-cli analyse --tool remark-lint --directory `pwd`
---
engines:
pylintpython3:
exclude_paths:
- config/engines.yml
remark-lint:
exclude_paths:
- config/engines.yml
exclude_paths:
- 'tests/**'

.codecov.yml Normal file

@@ -0,0 +1,2 @@
comment:
require_changes: yes

.github/workflows/tests.yml

@@ -5,71 +5,33 @@ name: tests
on:
push:
branches: [ master, dev ]
pull_request:
branches: [master]
branches: [ master ]
jobs:
style:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Python 3.11
uses: actions/setup-python@v4
with:
python-version: "3.11"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install .[all]
- uses: pre-commit/action@v3.0.0
compatibility:
needs: style
strategy:
fail-fast: false
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11"]
os: [ubuntu-latest, windows-latest]
exclude:
- os: windows-latest
python-version: "3.8"
- os: windows-latest
python-version: "3.9"
- os: windows-latest
python-version: "3.10"
build:
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install .[all]
- name: Test with pytest
run: |
pytest
publish_pypi:
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
needs: compatibility
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Python 3.10
uses: actions/setup-python@v4
with:
python-version: "3.11"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install .[all]
pip install wheel
- name: Build package
run: python setup.py sdist bdist_wheel
- name: Publish a Python distribution to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_API_TOKEN }}
- uses: actions/checkout@v2
- name: Set up Python 3.9
uses: actions/setup-python@v1
with:
python-version: 3.9
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install .[all]
- name: Lint with flake8
run: |
pip install flake8
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
pip install pytest
pytest

.pre-commit-config.yaml

@@ -3,7 +3,7 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
rev: v4.0.1
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
@@ -13,36 +13,36 @@ repos:
- id: check-case-conflict
- repo: https://github.com/myint/autoflake
rev: v2.1.1
rev: v1.4
hooks:
- id: autoflake
- repo: http://github.com/PyCQA/isort
rev: 5.12.0
rev: 5.8.0
hooks:
- id: isort
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.3.0
rev: v0.902
hooks:
- id: mypy
files: prototorch
additional_dependencies: [types-pkg_resources]
- repo: https://github.com/pre-commit/mirrors-yapf
rev: v0.32.0
rev: v0.31.0
hooks:
- id: yapf
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.10.0
rev: v1.9.0
hooks:
- id: python-use-type-annotations
- id: python-no-log-warn
- id: python-check-blanket-noqa
- repo: https://github.com/asottile/pyupgrade
rev: v3.7.0
rev: v2.19.4
hooks:
- id: pyupgrade

Jenkinsfile vendored Normal file

@@ -0,0 +1,41 @@
pipeline {
agent none
stages {
stage('Unit Tests') {
parallel {
stage('3.6'){
agent{
dockerfile {
filename 'python36.Dockerfile'
dir '.ci'
args '-v pip-cache:/home/jenkins/.cache/pip'
}
}
steps {
sh 'pip install pip --upgrade --progress-bar off'
sh 'pip install .[all] --progress-bar off'
sh '~/.local/bin/pytest -v --junitxml=reports/result.xml --cov=prototorch/ --cov-report=xml:reports/coverage.xml'
cobertura coberturaReportFile: 'reports/coverage.xml'
junit 'reports/**/*.xml'
}
}
stage('3.10'){
agent{
dockerfile {
filename 'python310.Dockerfile'
dir '.ci'
args '-v pip-cache:/home/jenkins/.cache/pip'
}
}
steps {
sh 'pip install pip --upgrade --progress-bar off'
sh 'pip install .[all] --progress-bar off'
sh '~/.local/bin/pytest -v --junitxml=reports/result.xml --cov=prototorch/ --cov-report=xml:reports/coverage.xml'
cobertura coberturaReportFile: 'reports/coverage.xml'
junit 'reports/**/*.xml'
}
}
}
}
}
}

LICENSE

@@ -1,7 +1,6 @@
MIT License
Copyright (c) 2020 Saxon Institute for Computational Intelligence and Machine
Learning (SICIM)
Copyright (c) 2020 si-cim
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

README.md

@@ -2,9 +2,12 @@
![ProtoTorch Logo](https://prototorch.readthedocs.io/en/latest/_static/horizontal-lockup.png)
[![Build Status](https://api.travis-ci.com/si-cim/prototorch.svg?branch=master)](https://travis-ci.com/github/si-cim/prototorch)
![tests](https://github.com/si-cim/prototorch/workflows/tests/badge.svg)
[![GitHub tag (latest by date)](https://img.shields.io/github/v/tag/si-cim/prototorch?color=yellow&label=version)](https://github.com/si-cim/prototorch/releases)
[![PyPI](https://img.shields.io/pypi/v/prototorch)](https://pypi.org/project/prototorch/)
[![codecov](https://codecov.io/gh/si-cim/prototorch/branch/master/graph/badge.svg)](https://codecov.io/gh/si-cim/prototorch)
[![Codacy Badge](https://api.codacy.com/project/badge/Grade/76273904bf9343f0a8b29cd8aca242e7)](https://www.codacy.com/gh/si-cim/prototorch?utm_source=github.com&utm_medium=referral&utm_content=si-cim/prototorch&utm_campaign=Badge_Grade)
[![GitHub license](https://img.shields.io/github/license/si-cim/prototorch)](https://github.com/si-cim/prototorch/blob/master/LICENSE)
*Tensorflow users, see:* [ProtoFlow](https://github.com/si-cim/protoflow)

deprecated.travis.yml Normal file

@@ -0,0 +1,46 @@
dist: bionic
sudo: false
language: python
python:
- 3.9
- 3.8
- 3.7
- 3.6
cache:
directories:
- "$HOME/.cache/pip"
- "./tests/artifacts"
- "$HOME/datasets"
install:
- pip install .[all] --progress-bar off
# Generate code coverage report
script:
- coverage run -m pytest
# Push the results to codecov
after_success:
- bash <(curl -s https://codecov.io/bash)
# Publish on PyPI
jobs:
include:
- stage: build
python: 3.9
script: echo "Starting Pypi build"
deploy:
provider: pypi
username: __token__
distributions: "sdist bdist_wheel"
password:
secure: rVQNCxKIuiEtMz4zLSsjdt6spG7cf3miKN5eqjxZfcELALHxAV4w/+CideQObOn3u9emmxb87R9XWKcogqK2MXqnuIcY4mWg7HUqaip1bhz/4YiVXjFILcG6itjX9IUF1DrtjKKRk6xryucSZcEB7yTcXz1hQTb768KWlLlKOVTRNwr7j07eyeafexz/L2ANQCqfOZgS4b0k2AMeDBRPykPULtyeneEFlb6MJZ2MxeqtTNVK4b/6VsQSZwQ9jGJNGWonn5Y287gHmzvEcymSJogTe2taxGBWawPnOsibws9v88DEAHdsEvYdnqEE3hFl0R5La2Lkjd8CjNUYegxioQ57i3WNS3iksq10ZLMCbH29lb9YPG7r6Y8z9H85735kV2gKLdf+o7SPS03TRgjSZKN6pn4pLG0VWkxC6l8VfLuJnRNTHX4g6oLQwOWIBbxybn9Zw/yLjAXAJNgBHt5v86H6Jfi1Va4AhEV6itkoH9IM3/uDhrE/mmorqyVled/CPNtBWNTyoDevLNxMUDnbuhH0JzLki+VOjKnTxEfq12JB8X9faFG5BjvU9oGjPPewrp5DGGzg6KDra7dikciWUxE1eTFFDhMyG1CFGcjKlDvlAGHyI6Kih35egGUeq+N/pitr2330ftM9Dm4rWpOTxPyCI89bXKssx/MgmLG7kSM=
on:
tags: true
skip_existing: true
# The password is encrypted with:
# `cd prototorch && travis encrypt your-pypi-api-token --add deploy.password`
# See https://docs.travis-ci.com/user/deployment/pypi and
# https://github.com/travis-ci/travis.rb#installation
# for more details
# Note: The encrypt command does not work well in ZSH.

docs/source/conf.py

@@ -23,7 +23,7 @@ author = "Jensun Ravichandran"
# The full version, including alpha/beta/rc tags
#
release = "0.7.6"
release = "0.7.1"
# -- General configuration ---------------------------------------------------
@@ -120,7 +120,7 @@ html_css_files = [
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "prototorchdoc"
htmlhelp_basename = "protoflowdoc"
# -- Options for LaTeX output ---------------------------------------------

examples/cbc_iris.py

@@ -1,7 +1,5 @@
"""ProtoTorch CBC example using 2D Iris data."""
import logging
import torch
from matplotlib import pyplot as plt
@@ -9,7 +7,6 @@ import prototorch as pt
class CBC(torch.nn.Module):
def __init__(self, data, **kwargs):
super().__init__(**kwargs)
self.components_layer = pt.components.ReasoningComponents(
@@ -26,7 +23,6 @@ class CBC(torch.nn.Module):
class VisCBC2D():
def __init__(self, model, data):
self.model = model
self.x_train, self.y_train = pt.utils.parse_data_arg(data)
@@ -36,7 +32,7 @@ class VisCBC2D():
self.resolution = 100
self.cmap = "viridis"
def on_train_epoch_end(self):
def on_epoch_end(self):
x_train, y_train = self.x_train, self.y_train
_components = self.model.components_layer._components.detach()
ax = self.fig.gca()
@@ -96,5 +92,5 @@ if __name__ == "__main__":
correct += (y_pred.argmax(1) == y).float().sum(0)
acc = 100 * correct / len(train_ds)
logging.info(f"Epoch: {epoch} Accuracy: {acc:05.02f}%")
vis.on_train_epoch_end()
print(f"Epoch: {epoch} Accuracy: {acc:05.02f}%")
vis.on_epoch_end()
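
The two sides of this diff differ in how the epoch summary is reported: one uses `print`, the other `logging.info`. Worth noting when running the example: `logging.info` emits nothing under Python's default WARNING level, so the logging variant implicitly relies on a setup step like the following sketch (an assumption, not shown in the diff):

import logging

# Without this, the root logger stays at WARNING and the accuracy lines
# passed to logging.info(...) are silently dropped.
logging.basicConfig(level=logging.INFO)
logging.info("Epoch: %d Accuracy: %05.2f%%", 10, 97.33)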

examples/gmlvq_iris.py

@@ -1,76 +0,0 @@
"""ProtoTorch GMLVQ example using Iris data."""
import torch
import prototorch as pt
class GMLVQ(torch.nn.Module):
"""
Implementation of Generalized Matrix Learning Vector Quantization.
"""
def __init__(self, data, **kwargs):
super().__init__(**kwargs)
self.components_layer = pt.components.LabeledComponents(
distribution=[1, 1, 1],
components_initializer=pt.initializers.SMCI(data, noise=0.1),
)
self.backbone = pt.transforms.Omega(
len(data[0][0]),
len(data[0][0]),
pt.initializers.RandomLinearTransformInitializer(),
)
def forward(self, data):
"""
Forward function that returns a tuple of dissimilarities and label information.
Feed into GLVQLoss to get a complete GMLVQ model.
"""
components, label = self.components_layer()
latent_x = self.backbone(data)
latent_components = self.backbone(components)
distance = pt.distances.squared_euclidean_distance(
latent_x, latent_components)
return distance, label
def predict(self, data):
"""
The GMLVQ has a modified prediction step, where a competition layer is applied.
"""
components, label = self.components_layer()
distance = pt.distances.squared_euclidean_distance(data, components)
winning_label = pt.competitions.wtac(distance, label)
return winning_label
if __name__ == "__main__":
train_ds = pt.datasets.Iris()
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=32)
model = GMLVQ(train_ds)
optimizer = torch.optim.Adam(model.parameters(), lr=0.05)
criterion = pt.losses.GLVQLoss()
for epoch in range(200):
correct = 0.0
for x, y in train_loader:
d, labels = model(x)
loss = criterion(d, y, labels).mean(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
y_pred = model.predict(x)
correct += (y_pred == y).float().sum(0)
acc = 100 * correct / len(train_ds)
print(f"Epoch: {epoch} Accuracy: {acc:05.02f}%")

prototorch/__init__.py

@@ -1,23 +1,28 @@
"""ProtoTorch package"""
import pkgutil
from typing import List
import pkg_resources
from . import datasets # noqa: F401
from . import nn # noqa: F401
from . import utils # noqa: F401
from .core import competitions # noqa: F401
from .core import components # noqa: F401
from .core import distances # noqa: F401
from .core import initializers # noqa: F401
from .core import losses # noqa: F401
from .core import pooling # noqa: F401
from .core import similarities # noqa: F401
from .core import transforms # noqa: F401
from . import (
datasets,
nn,
utils,
)
from .core import (
competitions,
components,
distances,
initializers,
losses,
pooling,
similarities,
transforms,
)
# Core Setup
__version__ = "0.7.6"
__version__ = "0.7.1"
__all_core__ = [
"competitions",
@@ -35,7 +40,7 @@ __all_core__ = [
]
# Plugin Loader
__path__ = pkgutil.extend_path(__path__, __name__)
__path__: List[str] = pkgutil.extend_path(__path__, __name__)
def discover_plugins():
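
The hunk ends before the body of `discover_plugins()`. Since the module imports `pkg_resources`, a plausible sketch of entry-point based plugin discovery looks like the following; the entry-point group name "prototorch.plugins" is an assumption, not taken from the diff:

import pkg_resources

def discover_plugins():
    # Load every installed distribution that registered itself under the
    # (assumed) "prototorch.plugins" entry-point group.
    return {
        entry_point.name: entry_point.load()
        for entry_point in pkg_resources.iter_entry_points("prototorch.plugins")
    }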

prototorch/core/competitions.py

@@ -38,7 +38,7 @@ def cbcc(detections: torch.Tensor, reasonings: torch.Tensor):
pk = A
nk = (1 - A) * B
numerator = (detections @ (pk - nk).T) + nk.sum(1)
probs = numerator / ((pk + nk).sum(1) + 1e-8)
probs = numerator / (pk + nk).sum(1)
return probs
@@ -48,7 +48,6 @@ class WTAC(torch.nn.Module):
Thin wrapper over the `wtac` function.
"""
def forward(self, distances, labels): # pylint: disable=no-self-use
return wtac(distances, labels)
@@ -59,7 +58,6 @@ class LTAC(torch.nn.Module):
Thin wrapper over the `wtac` function.
"""
def forward(self, probs, labels): # pylint: disable=no-self-use
return wtac(-1.0 * probs, labels)
@@ -70,7 +68,6 @@ class KNNC(torch.nn.Module):
Thin wrapper over the `knnc` function.
"""
def __init__(self, k=1, **kwargs):
super().__init__(**kwargs)
self.k = k
@@ -88,6 +85,5 @@ class CBCC(torch.nn.Module):
Thin wrapper over the `cbcc` function.
"""
def forward(self, detections, reasonings): # pylint: disable=no-self-use
return cbcc(detections, reasonings)
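
The only functional change in `cbcc` above is the `1e-8` term in the denominator. A small sketch of why it matters; the tensor values are made up, and `pk`/`nk` stand in for the positive and negative reasoning weights from the function:

import torch

pk = torch.tensor([[1.0, 0.0], [0.0, 0.0]])  # second row sums to zero
nk = torch.tensor([[0.0, 0.5], [0.0, 0.0]])
detections = torch.tensor([[0.7, 0.2]])

numerator = (detections @ (pk - nk).T) + nk.sum(1)
probs_guarded = numerator / ((pk + nk).sum(1) + 1e-8)  # finite everywhere
probs_bare = numerator / (pk + nk).sum(1)              # 0/0 -> nan in column 2
print(probs_guarded, probs_bare)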

prototorch/core/components.py

@@ -6,8 +6,7 @@ from typing import Union
import torch
from torch.nn.parameter import Parameter
from prototorch.utils import parse_distribution
from ..utils import parse_distribution
from .initializers import (
AbstractClassAwareCompInitializer,
AbstractComponentsInitializer,
@@ -64,7 +63,6 @@ def get_cikwargs(init, distribution):
class AbstractComponents(torch.nn.Module):
"""Abstract class for all components modules."""
@property
def num_components(self):
"""Current number of components."""
@@ -87,7 +85,6 @@ class AbstractComponents(torch.nn.Module):
class Components(AbstractComponents):
"""A set of adaptable Tensors."""
def __init__(self, num_components: int,
initializer: AbstractComponentsInitializer):
super().__init__()
@@ -115,7 +112,6 @@ class Components(AbstractComponents):
class AbstractLabels(torch.nn.Module):
"""Abstract class for all labels modules."""
@property
def labels(self):
return self._labels.cpu()
@@ -156,7 +152,6 @@ class AbstractLabels(torch.nn.Module):
class Labels(AbstractLabels):
"""A set of standalone labels."""
def __init__(self,
distribution: Union[dict, list, tuple],
initializer: AbstractLabelsInitializer = LabelsInitializer()):
@@ -187,7 +182,6 @@ class Labels(AbstractLabels):
class LabeledComponents(AbstractComponents):
"""A set of adaptable components and corresponding unadaptable labels."""
def __init__(
self,
distribution: Union[dict, list, tuple],
@@ -255,7 +249,6 @@ class Reasonings(torch.nn.Module):
The `reasonings` tensor is of shape [num_components, num_classes, 2].
"""
def __init__(
self,
distribution: Union[dict, list, tuple],
@@ -315,7 +308,6 @@ class ReasoningComponents(AbstractComponents):
three element probability distribution.
"""
def __init__(
self,
distribution: Union[dict, list, tuple],

prototorch/core/distances.py

@@ -11,7 +11,7 @@ def squared_euclidean_distance(x, y):
**Alias:**
``prototorch.functions.distances.sed``
"""
x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
expanded_x = x.unsqueeze(dim=1)
batchwise_difference = y - expanded_x
differences_raised = torch.pow(batchwise_difference, 2)
@@ -27,14 +27,14 @@ def euclidean_distance(x, y):
:returns: Distance Tensor of shape :math:`X \times Y`
:rtype: `torch.tensor`
"""
x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
distances_raised = squared_euclidean_distance(x, y)
distances = torch.sqrt(distances_raised)
return distances
def euclidean_distance_v2(x, y):
x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
diff = y - x.unsqueeze(1)
pairwise_distances = (diff @ diff.permute((0, 2, 1))).sqrt()
# Passing `dim1=-2` and `dim2=-1` to `diagonal()` takes the
@@ -54,7 +54,7 @@ def lpnorm_distance(x, y, p):
:param p: p parameter of the lp norm
"""
x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
distances = torch.cdist(x, y, p=p)
return distances
@@ -66,7 +66,7 @@ def omega_distance(x, y, omega):
:param `torch.tensor` omega: Two dimensional matrix
"""
x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
projected_x = x @ omega
projected_y = y @ omega
distances = squared_euclidean_distance(projected_x, projected_y)
@@ -80,7 +80,7 @@ def lomega_distance(x, y, omegas):
:param `torch.tensor` omegas: Three dimensional matrix
"""
x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
projected_x = x @ omegas
projected_y = torch.diagonal(y @ omegas).T
expanded_y = torch.unsqueeze(projected_y, dim=1)
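
All the hunks in this file make the same cosmetic change (list comprehension to generator expression); behavior is unchanged. For orientation, a self-contained sketch of the pairwise distance semantics, with the final reduction step filled in as an assumption since the first hunk cuts off before it:

import torch

def squared_euclidean_distance(x, y):
    # Flatten everything past the batch dimension, then broadcast:
    # x: [N, D], y: [M, D] -> result: [N, M]
    x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
    expanded_x = x.unsqueeze(dim=1)        # [N, 1, D]
    batchwise_difference = y - expanded_x  # [N, M, D]
    differences_raised = torch.pow(batchwise_difference, 2)
    return torch.sum(differences_raised, dim=2)  # assumed final line

x, y = torch.randn(32, 8), torch.randn(5, 8)
print(squared_euclidean_distance(x, y).shape)  # torch.Size([32, 5])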

prototorch/core/initializers.py

@@ -11,7 +11,7 @@ from typing import (
import torch
from prototorch.utils import parse_data_arg, parse_distribution
from ..utils import parse_data_arg, parse_distribution
# Components
@@ -26,18 +26,11 @@ class LiteralCompInitializer(AbstractComponentsInitializer):
Use this to 'generate' pre-initialized components elsewhere.
"""
def __init__(self, components):
self.components = components
def generate(self, num_components: int = 0):
"""Ignore `num_components` and simply return `self.components`."""
provided_num_components = len(self.components)
if provided_num_components != num_components:
wmsg = f"The number of components ({provided_num_components}) " \
f"provided to {self.__class__.__name__} " \
f"does not match the expected number ({num_components})."
warnings.warn(wmsg)
if not isinstance(self.components, torch.Tensor):
wmsg = f"Converting components to {torch.Tensor}..."
warnings.warn(wmsg)
@@ -47,7 +40,6 @@ class LiteralCompInitializer(AbstractComponentsInitializer):
class ShapeAwareCompInitializer(AbstractComponentsInitializer):
"""Abstract class for all dimension-aware components initializers."""
def __init__(self, shape: Union[Iterable, int]):
if isinstance(shape, Iterable):
self.component_shape = tuple(shape)
@@ -61,7 +53,6 @@ class ShapeAwareCompInitializer(AbstractComponentsInitializer):
class ZerosCompInitializer(ShapeAwareCompInitializer):
"""Generate zeros corresponding to the components shape."""
def generate(self, num_components: int):
components = torch.zeros((num_components, ) + self.component_shape)
return components
@@ -69,7 +60,6 @@ class ZerosCompInitializer(ShapeAwareCompInitializer):
class OnesCompInitializer(ShapeAwareCompInitializer):
"""Generate ones corresponding to the components shape."""
def generate(self, num_components: int):
components = torch.ones((num_components, ) + self.component_shape)
return components
@@ -77,7 +67,6 @@ class OnesCompInitializer(ShapeAwareCompInitializer):
class FillValueCompInitializer(OnesCompInitializer):
"""Generate components with the provided `fill_value`."""
def __init__(self, shape, fill_value: float = 1.0):
super().__init__(shape)
self.fill_value = fill_value
@@ -90,7 +79,6 @@ class FillValueCompInitializer(OnesCompInitializer):
class UniformCompInitializer(OnesCompInitializer):
"""Generate components by sampling from a continuous uniform distribution."""
def __init__(self, shape, minimum=0.0, maximum=1.0, scale=1.0):
super().__init__(shape)
self.minimum = minimum
@@ -105,7 +93,6 @@ class UniformCompInitializer(OnesCompInitializer):
class RandomNormalCompInitializer(OnesCompInitializer):
"""Generate components by sampling from a standard normal distribution."""
def __init__(self, shape, shift=0.0, scale=1.0):
super().__init__(shape)
self.shift = shift
@@ -126,7 +113,6 @@ class AbstractDataAwareCompInitializer(AbstractComponentsInitializer):
`data` has to be a torch tensor.
"""
def __init__(self,
data: torch.Tensor,
noise: float = 0.0,
@@ -151,7 +137,6 @@ class AbstractDataAwareCompInitializer(AbstractComponentsInitializer):
class DataAwareCompInitializer(AbstractDataAwareCompInitializer):
"""'Generate' the components from the provided data."""
def generate(self, num_components: int = 0):
"""Ignore `num_components` and simply return transformed `self.data`."""
components = self.generate_end_hook(self.data)
@@ -160,7 +145,6 @@ class DataAwareCompInitializer(AbstractDataAwareCompInitializer):
class SelectionCompInitializer(AbstractDataAwareCompInitializer):
"""Generate components by uniformly sampling from the provided data."""
def generate(self, num_components: int):
indices = torch.LongTensor(num_components).random_(0, len(self.data))
samples = self.data[indices]
@@ -170,7 +154,6 @@ class SelectionCompInitializer(AbstractDataAwareCompInitializer):
class MeanCompInitializer(AbstractDataAwareCompInitializer):
"""Generate components by computing the mean of the provided data."""
def generate(self, num_components: int):
mean = self.data.mean(dim=0)
repeat_dim = [num_components] + [1] * len(mean.shape)
@@ -189,7 +172,6 @@ class AbstractClassAwareCompInitializer(AbstractComponentsInitializer):
target tensors.
"""
def __init__(self,
data,
noise: float = 0.0,
@@ -217,7 +199,6 @@ class AbstractClassAwareCompInitializer(AbstractComponentsInitializer):
class ClassAwareCompInitializer(AbstractClassAwareCompInitializer):
"""'Generate' components from provided data and requested distribution."""
def generate(self, distribution: Union[dict, list, tuple]):
"""Ignore `distribution` and simply return transformed `self.data`."""
components = self.generate_end_hook(self.data)
@@ -226,7 +207,6 @@ class ClassAwareCompInitializer(AbstractClassAwareCompInitializer):
class AbstractStratifiedCompInitializer(AbstractClassAwareCompInitializer):
"""Abstract class for all stratified components initializers."""
@property
@abstractmethod
def subinit_type(self) -> Type[AbstractDataAwareCompInitializer]:
@@ -237,8 +217,6 @@ class AbstractStratifiedCompInitializer(AbstractClassAwareCompInitializer):
components = torch.tensor([])
for k, v in distribution.items():
stratified_data = self.data[self.targets == k]
if len(stratified_data) == 0:
raise ValueError(f"No data available for class {k}.")
initializer = self.subinit_type(
stratified_data,
noise=self.noise,
@@ -251,7 +229,6 @@ class AbstractStratifiedCompInitializer(AbstractClassAwareCompInitializer):
class StratifiedSelectionCompInitializer(AbstractStratifiedCompInitializer):
"""Generate components using stratified sampling from the provided data."""
@property
def subinit_type(self):
return SelectionCompInitializer
@@ -259,7 +236,6 @@ class StratifiedSelectionCompInitializer(AbstractStratifiedCompInitializer):
class StratifiedMeanCompInitializer(AbstractStratifiedCompInitializer):
"""Generate components at stratified means of the provided data."""
@property
def subinit_type(self):
return MeanCompInitializer
@@ -268,7 +244,6 @@ class StratifiedMeanCompInitializer(AbstractStratifiedCompInitializer):
# Labels
class AbstractLabelsInitializer(ABC):
"""Abstract class for all labels initializers."""
@abstractmethod
def generate(self, distribution: Union[dict, list, tuple]):
...
@@ -280,7 +255,6 @@ class LiteralLabelsInitializer(AbstractLabelsInitializer):
Use this to 'generate' pre-initialized labels elsewhere.
"""
def __init__(self, labels):
self.labels = labels
@@ -299,7 +273,6 @@ class LiteralLabelsInitializer(AbstractLabelsInitializer):
class DataAwareLabelsInitializer(AbstractLabelsInitializer):
"""'Generate' the labels from a torch Dataset."""
def __init__(self, data):
self.data, self.targets = parse_data_arg(data)
@@ -310,7 +283,6 @@ class DataAwareLabelsInitializer(AbstractLabelsInitializer):
class LabelsInitializer(AbstractLabelsInitializer):
"""Generate labels from `distribution`."""
def generate(self, distribution: Union[dict, list, tuple]):
distribution = parse_distribution(distribution)
labels_list = []
@@ -322,7 +294,6 @@ class LabelsInitializer(AbstractLabelsInitializer):
class OneHotLabelsInitializer(LabelsInitializer):
"""Generate one-hot-encoded labels from `distribution`."""
def generate(self, distribution: Union[dict, list, tuple]):
distribution = parse_distribution(distribution)
num_classes = len(distribution.keys())
@@ -341,7 +312,6 @@ def compute_distribution_shape(distribution):
class AbstractReasoningsInitializer(ABC):
"""Abstract class for all reasonings initializers."""
def __init__(self, components_first: bool = True):
self.components_first = components_first
@@ -362,7 +332,6 @@ class LiteralReasoningsInitializer(AbstractReasoningsInitializer):
Use this to 'generate' pre-initialized reasonings elsewhere.
"""
def __init__(self, reasonings, **kwargs):
super().__init__(**kwargs)
self.reasonings = reasonings
@@ -380,7 +349,6 @@ class LiteralReasoningsInitializer(AbstractReasoningsInitializer):
class ZerosReasoningsInitializer(AbstractReasoningsInitializer):
"""Reasonings are all initialized with zeros."""
def generate(self, distribution: Union[dict, list, tuple]):
shape = compute_distribution_shape(distribution)
reasonings = torch.zeros(*shape)
@@ -390,7 +358,6 @@ class ZerosReasoningsInitializer(AbstractReasoningsInitializer):
class OnesReasoningsInitializer(AbstractReasoningsInitializer):
"""Reasonings are all initialized with ones."""
def generate(self, distribution: Union[dict, list, tuple]):
shape = compute_distribution_shape(distribution)
reasonings = torch.ones(*shape)
@@ -400,7 +367,6 @@ class OnesReasoningsInitializer(AbstractReasoningsInitializer):
class RandomReasoningsInitializer(AbstractReasoningsInitializer):
"""Reasonings are randomly initialized."""
def __init__(self, minimum=0.4, maximum=0.6, **kwargs):
super().__init__(**kwargs)
self.minimum = minimum
@@ -415,7 +381,6 @@ class RandomReasoningsInitializer(AbstractReasoningsInitializer):
class PurePositiveReasoningsInitializer(AbstractReasoningsInitializer):
"""Each component reasons positively for exactly one class."""
def generate(self, distribution: Union[dict, list, tuple]):
num_components, num_classes, _ = compute_distribution_shape(
distribution)
@@ -434,7 +399,6 @@ class AbstractTransformInitializer(ABC):
class AbstractLinearTransformInitializer(AbstractTransformInitializer):
"""Abstract class for all linear transform initializers."""
def __init__(self, out_dim_first: bool = False):
self.out_dim_first = out_dim_first
@@ -451,7 +415,6 @@ class AbstractLinearTransformInitializer(AbstractTransformInitializer):
class ZerosLinearTransformInitializer(AbstractLinearTransformInitializer):
"""Initialize a matrix with zeros."""
def generate(self, in_dim: int, out_dim: int):
weights = torch.zeros(in_dim, out_dim)
return self.generate_end_hook(weights)
@@ -459,23 +422,13 @@ class ZerosLinearTransformInitializer(AbstractLinearTransformInitializer):
class OnesLinearTransformInitializer(AbstractLinearTransformInitializer):
"""Initialize a matrix with ones."""
def generate(self, in_dim: int, out_dim: int):
weights = torch.ones(in_dim, out_dim)
return self.generate_end_hook(weights)
class RandomLinearTransformInitializer(AbstractLinearTransformInitializer):
"""Initialize a matrix with random values."""
def generate(self, in_dim: int, out_dim: int):
weights = torch.rand(in_dim, out_dim)
return self.generate_end_hook(weights)
class EyeLinearTransformInitializer(AbstractLinearTransformInitializer):
class EyeTransformInitializer(AbstractLinearTransformInitializer):
"""Initialize a matrix with the largest possible identity matrix."""
def generate(self, in_dim: int, out_dim: int):
weights = torch.zeros(in_dim, out_dim)
I = torch.eye(min(in_dim, out_dim))
@@ -485,7 +438,6 @@ class EyeLinearTransformInitializer(AbstractLinearTransformInitializer):
class AbstractDataAwareLTInitializer(AbstractLinearTransformInitializer):
"""Abstract class for all data-aware linear transform initializers."""
def __init__(self,
data: torch.Tensor,
noise: float = 0.0,
@@ -506,19 +458,11 @@ class AbstractDataAwareLTInitializer(AbstractLinearTransformInitializer):
class PCALinearTransformInitializer(AbstractDataAwareLTInitializer):
"""Initialize a matrix with Eigenvectors from the data."""
def generate(self, in_dim: int, out_dim: int):
_, _, weights = torch.pca_lowrank(self.data, q=out_dim)
return self.generate_end_hook(weights)
class LiteralLinearTransformInitializer(AbstractDataAwareLTInitializer):
"""'Generate' the provided weights."""
def generate(self, in_dim: int, out_dim: int):
return self.generate_end_hook(self.data)
# Aliases - Components
CACI = ClassAwareCompInitializer
DACI = DataAwareCompInitializer
@@ -547,9 +491,7 @@ RRI = RandomReasoningsInitializer
ZRI = ZerosReasoningsInitializer
# Aliases - Transforms
ELTI = Eye = EyeLinearTransformInitializer
Eye = EyeTransformInitializer
OLTI = OnesLinearTransformInitializer
RLTI = RandomLinearTransformInitializer
ZLTI = ZerosLinearTransformInitializer
PCALTI = PCALinearTransformInitializer
LLTI = LiteralLinearTransformInitializer

prototorch/core/losses.py

@@ -2,7 +2,7 @@
import torch
from prototorch.nn.activations import get_activation
from ..nn.activations import get_activation
# Helpers
@@ -106,31 +106,19 @@ def margin_loss(y_pred, y_true, margin=0.3):
class GLVQLoss(torch.nn.Module):
def __init__(self,
margin=0.0,
transfer_fn="identity",
beta=10,
add_dp=False,
**kwargs):
def __init__(self, margin=0.0, transfer_fn="identity", beta=10, **kwargs):
super().__init__(**kwargs)
self.margin = margin
self.transfer_fn = get_activation(transfer_fn)
self.beta = torch.tensor(beta)
self.add_dp = add_dp
def forward(self, outputs, targets, plabels):
# mu = glvq_loss(outputs, targets, plabels)
dp, dm = _get_dp_dm(outputs, targets, plabels)
mu = (dp - dm) / (dp + dm)
if self.add_dp:
mu = mu + dp
mu = glvq_loss(outputs, targets, prototype_labels=plabels)
batch_loss = self.transfer_fn(mu + self.margin, beta=self.beta)
return batch_loss.sum()
class MarginLoss(torch.nn.modules.loss._Loss):
def __init__(self,
margin=0.3,
size_average=None,
@@ -144,7 +132,6 @@ class MarginLoss(torch.nn.modules.loss._Loss):
class NeuralGasEnergy(torch.nn.Module):
def __init__(self, lm, **kwargs):
super().__init__(**kwargs)
self.lm = lm
@@ -165,7 +152,6 @@ class NeuralGasEnergy(torch.nn.Module):
class GrowingNeuralGasEnergy(NeuralGasEnergy):
def __init__(self, topology_layer, **kwargs):
super().__init__(**kwargs)
self.topology_layer = topology_layer
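
For context on the `add_dp` variant of GLVQLoss above: the GLVQ classifier function is mu = (dp - dm) / (dp + dm), where dp is the distance to the closest prototype with the correct label and dm the distance to the closest wrongly-labeled one. A toy sketch with invented values:

import torch

dp = torch.tensor([0.2, 1.0])  # distance to best correct prototype
dm = torch.tensor([0.8, 0.5])  # distance to best incorrect prototype

mu = (dp - dm) / (dp + dm)  # negative = sample classified correctly
print(mu)                   # tensor([-0.6000,  0.3333])
print(mu + dp)              # the optional add_dp term shown in the hunk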

prototorch/core/pooling.py

@@ -82,27 +82,23 @@ def stratified_prod_pooling(values: torch.Tensor,
class StratifiedSumPooling(torch.nn.Module):
"""Thin wrapper over the `stratified_sum_pooling` function."""
def forward(self, values, labels): # pylint: disable=no-self-use
return stratified_sum_pooling(values, labels)
class StratifiedProdPooling(torch.nn.Module):
"""Thin wrapper over the `stratified_prod_pooling` function."""
def forward(self, values, labels): # pylint: disable=no-self-use
return stratified_prod_pooling(values, labels)
class StratifiedMinPooling(torch.nn.Module):
"""Thin wrapper over the `stratified_min_pooling` function."""
def forward(self, values, labels): # pylint: disable=no-self-use
return stratified_min_pooling(values, labels)
class StratifiedMaxPooling(torch.nn.Module):
"""Thin wrapper over the `stratified_max_pooling` function."""
def forward(self, values, labels): # pylint: disable=no-self-use
return stratified_max_pooling(values, labels)

prototorch/core/similarities.py

@@ -21,7 +21,7 @@ def cosine_similarity(x, y):
Expected dimension of x is 2.
Expected dimension of y is 2.
"""
x, y = (arr.view(arr.size(0), -1) for arr in (x, y))
x, y = [arr.view(arr.size(0), -1) for arr in (x, y)]
norm_x = x.pow(2).sum(1).sqrt()
norm_y = y.pow(2).sum(1).sqrt()
norm_mat = norm_x.unsqueeze(-1) @ norm_y.unsqueeze(-1).T

prototorch/core/transforms.py

@@ -5,18 +5,17 @@ from torch.nn.parameter import Parameter
from .initializers import (
AbstractLinearTransformInitializer,
EyeLinearTransformInitializer,
EyeTransformInitializer,
)
class LinearTransform(torch.nn.Module):
def __init__(
self,
in_dim: int,
out_dim: int,
initializer:
AbstractLinearTransformInitializer = EyeLinearTransformInitializer()):
AbstractLinearTransformInitializer = EyeTransformInitializer()):
super().__init__()
self.set_weights(in_dim, out_dim, initializer)
@@ -32,15 +31,12 @@ class LinearTransform(torch.nn.Module):
in_dim: int,
out_dim: int,
initializer:
AbstractLinearTransformInitializer = EyeLinearTransformInitializer()):
AbstractLinearTransformInitializer = EyeTransformInitializer()):
weights = initializer.generate(in_dim, out_dim)
self._register_weights(weights)
def forward(self, x):
return x @ self._weights
def extra_repr(self):
return f"weights: (shape: {tuple(self._weights.shape)})"
return x @ self.weights
# Aliases
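
The two names for the eye initializer on either side of this diff refer to the same behavior: zero-pad the largest identity matrix that fits. The earlier hunk truncates before the copy step, so it is reconstructed here as an assumption; the expected output matches the assertions in the test file further down:

import torch

in_dim, out_dim = 3, 2
weights = torch.zeros(in_dim, out_dim)
I = torch.eye(min(in_dim, out_dim))
weights[:I.size(0), :I.size(1)] = I  # assumed copy step, cut off in the hunk
print(weights)  # tensor([[1., 0.], [0., 1.], [0., 0.]])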

prototorch/datasets/abstract.py

@@ -20,7 +20,7 @@ class Dataset(torch.utils.data.Dataset):
_repr_indent = 2
def __init__(self, root):
if isinstance(root, str):
if isinstance(root, torch._six.string_classes):
root = os.path.expanduser(root)
self.root = root
@@ -93,7 +93,6 @@ class ProtoDataset(Dataset):
class NumpyDataset(torch.utils.data.TensorDataset):
"""Create a PyTorch TensorDataset from NumPy arrays."""
def __init__(self, data, targets):
self.data = torch.Tensor(data)
self.targets = torch.LongTensor(targets)
@@ -103,7 +102,6 @@ class NumpyDataset(torch.utils.data.TensorDataset):
class CSVDataset(NumpyDataset):
"""Create a Dataset from a CSV file."""
def __init__(self, filepath, target_col=-1, delimiter=',', skip_header=0):
raw = np.genfromtxt(
filepath,

prototorch/datasets/sklearn.py

@@ -5,18 +5,11 @@ URL:
"""
from __future__ import annotations
import warnings
from typing import Sequence
from typing import Sequence, Union
from sklearn.datasets import (
load_iris,
make_blobs,
make_circles,
make_classification,
make_moons,
)
from sklearn.datasets import (load_iris, make_blobs, make_circles,
make_classification, make_moons)
from prototorch.datasets.abstract import NumpyDataset
@@ -42,10 +35,9 @@ class Iris(NumpyDataset):
:param dims: select a subset of dimensions
"""
def __init__(self, dims: Sequence[int] | None = None):
def __init__(self, dims: Sequence[int] = None):
x, y = load_iris(return_X_y=True)
if dims is not None:
if dims:
x = x[:, dims]
super().__init__(x, y)
@@ -57,20 +49,15 @@ class Blobs(NumpyDataset):
https://scikit-learn.org/stable/datasets/sample_generators.html#sample-generators.
"""
def __init__(
self,
num_samples: int = 300,
num_features: int = 2,
seed: None | int = 0,
):
x, y = make_blobs(
num_samples,
num_features,
centers=None,
random_state=seed,
shuffle=False,
)
def __init__(self,
num_samples: int = 300,
num_features: int = 2,
seed: Union[None, int] = 0):
x, y = make_blobs(num_samples,
num_features,
centers=None,
random_state=seed,
shuffle=False)
super().__init__(x, y)
@@ -82,34 +69,29 @@ class Random(NumpyDataset):
Note: n_classes * n_clusters_per_class <= 2**n_informative must satisfy.
"""
def __init__(
self,
num_samples: int = 300,
num_features: int = 2,
num_classes: int = 2,
num_clusters: int = 2,
num_informative: None | int = None,
separation: float = 1.0,
seed: None | int = 0,
):
def __init__(self,
num_samples: int = 300,
num_features: int = 2,
num_classes: int = 2,
num_clusters: int = 2,
num_informative: Union[None, int] = None,
separation: float = 1.0,
seed: Union[None, int] = 0):
if not num_informative:
import math
num_informative = math.ceil(math.log2(num_classes * num_clusters))
if num_features < num_informative:
warnings.warn("Generating more features than requested.")
num_features = num_informative
x, y = make_classification(
num_samples,
num_features,
n_informative=num_informative,
n_redundant=0,
n_classes=num_classes,
n_clusters_per_class=num_clusters,
class_sep=separation,
random_state=seed,
shuffle=False,
)
x, y = make_classification(num_samples,
num_features,
n_informative=num_informative,
n_redundant=0,
n_classes=num_classes,
n_clusters_per_class=num_clusters,
class_sep=separation,
random_state=seed,
shuffle=False)
super().__init__(x, y)
@@ -122,21 +104,16 @@ class Circles(NumpyDataset):
https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_circles.html
"""
def __init__(
self,
num_samples: int = 300,
noise: float = 0.3,
factor: float = 0.8,
seed: None | int = 0,
):
x, y = make_circles(
num_samples,
noise=noise,
factor=factor,
random_state=seed,
shuffle=False,
)
def __init__(self,
num_samples: int = 300,
noise: float = 0.3,
factor: float = 0.8,
seed: Union[None, int] = 0):
x, y = make_circles(num_samples,
noise=noise,
factor=factor,
random_state=seed,
shuffle=False)
super().__init__(x, y)
@@ -149,17 +126,12 @@ class Moons(NumpyDataset):
https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html
"""
def __init__(
self,
num_samples: int = 300,
noise: float = 0.3,
seed: None | int = 0,
):
x, y = make_moons(
num_samples,
noise=noise,
random_state=seed,
shuffle=False,
)
def __init__(self,
num_samples: int = 300,
noise: float = 0.3,
seed: Union[None, int] = 0):
x, y = make_moons(num_samples,
noise=noise,
random_state=seed,
shuffle=False)
super().__init__(x, y)
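
Two things change in `Iris.__init__` in this file: the annotation moves to PEP 604 union syntax (`Sequence[int] | None`), and the guard tightens from `if dims:` to `if dims is not None:` (an empty list is falsy, so the old guard silently ignored an explicitly requested empty selection). A usage sketch; the dimension choice is illustrative:

import prototorch as pt

train_ds = pt.datasets.Iris(dims=[0, 2])  # keep two of the four features
print(len(train_ds))  # 150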

prototorch/datasets/spiral.py

@@ -9,7 +9,6 @@ def make_spiral(num_samples=500, noise=0.3):
For use in Prototorch use `prototorch.datasets.Spiral` instead.
"""
def get_samples(n, delta_t):
points = []
for i in range(n):
@@ -53,7 +52,6 @@ class Spiral(torch.utils.data.TensorDataset):
:param num_samples: number of random samples
:param noise: noise added to the spirals
"""
def __init__(self, num_samples: int = 500, noise: float = 0.3):
x, y = make_spiral(num_samples, noise)
super().__init__(torch.Tensor(x), torch.LongTensor(y))

prototorch/datasets/tecator.py

@@ -36,7 +36,6 @@ Description:
are determined by analytic chemistry.
"""
import logging
import os
import numpy as np
@@ -82,11 +81,13 @@ class Tecator(ProtoDataset):
if self._check_exists():
return
logging.debug("Making directories...")
if self.verbose:
print("Making directories...")
os.makedirs(self.raw_folder, exist_ok=True)
os.makedirs(self.processed_folder, exist_ok=True)
logging.debug("Downloading...")
if self.verbose:
print("Downloading...")
for fileid, md5 in self._resources:
filename = "tecator.npz"
download_file_from_google_drive(fileid,
@@ -94,7 +95,8 @@
filename=filename,
md5=md5)
logging.debug("Processing...")
if self.verbose:
print("Processing...")
with np.load(os.path.join(self.raw_folder, "tecator.npz"),
allow_pickle=False) as f:
x_train, y_train = f["x_train"], f["y_train"]
@@ -115,4 +117,5 @@
"wb") as f:
torch.save(test_set, f)
logging.debug("Done!")
if self.verbose:
print("Done!")

prototorch/datasets/xor.py

@@ -13,7 +13,6 @@ def make_xor(num_samples=500):
class XOR(torch.utils.data.TensorDataset):
"""Exclusive-or (XOR) dataset for binary classification."""
def __init__(self, num_samples: int = 500):
x, y = make_xor(num_samples)
super().__init__(x, y)

prototorch/nn/wrappers.py

@@ -4,7 +4,6 @@ import torch
class LambdaLayer(torch.nn.Module):
def __init__(self, fn, name=None):
super().__init__()
self.fn = fn
@@ -18,7 +17,6 @@ class LambdaLayer(torch.nn.Module):
class LossLayer(torch.nn.modules.loss._Loss):
def __init__(self,
fn,
name=None,

prototorch/utils/__init__.py

@@ -1,11 +1,6 @@
"""ProtoTorch utils module"""
"""ProtoFlow utils module"""
from .colors import (
get_colors,
get_legend_handles,
hex_to_rgb,
rgb_to_hex,
)
from .colors import hex_to_rgb, rgb_to_hex
from .utils import (
mesh2d,
parse_data_arg,

prototorch/utils/colors.py

@@ -1,13 +1,4 @@
"""ProtoTorch color utilities"""
import matplotlib.lines as mlines
import torch
from matplotlib import cm
from matplotlib.colors import (
Normalize,
to_hex,
to_rgb,
)
"""ProtoFlow color utilities"""
def hex_to_rgb(hex_values):
@@ -22,39 +13,3 @@ def rgb_to_hex(rgb_values):
for v in rgb_values:
c = "%02x%02x%02x" % tuple(v)
yield c
def get_colors(vmax, vmin=0, cmap="viridis"):
cmap = cm.get_cmap(cmap)
colornorm = Normalize(vmin=vmin, vmax=vmax)
colors = dict()
for c in range(vmin, vmax + 1):
colors[c] = to_hex(cmap(colornorm(c)))
return colors
def get_legend_handles(colors, labels, marker="dots", zero_indexed=False):
handles = list()
for color, label in zip(colors.values(), labels):
if marker == "dots":
handle = mlines.Line2D(
xdata=[],
ydata=[],
label=label,
color="white",
markerfacecolor=color,
marker="o",
markersize=10,
markeredgecolor="k",
)
else:
handle = mlines.Line2D(
xdata=[],
ydata=[],
label=label,
color=color,
marker="",
markersize=15,
)
handles.append(handle)
return handles

prototorch/utils/utils.py

@@ -1,11 +1,10 @@
"""ProtoTorch utilities"""
"""ProtoFlow utilities"""
import warnings
from typing import (
Dict,
Iterable,
List,
Optional,
Union,
)
@@ -14,32 +13,6 @@ import torch
from torch.utils.data import DataLoader, Dataset
def generate_mesh(
minima: torch.TensorType,
maxima: torch.TensorType,
border: float = 1.0,
resolution: int = 100,
device: Optional[torch.device] = None,
):
# Apply Border
ptp = maxima - minima
shift = border * ptp
minima -= shift
maxima += shift
# Generate Mesh
minima = minima.to(device).unsqueeze(1)
maxima = maxima.to(device).unsqueeze(1)
factors = torch.linspace(0, 1, resolution, device=device)
marginals = factors * maxima + ((1 - factors) * minima)
single_dimensions = torch.meshgrid(*marginals)
mesh_input = torch.stack([dim.ravel() for dim in single_dimensions], dim=1)
return mesh_input, single_dimensions
def mesh2d(x=None, border: float = 1.0, resolution: int = 100):
if x is not None:
x_shift = border * np.ptp(x[:, 0])
@@ -56,15 +29,14 @@ def mesh2d(x=None, border: float = 1.0, resolution: int = 100):
def distribution_from_list(list_dist: List[int],
clabels: Optional[Iterable[int]] = None):
clabels: Iterable[int] = None):
clabels = clabels or list(range(len(list_dist)))
distribution = dict(zip(clabels, list_dist))
return distribution
def parse_distribution(
user_distribution,
clabels: Optional[Iterable[int]] = None) -> Dict[int, int]:
def parse_distribution(user_distribution,
clabels: Iterable[int] = None) -> Dict[int, int]:
"""Parse user-provided distribution.
Return a dictionary with integer keys that represent the class labels and

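The signature changes in this file are purely about typing (`Optional[Iterable[int]]` instead of a bare `Iterable[int] = None` default). The list-to-dict conversion itself is unchanged; a sketch of what it produces:

# Per-class counts zipped with class labels (defaulting to 0..n-1),
# as in distribution_from_list above.
list_dist = [1, 2, 3]
clabels = list(range(len(list_dist)))
distribution = dict(zip(clabels, list_dist))
print(distribution)  # {0: 1, 1: 2, 2: 3}
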
setup.cfg

@@ -1,9 +1,8 @@
[pylint]
disable =
too-many-arguments,
too-few-public-methods,
fixme,
too-many-arguments,
too-few-public-methods,
fixme,
[pycodestyle]
max-line-length = 79
@@ -13,4 +12,4 @@ multi_line_output = 3
include_trailing_comma = True
force_grid_wrap = 3
use_parentheses = True
line_length = 79
line_length = 79

setup.py

@@ -15,22 +15,21 @@ from setuptools import find_packages, setup
PROJECT_URL = "https://github.com/si-cim/prototorch"
DOWNLOAD_URL = "https://github.com/si-cim/prototorch.git"
with open("README.md", encoding="utf-8") as fh:
with open("README.md", "r") as fh:
long_description = fh.read()
INSTALL_REQUIRES = [
"torch>=2.0.0",
"torchvision",
"numpy",
"scikit-learn",
"matplotlib",
"torch>=1.3.1",
"torchvision>=0.7.1",
"numpy>=1.9.1",
"sklearn",
]
DATASETS = [
"requests",
"tqdm",
]
DEV = [
"bump2version",
"bumpversion",
"pre-commit",
]
DOCS = [
@@ -41,17 +40,18 @@ DOCS = [
"sphinx-autodoc-typehints",
]
EXAMPLES = [
"matplotlib",
"torchinfo",
]
TESTS = [
"flake8",
"pytest-cov",
"pytest",
]
ALL = DATASETS + DEV + DOCS + EXAMPLES + TESTS
setup(
name="prototorch",
version="0.7.6",
version="0.7.1",
description="Highly extensible, GPU-supported "
"Learning Vector Quantization (LVQ) toolbox "
"built using PyTorch and its nn API.",
@@ -62,7 +62,7 @@ setup(
url=PROJECT_URL,
download_url=DOWNLOAD_URL,
license="MIT",
python_requires=">=3.8",
python_requires=">=3.6",
install_requires=INSTALL_REQUIRES,
extras_require={
"datasets": DATASETS,
@@ -85,10 +85,10 @@ setup(
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
],
packages=find_packages(),
zip_safe=False,

tests/test_core.py

@@ -245,20 +245,20 @@ def test_random_reasonings_init_channels_not_first():
# Transform initializers
def test_eye_transform_init_square():
t = pt.initializers.EyeLinearTransformInitializer()
t = pt.initializers.EyeTransformInitializer()
I = t.generate(3, 3)
assert torch.allclose(I, torch.eye(3))
def test_eye_transform_init_narrow():
t = pt.initializers.EyeLinearTransformInitializer()
t = pt.initializers.EyeTransformInitializer()
actual = t.generate(3, 2)
desired = torch.Tensor([[1, 0], [0, 1], [0, 0]])
assert torch.allclose(actual, desired)
def test_eye_transform_init_wide():
t = pt.initializers.EyeLinearTransformInitializer()
t = pt.initializers.EyeTransformInitializer()
actual = t.generate(2, 3)
desired = torch.Tensor([[1, 0, 0], [0, 1, 0]])
assert torch.allclose(actual, desired)
@@ -404,7 +404,6 @@ def test_glvq_loss_one_hot_unequal():
# Activations
class TestActivations(unittest.TestCase):
def setUp(self):
self.flist = ["identity", "sigmoid_beta", "swish_beta"]
self.x = torch.randn(1024, 1)
@@ -419,7 +418,6 @@ class TestActivations(unittest.TestCase):
self.assertTrue(iscallable)
def test_callable_deserialization(self):
def dummy(x, **kwargs):
return x
@@ -464,7 +462,6 @@ class TestActivations(unittest.TestCase):
# Competitions
class TestCompetitions(unittest.TestCase):
def setUp(self):
pass
@@ -518,7 +515,6 @@ class TestCompetitions(unittest.TestCase):
# Pooling
class TestPooling(unittest.TestCase):
def setUp(self):
pass
@@ -619,7 +615,6 @@ class TestPooling(unittest.TestCase):
# Distances
class TestDistances(unittest.TestCase):
def setUp(self):
self.nx, self.mx = 32, 2048
self.ny, self.my = 8, 2048

tests/test_datasets.py

@@ -1,6 +1,7 @@
"""ProtoTorch datasets test suite"""
import os
import shutil
import unittest
import numpy as np
@@ -11,7 +12,6 @@ from prototorch.datasets.abstract import Dataset, ProtoDataset
class TestAbstract(unittest.TestCase):
def setUp(self):
self.ds = Dataset("./artifacts")
@@ -28,7 +28,6 @@ class TestAbstract(unittest.TestCase):
class TestProtoDataset(unittest.TestCase):
def test_download(self):
with self.assertRaises(NotImplementedError):
_ = ProtoDataset("./artifacts", download=True)
@@ -39,7 +38,6 @@ class TestProtoDataset(unittest.TestCase):
class TestNumpyDataset(unittest.TestCase):
def test_list_init(self):
ds = pt.datasets.NumpyDataset([1], [1])
self.assertEqual(len(ds), 1)
@@ -52,7 +50,6 @@ class TestNumpyDataset(unittest.TestCase):
class TestCSVDataset(unittest.TestCase):
def setUp(self):
data = np.random.rand(100, 4)
targets = np.random.randint(2, size=(100, 1))
@@ -70,14 +67,12 @@ class TestCSVDataset(unittest.TestCase):
class TestSpiral(unittest.TestCase):
def test_init(self):
ds = pt.datasets.Spiral(num_samples=10)
self.assertEqual(len(ds), 10)
class TestIris(unittest.TestCase):
def setUp(self):
self.ds = pt.datasets.Iris()
@@ -93,28 +88,24 @@ class TestIris(unittest.TestCase):
class TestBlobs(unittest.TestCase):
def test_size(self):
ds = pt.datasets.Blobs(num_samples=10)
self.assertEqual(len(ds), 10)
class TestRandom(unittest.TestCase):
def test_size(self):
ds = pt.datasets.Random(num_samples=10)
self.assertEqual(len(ds), 10)
class TestCircles(unittest.TestCase):
def test_size(self):
ds = pt.datasets.Circles(num_samples=10)
self.assertEqual(len(ds), 10)
class TestMoons(unittest.TestCase):
def test_size(self):
ds = pt.datasets.Moons(num_samples=10)
self.assertEqual(len(ds), 10)