Merge branch 'dev' of github.com:ENSTA-U2IS-AI/torch-uncertainty into dev
o-laurent committed Sep 12, 2024
2 parents ea25466 + 8dc7b3f commit eb72501
Showing 25 changed files with 97 additions and 22 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build-docs.yml
@@ -40,7 +40,7 @@ jobs:

       - name: Install dependencies
         run: |
-          python3 -m pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cpu
+          python3 -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
           python3 -m pip install .[image,dev,docs]
       - name: Sphinx build
2 changes: 1 addition & 1 deletion .github/workflows/run-tests.yml
@@ -64,7 +64,7 @@ jobs:
       - name: Install dependencies
         if: steps.changed-files-specific.outputs.only_changed != 'true'
         run: |
-          python3 -m pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cpu
+          python3 -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
           python3 -m pip install .[all]
       - name: Check style & format
3 changes: 2 additions & 1 deletion auto_tutorials_source/tutorial_bayesian.py
@@ -36,6 +36,7 @@
 We will also need to define an optimizer using torch.optim and PyTorch's
 neural network utils from torch.nn.
 """
+# %%
 from pathlib import Path

 from lightning.pytorch import Trainer
@@ -93,7 +94,7 @@ def optim_lenet(model: nn.Module):
 loss = ELBOLoss(
     model=model,
     inner_loss=nn.CrossEntropyLoss(),
-    kl_weight=1 / 50000,
+    kl_weight=1 / 10000,
     num_samples=3,
 )
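
For intuition on the two ELBOLoss knobs shown here: kl_weight scales the KL term of the evidence lower bound against the data-fit term (a common heuristic is 1/N for N training samples), and num_samples is the number of stochastic forward passes averaged per batch. A conceptual sketch of the objective, not the library's internal implementation:

import torch
from torch import nn

def elbo_sketch(
    model: nn.Module,
    inner_loss: nn.Module,
    inputs: torch.Tensor,
    targets: torch.Tensor,
    kl_weight: float,
    num_samples: int,
    kl_term: torch.Tensor,  # KL(posterior || prior) of the Bayesian layers
) -> torch.Tensor:
    # Average the inner loss over several stochastic forward passes
    # (each pass resamples the Bayesian weights), then add the weighted KL term.
    data_term = sum(
        inner_loss(model(inputs), targets) for _ in range(num_samples)
    ) / num_samples
    return data_term + kl_weight * kl_term
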
1 change: 1 addition & 0 deletions auto_tutorials_source/tutorial_corruption.py
@@ -11,6 +11,7 @@
 torch_uncertainty.transforms.corruption. We also need to load utilities from
 torchvision and matplotlib.
 """
+# %%
 from torchvision.datasets import CIFAR10
 from torchvision.transforms import Compose, ToTensor, Resize
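
As a hedged illustration of what this tutorial sets up, the snippet below applies a single corruption to a CIFAR-10 image; the GaussianNoise class and its severity argument are assumptions about torch_uncertainty.transforms.corruption, so check the module for the exact names.

from torchvision.datasets import CIFAR10
from torchvision.transforms import Compose, Resize, ToTensor
from torch_uncertainty.transforms.corruption import GaussianNoise

# Load a CIFAR-10 test image and apply one corruption at a chosen severity (1-5).
dataset = CIFAR10("data", train=False, download=True,
                  transform=Compose([ToTensor(), Resize(32)]))
img, _ = dataset[0]
corrupted = GaussianNoise(severity=3)(img)
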
1 change: 1 addition & 0 deletions auto_tutorials_source/tutorial_der_cubic.py
@@ -29,6 +29,7 @@
 We also need to define an optimizer using torch.optim and the neural network utils within torch.nn.
 """
+# %%
 import torch
 from lightning.pytorch import Trainer
 from lightning import LightningDataModule
@@ -24,6 +24,7 @@
 We also need to define an optimizer using torch.optim and the neural network utils within torch.nn.
 """
+# %%
 from pathlib import Path

 import torch
5 changes: 3 additions & 2 deletions auto_tutorials_source/tutorial_from_de_to_pe.py
@@ -30,6 +30,7 @@
 The dataset is automatically downloaded using torchvision. We then visualize a few images to get a sense of what we are working with.
 """
 # Create the transforms for the images
+# %%
 import torch
 import torchvision.transforms as T
@@ -241,7 +242,7 @@ def optim_recipe(model, lr_mult: float = 1.0):
 # We have put the pre-trained models on Hugging Face that you can download with the utility function
 # "hf_hub_download" imported just below. These models are trained for 75 epochs and are therefore not
 # comparable to all the other models trained in this notebook. The pretrained models can be seen
-# `here <https://huggingface.co/ENSTA-U2IS/tutorial-models>`_ and TorchUncertainty's are `here <https://huggingface.co/torch-uncertainty>`_.
+# on `HuggingFace <https://huggingface.co/ENSTA-U2IS/tutorial-models>`_ and TorchUncertainty's are `here <https://huggingface.co/torch-uncertainty>`_.

 from torch_uncertainty.utils.hub import hf_hub_download
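
A hedged sketch of the download step: the signature is assumed to mirror huggingface_hub's hf_hub_download(repo_id, filename), and the checkpoint filename below is hypothetical — browse the Hugging Face page linked above for the real file names.

from torch_uncertainty.utils.hub import hf_hub_download

# "model.ckpt" is a hypothetical filename used for illustration only.
path = hf_hub_download(
    repo_id="ENSTA-U2IS/tutorial-models",
    filename="model.ckpt",
)
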
@@ -297,7 +298,7 @@ def optim_recipe(model, lr_mult: float = 1.0):
 # This modification is particularly useful when the ensemble size is large, as is often the case in practice.
 #
 # We will need to update the model and replace the layers with their Packed equivalents. You can find the
-# documentation of the Packed-Linear layer `here <https://torch-uncertainty.github.io/generated/torch_uncertainty.layers.PackedLinear.html>`_,
+# documentation of the Packed-Linear layer using this `link <https://torch-uncertainty.github.io/generated/torch_uncertainty.layers.PackedLinear.html>`_,
 # and the Packed-Conv2D, `here <https://torch-uncertainty.github.io/generated/torch_uncertainty.layers.PackedLinear.html>`_.

 import torch
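
To make the replacement concrete, here is a hedged sketch of swapping standard layers for Packed ones; the alpha (width factor), num_estimators, and first/last arguments are assumptions based on the linked PackedLinear documentation, so verify the exact signatures there.

import torch.nn as nn
from torch_uncertainty.layers import PackedConv2d, PackedLinear

num_estimators, alpha = 4, 2  # ensemble size and width factor (illustrative values)

# Standard layers...
conv = nn.Conv2d(3, 32, kernel_size=3)
fc = nn.Linear(32, 10)

# ...and their Packed equivalents. `first=True` marks the layer that replicates
# the input for every estimator; `last=True` marks the layer that separates the
# per-estimator predictions again.
packed_conv = PackedConv2d(
    3, 32, kernel_size=3, alpha=alpha, num_estimators=num_estimators, first=True
)
packed_fc = PackedLinear(
    32, 10, alpha=alpha, num_estimators=num_estimators, last=True
)
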
1 change: 1 addition & 0 deletions auto_tutorials_source/tutorial_mc_batch_norm.py
@@ -22,6 +22,7 @@
 We also need to import the neural network utils within `torch.nn`.
 """
+# %%
 from pathlib import Path

 from lightning import Trainer
2 changes: 1 addition & 1 deletion auto_tutorials_source/tutorial_mc_dropout.py
@@ -28,7 +28,7 @@
 We also need to import the neural network utils within `torch.nn`.
 """
-
+# %%
 from pathlib import Path

 from torch_uncertainty.utils import TUTrainer
1 change: 1 addition & 0 deletions auto_tutorials_source/tutorial_scaler.py
@@ -25,6 +25,7 @@
 If you use the classification routine, the plots will be automatically available in the tensorboard logs if you use the `log_plots` flag.
 """
+# %%
 from torch_uncertainty.datamodules import CIFAR100DataModule
 from torch_uncertainty.metrics import CalibrationError
 from torch_uncertainty.models.resnet import resnet
2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -15,7 +15,7 @@
     f"{datetime.now().year!s}, Adrien Lafage and Olivier Laurent"
 )
 author = "Adrien Lafage and Olivier Laurent"
-release = "0.2.1.post0"
+release = "0.2.2"

 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
2 changes: 2 additions & 0 deletions docs/source/index.rst
@@ -23,6 +23,8 @@ the models and metrics implemented.
 Installation
 ^^^^^^^^^^^^

+Make sure you have Python 3.10 or later installed, as well as PyTorch (cpu or gpu).
+
 .. parsed-literal::

     pip install torch-uncertainty
2 changes: 1 addition & 1 deletion docs/source/installation.rst
@@ -13,7 +13,7 @@ folder or if you want to contribute to the project.
 From PyPI
 ---------

-Check that you have PyTorch (cpu or gpu) installed on your system. Then, install
+Check that you have Python 3.10 (or later) and PyTorch (cpu or gpu) installed on your system. Then, install
 the package via pip:

 .. parsed-literal::
13 changes: 13 additions & 0 deletions docs/source/references.rst
@@ -243,6 +243,19 @@ For Laplace Approximation, consider citing:
 * Authors: *Erik Daxberger, Agustinus Kristiadi, Alexander Immer, Runa Eschenhagen, Matthias Bauer, and Philipp Hennig*
 * Paper: `NeurIPS 2021 <https://arxiv.org/abs/2106.14806>`__.

+Losses
+------
+
+Conflictual Loss
+^^^^^^^^^^^^^^^^
+
+For the conflictual loss, consider citing:
+
+**On the Calibration of Epistemic Uncertainty: Principles, Paradoxes and Conflictual Loss**
+
+* Authors: *Mohammed Fellaji, Frédéric Pennerath, Brieuc Conan-Guez, and Miguel Couceiro*
+* Paper: `ArXiv 2024 <https://arxiv.org/pdf/2407.12211>`__.
+
 Metrics
 -------
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "flit_core.buildapi"

 [project]
 name = "torch_uncertainty"
-version = "0.2.1.post0"
+version = "0.2.2"
 authors = [
     { name = "ENSTA U2IS", email = "olivier.laurent@ensta-paris.fr" },
     { name = "Adrien Lafage", email = "adrienlafage@outlook.com" },
@@ -32,7 +32,7 @@ classifiers = [
 ]
 dependencies = [
     "timm",
-    "lightning[pytorch-extra]",
+    "lightning[pytorch-extra]>=2.0",
     "torchvision>=0.16",
     "tensorboard",
     "einops",
28 changes: 24 additions & 4 deletions tests/losses/test_bayesian.py
@@ -1,11 +1,10 @@
 import pytest
 import torch
-from torch import nn
+from torch import nn, optim

 from torch_uncertainty.layers.bayesian import BayesLinear
-from torch_uncertainty.losses import (
-    ELBOLoss,
-)
+from torch_uncertainty.losses import ELBOLoss
+from torch_uncertainty.routines import RegressionRoutine


 class TestELBOLoss:
@@ -24,6 +23,27 @@ def test_main(self):
         loss = ELBOLoss(model, criterion, kl_weight=1e-5, num_samples=1)
         loss(model(torch.randn(1, 1)), torch.randn(1, 1))

+    def test_training_step(self):
+        model = BayesLinear(10, 4)
+        criterion = nn.MSELoss()
+        loss = ELBOLoss(model, criterion, kl_weight=1 / 50000, num_samples=3)
+
+        routine = RegressionRoutine(
+            probabilistic=False,
+            output_dim=4,
+            model=model,
+            loss=loss,
+            optim_recipe=optim.Adam(
+                model.parameters(),
+                lr=5e-4,
+                weight_decay=0,
+            ),
+        )
+
+        inputs = torch.randn(1, 10)
+        targets = torch.randn(1, 4)
+        routine.training_step((inputs, targets), 0)
+
     def test_failures(self):
         model = BayesLinear(1, 1)
         criterion = nn.BCEWithLogitsLoss()
4 changes: 2 additions & 2 deletions tests/losses/test_classification.py
@@ -62,7 +62,7 @@ def test_main(self):
         loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0]))
         loss = ConfidencePenaltyLoss(reg_weight=1e-2)
         loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0]))
-        loss = ConfidencePenaltyLoss(reg_weight=1e-2, reduction="none")
+        loss = ConfidencePenaltyLoss(reg_weight=1e-2, reduction=None)
         loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0]))

     def test_failures(self):
@@ -92,7 +92,7 @@ def test_main(self):
         loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0]))
         loss = ConflictualLoss(reg_weight=1e-2)
         loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0]))
-        loss = ConflictualLoss(reg_weight=1e-2, reduction="none")
+        loss = ConflictualLoss(reg_weight=1e-2, reduction=None)
         loss(torch.tensor([[0.0, 0.0]]), torch.tensor([0]))

     def test_failures(self):
8 changes: 8 additions & 0 deletions tests/metrics/classification/test_fpr95.py
@@ -32,6 +32,14 @@ def test_compute_one(self):
         res = metric.compute()
         assert res == 1

+    def test_compute_nan(self):
+        metric = FPR95(pos_label=1)
+        metric.update(
+            torch.as_tensor([0.1] * 50 + [0.4] * 50), torch.as_tensor([0] * 100)
+        )
+        res = metric.compute()
+        assert torch.isnan(res).all()
+
     def test_error(self):
         with pytest.raises(ValueError):
             FPRx(recall_level=1.2, pos_label=1)
6 changes: 6 additions & 0 deletions tests/metrics/classification/test_risk_coverage.py
@@ -40,6 +40,12 @@ def test_compute_multiclass(self) -> None:
         value = (0 * 0.4 + 0.25 * 0.2 / 2 + 0.25 * 0.2 + 0.15 * 0.2 / 2) / 0.8
         assert metric(probs, targets).item() == pytest.approx(value)

+    def test_compute_nan(self) -> None:
+        probs = torch.as_tensor([[0.1, 0.9]])
+        targets = torch.as_tensor([1]).long()
+        metric = AURC()
+        assert torch.isnan(metric(probs, targets)).all()
+
     def test_plot(self) -> None:
         scores = torch.as_tensor([0.2, 0.1, 0.5, 0.3, 0.4])
         values = torch.as_tensor([0.1, 0.2, 0.3, 0.4, 0.5])
12 changes: 12 additions & 0 deletions tests/test_optim_recipes.py
@@ -3,12 +3,24 @@
 import torch

 from torch_uncertainty.optim_recipes import (
+    CosineAnnealingWarmup,
     CosineSWALR,
     get_procedure,
     optim_abnn,
 )


+class TestCosineAnnealingWarmup:
+    def test_full_cosine_annealing_warmup(self):
+        CosineAnnealingWarmup(
+            torch.optim.SGD(torch.nn.Linear(1, 1).parameters(), lr=1e-3),
+            warmup_start_factor=0.1,
+            warmup_epochs=5,
+            max_epochs=100,
+            eta_min=1e-5,
+        )
+
+
 class TestCosineSWALR:
     def test_full_swa_lr(self):
         CosineSWALR(
6 changes: 3 additions & 3 deletions torch_uncertainty/losses/classification.py
@@ -235,16 +235,16 @@ def __init__(
         reg_weight: float = 1,
         reduction: str | None = "mean",
     ) -> None:
-        """The Conflictual Loss.
+        r"""The Conflictual Loss.

         Args:
             reg_weight (float, optional): The weight of the regularization term.
             reduction (str, optional): specifies the reduction to apply to the
                 output: ``'none'`` | ``'mean'`` | ``'sum'``.

         Reference:
-            Mohammed Fellaji et al. On the Calibration of Epistemic Uncertainty:
-            Principles, Paradoxes and Conflictual Loss. https://arxiv.org/pdf/2407.12211
+            `Mohammed Fellaji et al. On the Calibration of Epistemic Uncertainty:
+            Principles, Paradoxes and Conflictual Loss <https://arxiv.org/pdf/2407.12211>`_.
         """
         super().__init__()
         if reduction is None:
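
For reference, the call convention exercised by the classification-loss tests above — logits of shape (batch, num_classes) and integer class targets:

import torch
from torch_uncertainty.losses import ConflictualLoss

loss_fn = ConflictualLoss(reg_weight=1e-2)  # reduction defaults to "mean"
value = loss_fn(torch.tensor([[0.0, 0.0]]), torch.tensor([0]))
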
1 change: 1 addition & 0 deletions torch_uncertainty/metrics/__init__.py
@@ -9,6 +9,7 @@
     CalibrationError,
     CategoricalNLL,
     CovAt5Risk,
+    CovAtxRisk,
     Disagreement,
     Entropy,
     GroupingLoss,
1 change: 1 addition & 0 deletions torch_uncertainty/metrics/classification/fpr.py
@@ -89,6 +89,7 @@ def compute(self) -> Tensor:
             1 + threshold_idxs - true_pos
         )  # add one because of zero-based indexing

+        # check that there is at least one OOD example
         if true_pos[-1] == 0:
             return torch.tensor([torch.nan], device=self.device)
2 changes: 1 addition & 1 deletion torch_uncertainty/metrics/classification/risk_coverage.py
@@ -285,7 +285,7 @@ class CovAtxRisk(Metric):
     errors: list[Tensor]

     def __init__(self, risk_threshold: float, **kwargs) -> None:
-        r"""`Coverage at x Risk`_.
+        r"""Coverage at x Risk.

         If there are multiple coverage values corresponding to the given risk,
         i.e., the risk(coverage) is not monotonic, the coverage at x risk is
9 changes: 7 additions & 2 deletions torch_uncertainty/routines/regression.py
@@ -11,6 +11,7 @@
 from torch.optim import Optimizer
 from torchmetrics import MeanAbsoluteError, MeanSquaredError, MetricCollection

+from torch_uncertainty.losses import ELBOLoss
 from torch_uncertainty.metrics import (
     DistributionNLL,
 )
@@ -154,12 +155,16 @@ def training_step(
         self, batch: tuple[Tensor, Tensor], batch_idx: int
     ) -> STEP_OUTPUT:
         inputs, targets = self.format_batch_fn(batch)
-        dists = self.model(inputs)

         if self.one_dim_regression:
             targets = targets.unsqueeze(-1)

-        loss = self.loss(dists, targets)
+        if isinstance(self.loss, ELBOLoss):
+            loss = self.loss(inputs, targets)
+        else:
+            dists = self.model(inputs)
+            loss = self.loss(dists, targets)

         if self.needs_step_update:
             self.model.update_wrapper(self.current_epoch)
         self.log("train_loss", loss)
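
The new branch exists because ELBOLoss holds a reference to the model and performs its own stochastic forward passes, so it consumes raw inputs rather than precomputed predictions. A minimal sketch of the two call conventions, reusing the constructor arguments from test_training_step above:

import torch
from torch import nn
from torch_uncertainty.layers.bayesian import BayesLinear
from torch_uncertainty.losses import ELBOLoss

model = BayesLinear(10, 4)
elbo = ELBOLoss(model, nn.MSELoss(), kl_weight=1 / 50000, num_samples=3)

inputs, targets = torch.randn(1, 10), torch.randn(1, 4)
elbo_value = elbo(inputs, targets)  # ELBOLoss runs the model itself
mse_value = nn.MSELoss()(model(inputs), targets)  # ordinary losses take predictions
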
