From ce66899e7f5d64475968a4189ad579143af1fb3b Mon Sep 17 00:00:00 2001
From: James Butler
Date: Wed, 26 Feb 2025 01:04:08 -0500
Subject: [PATCH 1/3] Bump min torch to 2.2.0 to mitigate CVE-2024-31580 & CVE-2024-31583

This bumps the minimum required `torch` version from 1.13.1 to 2.2.0. With
2.2.0 as the floor, the `pytorch_after` version guards in the engines, the
attention blocks, and the upsample block are no longer needed and are removed.

See https://github.com/advisories/GHSA-5pcm-hx3q-hm94 and
https://github.com/advisories/GHSA-pg7h-5qx3-wjr3 for more details regarding
the "High" severity scoring.

- https://nvd.nist.gov/vuln/detail/CVE-2024-31580
- https://nvd.nist.gov/vuln/detail/CVE-2024-31583

Signed-off-by: James Butler
---
 .github/workflows/cron.yml              | 12 ++++-----
 .github/workflows/pythonapp-gpu.yml     | 26 ++++++++++---------
 .github/workflows/pythonapp-min.yml     |  2 +-
 .github/workflows/pythonapp.yml         |  6 ++---
 docs/requirements.txt                   |  4 +--
 environment-dev.yml                     |  2 +-
 monai/engines/evaluator.py              | 11 +++-----
 monai/engines/trainer.py                | 10 ++-----
 monai/networks/blocks/crossattention.py |  7 +----
 monai/networks/blocks/selfattention.py  |  7 +----
 monai/networks/blocks/upsample.py       | 14 +++-------
 pyproject.toml                          |  2 +-
 requirements.txt                        |  2 +-
 setup.cfg                               |  2 +-
 .../test_integration_bundle_run.py      |  6 ++---
 tests/nonconfig_workflow.py             |  2 +-
 16 files changed, 43 insertions(+), 72 deletions(-)

diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml
index 2e7921ec94..6c3f1ccb26 100644
--- a/.github/workflows/cron.yml
+++ b/.github/workflows/cron.yml
@@ -13,17 +13,17 @@ jobs:
     strategy:
       matrix:
         environment:
-          - "PT113+CUDA118"
-          - "PT210+CUDA121"
+          - "PT220+CUDA118"
+          - "PT230+CUDA121"
           - "PT240+CUDA126"
           - "PTLATEST+CUDA126"
         include:
           # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes
-          - environment: PT113+CUDA118
-            pytorch: "torch==1.13.1 torchvision==0.14.1 --extra-index-url https://download.pytorch.org/whl/cu121"
+          - environment: PT220+CUDA118
+            pytorch: "torch==2.2.0 torchvision==0.17.0 --extra-index-url https://download.pytorch.org/whl/cu121"
             base: "nvcr.io/nvidia/pytorch:22.10-py3"  # CUDA 11.8
-          - environment: PT210+CUDA121
-            pytorch: "pytorch==2.1.0 torchvision==0.16.0 --extra-index-url https://download.pytorch.org/whl/cu121"
+          - environment: PT230+CUDA121
+            pytorch: "pytorch==2.3.0 torchvision==0.18.0 --extra-index-url https://download.pytorch.org/whl/cu121"
             base: "nvcr.io/nvidia/pytorch:23.08-py3"  # CUDA 12.1
           - environment: PT240+CUDA126
             pytorch: "pytorch==2.4.0 torchvision==0.19.0 --extra-index-url https://download.pytorch.org/whl/cu121"
diff --git a/.github/workflows/pythonapp-gpu.yml b/.github/workflows/pythonapp-gpu.yml
index cd916f2ebb..6b0a5084a2 100644
--- a/.github/workflows/pythonapp-gpu.yml
+++ b/.github/workflows/pythonapp-gpu.yml
@@ -22,19 +22,21 @@ jobs:
     strategy:
       matrix:
         environment:
-          - "PT113+CUDA116"
-          - "PT210+CUDA121DOCKER"
+          - "PT230+CUDA124DOCKER"
+          - "PT240+CUDA125DOCKER"
+          - "PT250+CUDA126DOCKER"
         include:
           # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes
-          - environment: PT113+CUDA116
-            pytorch: "torch==1.13.1 torchvision==0.14.1"
-            base: "nvcr.io/nvidia/cuda:11.6.1-devel-ubuntu18.04"
-          - environment: PT210+CUDA121DOCKER
-            # 23.08: 2.1.0a0+29c30b1
+          - environment: PT230+CUDA124DOCKER
+            # 24.04: 2.3.0a0+6ddf5cf85e
             pytorch: "-h"  # we explicitly set pytorch to -h to avoid pip install error
-            base: "nvcr.io/nvidia/pytorch:23.08-py3"
-          - environment: PT210+CUDA121DOCKER
-            # 24.08: 2.3.0a0+40ec155e58.nv24.3
+            base: "nvcr.io/nvidia/pytorch:24.04-py3"
+          - environment: PT240+CUDA125DOCKER
+            # 24.06: 2.4.0a0+f70bd71a48
+            pytorch: "-h"  # we explicitly set pytorch to -h to avoid pip install error
+            base: "nvcr.io/nvidia/pytorch:24.06-py3"
+          - environment: PT250+CUDA126DOCKER
+            # 24.08: 2.5.0a0+872d972e41
             pytorch: "-h"  # we explicitly set pytorch to -h to avoid pip install error
             base: "nvcr.io/nvidia/pytorch:24.08-py3"
     container:
@@ -49,7 +51,7 @@ jobs:
         apt-get update
         apt-get install -y wget

-        if [ ${{ matrix.environment }} = "PT113+CUDA116" ]
+        if [ ${{ matrix.environment }} = "PT230+CUDA124" ]
         then
         PYVER=3.9 PYSFX=3 DISTUTILS=python3-distutils && \
         apt-get update && apt-get install -y --no-install-recommends \
@@ -114,7 +116,7 @@ jobs:
         # build for the current self-hosted CI Tesla V100
         BUILD_MONAI=1 TORCH_CUDA_ARCH_LIST="7.0" ./runtests.sh --build --disttests
         ./runtests.sh --quick --unittests
-        if [ ${{ matrix.environment }} = "PT113+CUDA116" ]; then
+        if [ ${{ matrix.environment }} = "PT230+CUDA124" ]; then
           # test the clang-format tool downloading once
           coverage run -m tests.clang_format_utils
         fi
diff --git a/.github/workflows/pythonapp-min.yml b/.github/workflows/pythonapp-min.yml
index 19e30f86bb..bd5e1ffb2b 100644
--- a/.github/workflows/pythonapp-min.yml
+++ b/.github/workflows/pythonapp-min.yml
@@ -124,7 +124,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        pytorch-version: ['1.13.1', '2.0.1', '2.2.2', '2.3.1', '2.4.1', 'latest']
+        pytorch-version: ['2.2.2', '2.3.1', '2.4.1', 'latest']
     timeout-minutes: 40
     steps:
    - uses: actions/checkout@v4
diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml
index f175cc3f7c..29a4ef3183 100644
--- a/.github/workflows/pythonapp.yml
+++ b/.github/workflows/pythonapp.yml
@@ -94,7 +94,7 @@ jobs:
    - if: runner.os == 'windows'
      name: Install torch cpu from pytorch.org (Windows only)
      run: |
-        python -m pip install torch==1.13.1+cpu torchvision==0.14.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
+        python -m pip install torch==2.2.0+cpu torchvision==0.17.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
    - if: runner.os == 'Linux'
      name: Install itk pre-release (Linux only)
      run: |
@@ -103,7 +103,7 @@ jobs:
    - name: Install the dependencies
      run: |
        python -m pip install --user --upgrade pip wheel
-        python -m pip install torch==1.13.1 torchvision==0.14.1
+        python -m pip install torch==2.2.0 torchvision==0.17.0
        cat "requirements-dev.txt"
        python -m pip install -r requirements-dev.txt
        python -m pip list
@@ -155,7 +155,7 @@ jobs:
        # install the latest pytorch for testing
        # however, "pip install monai*.tar.gz" will build cpp/cuda with an isolated
        # fresh torch installation according to pyproject.toml
-        python -m pip install torch>=1.13.1 torchvision
+        python -m pip install torch>=2.2.0 torchvision
    - name: Check packages
      run: |
        pip uninstall monai
diff --git a/docs/requirements.txt b/docs/requirements.txt
index d657580743..c0faa47264 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,5 +1,5 @@
--f https://download.pytorch.org/whl/cpu/torch-1.13.1%2Bcpu-cp39-cp39-linux_x86_64.whl
-torch>=1.13.1
+-f https://download.pytorch.org/whl/cpu/torch-2.2.0%2Bcpu-cp39-cp39-linux_x86_64.whl
+torch>=2.2.0
 pytorch-ignite==0.4.11
 numpy>=1.20
 itk>=5.2
diff --git a/environment-dev.yml b/environment-dev.yml
index 8617a3b9cb..ba2fed6cdb 100644
--- a/environment-dev.yml
+++ b/environment-dev.yml
@@ -6,7 +6,7 @@ channels:
   - conda-forge
 dependencies:
   - numpy>=1.24,<2.0
-  - pytorch>=1.13.1
+  - pytorch>=2.2.0
   - torchio
   - torchvision
   - pytorch-cuda>=11.6
diff --git a/monai/engines/evaluator.py b/monai/engines/evaluator.py
index d70a39726b..35d4928465 100644
--- a/monai/engines/evaluator.py
+++ b/monai/engines/evaluator.py
@@ -28,7 +28,7 @@
 from monai.utils import ForwardMode, IgniteInfo, ensure_tuple, min_version, optional_import
 from monai.utils.enums import CommonKeys as Keys
 from monai.utils.enums import EngineStatsKeys as ESKeys
-from monai.utils.module import look_up_option, pytorch_after
+from monai.utils.module import look_up_option

 if TYPE_CHECKING:
     from ignite.engine import Engine, EventEnum
@@ -269,13 +269,8 @@ def __init__(
             amp_kwargs=amp_kwargs,
         )
         if compile:
-            if pytorch_after(2, 1):
-                compile_kwargs = {} if compile_kwargs is None else compile_kwargs
-                network = torch.compile(network, **compile_kwargs)  # type: ignore[assignment]
-            else:
-                warnings.warn(
-                    "Network compilation (compile=True) not supported for Pytorch versions before 2.1, no compilation done"
-                )
+            compile_kwargs = {} if compile_kwargs is None else compile_kwargs
+            network = torch.compile(network, **compile_kwargs)  # type: ignore[assignment]
         self.network = network
         self.compile = compile
         self.inferer = SimpleInferer() if inferer is None else inferer
diff --git a/monai/engines/trainer.py b/monai/engines/trainer.py
index a0be86bae5..fdb45fbab8 100644
--- a/monai/engines/trainer.py
+++ b/monai/engines/trainer.py
@@ -27,7 +27,6 @@
 from monai.utils import AdversarialIterationEvents, AdversarialKeys, GanKeys, IgniteInfo, min_version, optional_import
 from monai.utils.enums import CommonKeys as Keys
 from monai.utils.enums import EngineStatsKeys as ESKeys
-from monai.utils.module import pytorch_after

 if TYPE_CHECKING:
     from ignite.engine import Engine, EventEnum
@@ -183,13 +182,8 @@ def __init__(
             amp_kwargs=amp_kwargs,
         )
         if compile:
-            if pytorch_after(2, 1):
-                compile_kwargs = {} if compile_kwargs is None else compile_kwargs
-                network = torch.compile(network, **compile_kwargs)  # type: ignore[assignment]
-            else:
-                warnings.warn(
-                    "Network compilation (compile=True) not supported for Pytorch versions before 2.1, no compilation done"
-                )
+            compile_kwargs = {} if compile_kwargs is None else compile_kwargs
+            network = torch.compile(network, **compile_kwargs)  # type: ignore[assignment]
         self.network = network
         self.compile = compile
         self.optimizer = optimizer
diff --git a/monai/networks/blocks/crossattention.py b/monai/networks/blocks/crossattention.py
index bdecf63168..be31d2d8fb 100644
--- a/monai/networks/blocks/crossattention.py
+++ b/monai/networks/blocks/crossattention.py
@@ -17,7 +17,7 @@
 import torch.nn as nn

 from monai.networks.layers.utils import get_rel_pos_embedding_layer
-from monai.utils import optional_import, pytorch_after
+from monai.utils import optional_import

 Rearrange, _ = optional_import("einops.layers.torch", name="Rearrange")

@@ -84,11 +84,6 @@ def __init__(
         if causal and sequence_length is None:
             raise ValueError("sequence_length is necessary for causal attention.")

-        if use_flash_attention and not pytorch_after(minor=13, major=1, patch=0):
-            raise ValueError(
-                "use_flash_attention is only supported for PyTorch versions >= 2.0."
-                "Upgrade your PyTorch or set the flag to False."
-            )
         if use_flash_attention and save_attn:
             raise ValueError(
                 "save_attn has been set to True, but use_flash_attention is also set"
diff --git a/monai/networks/blocks/selfattention.py b/monai/networks/blocks/selfattention.py
index 86e1b1d3ae..360579f3df 100644
--- a/monai/networks/blocks/selfattention.py
+++ b/monai/networks/blocks/selfattention.py
@@ -18,7 +18,7 @@
 import torch.nn.functional as F

 from monai.networks.layers.utils import get_rel_pos_embedding_layer
-from monai.utils import optional_import, pytorch_after
+from monai.utils import optional_import

 Rearrange, _ = optional_import("einops.layers.torch", name="Rearrange")

@@ -90,11 +90,6 @@ def __init__(
         if causal and sequence_length is None:
             raise ValueError("sequence_length is necessary for causal attention.")

-        if use_flash_attention and not pytorch_after(minor=13, major=1, patch=0):
-            raise ValueError(
-                "use_flash_attention is only supported for PyTorch versions >= 2.0."
-                "Upgrade your PyTorch or set the flag to False."
-            )
         if use_flash_attention and save_attn:
             raise ValueError(
                 "save_attn has been set to True, but use_flash_attention is also set"
diff --git a/monai/networks/blocks/upsample.py b/monai/networks/blocks/upsample.py
index 50fd39a70b..62908e9825 100644
--- a/monai/networks/blocks/upsample.py
+++ b/monai/networks/blocks/upsample.py
@@ -17,8 +17,8 @@
 import torch.nn as nn

 from monai.networks.layers.factories import Conv, Pad, Pool
-from monai.networks.utils import CastTempType, icnr_init, pixelshuffle
-from monai.utils import InterpolateMode, UpsampleMode, ensure_tuple_rep, look_up_option, pytorch_after
+from monai.networks.utils import icnr_init, pixelshuffle
+from monai.utils import InterpolateMode, UpsampleMode, ensure_tuple_rep, look_up_option

 __all__ = ["Upsample", "UpSample", "SubpixelUpsample", "Subpixelupsample", "SubpixelUpSample"]

@@ -164,15 +164,7 @@ def __init__(
                 align_corners=align_corners,
             )

-            # Cast to float32 as 'upsample_nearest2d_out_frame' op does not support bfloat16
-            # https://github.com/pytorch/pytorch/issues/86679. This issue is solved in PyTorch 2.1
-            if pytorch_after(major=2, minor=1):
-                self.add_module("upsample_non_trainable", upsample)
-            else:
-                self.add_module(
-                    "upsample_non_trainable",
-                    CastTempType(initial_type=torch.bfloat16, temporary_type=torch.float32, submodule=upsample),
-                )
+            self.add_module("upsample_non_trainable", upsample)
             if post_conv:
                 self.add_module("postconv", post_conv)
         elif up_mode == UpsampleMode.PIXELSHUFFLE:
diff --git a/pyproject.toml b/pyproject.toml
index 8ad55b1c2c..14e5702fa9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,7 +2,7 @@
 requires = [
     "wheel",
     "setuptools",
-    "torch>=1.13.1",
+    "torch>=2.2.0",
     "ninja",
     "packaging"
 ]
diff --git a/requirements.txt b/requirements.txt
index 5203b43128..0c35108be1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,2 @@
-torch>=1.13.1,<2.6
+torch>=2.2.0,<2.6
 numpy>=1.24,<2.0
diff --git a/setup.cfg b/setup.cfg
index 66d9e19609..b07684610e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -42,7 +42,7 @@ setup_requires =
     ninja
     packaging
 install_requires =
-    torch>=1.13.1
+    torch>=2.2.0
     numpy>=1.24,<2.0

 [options.extras_require]
diff --git a/tests/integration/test_integration_bundle_run.py b/tests/integration/test_integration_bundle_run.py
index cfbbcfe154..c60920a5fb 100644
--- a/tests/integration/test_integration_bundle_run.py
+++ b/tests/integration/test_integration_bundle_run.py
@@ -76,8 +76,7 @@ def test_tiny(self):
         )
         with open(meta_file, "w") as f:
             json.dump(
-                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "1.13.1", "numpy_version": "1.22.2"},
-                f,
+                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "2.2.0", "numpy_version": "1.22.2"}, f
             )
         cmd = ["coverage", "run", "-m", "monai.bundle"]
         # test both CLI entry "run" and "run_workflow"
@@ -114,8 +113,7 @@ def test_scripts_fold(self):
         )
         with open(meta_file, "w") as f:
             json.dump(
-                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "1.13.1", "numpy_version": "1.22.2"},
-                f,
+                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "2.2.0", "numpy_version": "1.22.2"}, f
             )

         os.mkdir(scripts_dir)
diff --git a/tests/nonconfig_workflow.py b/tests/nonconfig_workflow.py
index fcfc5b2951..ddf03b70cb 100644
--- a/tests/nonconfig_workflow.py
+++ b/tests/nonconfig_workflow.py
@@ -65,7 +65,7 @@ def initialize(self):
         self._monai_version = "1.1.0"

         if self._pytorch_version is None:
-            self._pytorch_version = "1.13.1"
+            self._pytorch_version = "2.2.0"

         if self._numpy_version is None:
             self._numpy_version = "1.22.2"

From f4def864f9d865d0d11b05228b1754524ae99ebf Mon Sep 17 00:00:00 2001
From: James Butler
Date: Wed, 26 Feb 2025 01:22:44 -0500
Subject: [PATCH 2/3] Bump torch version enabling numpy 2 compatibility

PyTorch added support for numpy 2 starting with PyTorch 2.3.0, so either
numpy 1 or numpy 2 can be used with torch>=2.3.0.

A special case is handled on Windows, as the PyTorch Windows binaries had
compatibility issues with numpy 2 that were only fixed in torch 2.4.1 (see
https://github.com/pytorch/pytorch/issues/131668#issuecomment-2307447045).
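
An illustrative aside, not part of the change itself: the Windows special case
relies on standard environment markers, which pip evaluates against the
installing platform. A minimal sketch using the `packaging` library (the
requirement strings simply mirror the ones introduced below):

    # Sketch only: show which platform-conditional torch pin applies on a given
    # platform by evaluating its PEP 508 environment marker.
    from packaging.requirements import Requirement

    requirements = [
        Requirement("torch>=2.3.0,<2.6; sys_platform != 'win32'"),
        Requirement("torch>=2.4.1,<2.6; sys_platform == 'win32'"),
    ]
    for req in requirements:
        # Marker.evaluate() defaults to the current interpreter's environment;
        # passing a dict lets us ask about another platform, e.g. Windows.
        print(req, "-> applies on win32:", req.marker.evaluate({"sys_platform": "win32"}))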

Signed-off-by: James Butler
---
 .github/workflows/cron.yml                       | 4 ----
 .github/workflows/pythonapp-min.yml              | 2 +-
 .github/workflows/pythonapp.yml                  | 6 +++---
 docs/requirements.txt                            | 4 ++--
 environment-dev.yml                              | 4 ++--
 pyproject.toml                                   | 2 +-
 requirements.txt                                 | 5 +++--
 setup.cfg                                        | 5 +++--
 tests/integration/test_integration_bundle_run.py | 4 ++--
 tests/nonconfig_workflow.py                      | 2 +-
 10 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/.github/workflows/cron.yml b/.github/workflows/cron.yml
index 6c3f1ccb26..77fe9ca3a2 100644
--- a/.github/workflows/cron.yml
+++ b/.github/workflows/cron.yml
@@ -13,15 +13,11 @@ jobs:
     strategy:
       matrix:
         environment:
-          - "PT220+CUDA118"
           - "PT230+CUDA121"
           - "PT240+CUDA126"
           - "PTLATEST+CUDA126"
         include:
           # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes
-          - environment: PT220+CUDA118
-            pytorch: "torch==2.2.0 torchvision==0.17.0 --extra-index-url https://download.pytorch.org/whl/cu121"
-            base: "nvcr.io/nvidia/pytorch:22.10-py3"  # CUDA 11.8
           - environment: PT230+CUDA121
             pytorch: "pytorch==2.3.0 torchvision==0.18.0 --extra-index-url https://download.pytorch.org/whl/cu121"
             base: "nvcr.io/nvidia/pytorch:23.08-py3"  # CUDA 12.1
diff --git a/.github/workflows/pythonapp-min.yml b/.github/workflows/pythonapp-min.yml
index bd5e1ffb2b..afc9f6f6d4 100644
--- a/.github/workflows/pythonapp-min.yml
+++ b/.github/workflows/pythonapp-min.yml
@@ -124,7 +124,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        pytorch-version: ['2.2.2', '2.3.1', '2.4.1', 'latest']
+        pytorch-version: ['2.3.1', '2.4.1', '2.5.1', 'latest']
     timeout-minutes: 40
     steps:
    - uses: actions/checkout@v4
diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml
index 29a4ef3183..5d6fd06afa 100644
--- a/.github/workflows/pythonapp.yml
+++ b/.github/workflows/pythonapp.yml
@@ -94,7 +94,7 @@ jobs:
    - if: runner.os == 'windows'
      name: Install torch cpu from pytorch.org (Windows only)
      run: |
-        python -m pip install torch==2.2.0+cpu torchvision==0.17.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
+        python -m pip install torch==2.4.1 torchvision==0.19.1+cpu --index-url https://download.pytorch.org/whl/cpu
    - if: runner.os == 'Linux'
      name: Install itk pre-release (Linux only)
      run: |
@@ -103,7 +103,7 @@ jobs:
    - name: Install the dependencies
      run: |
        python -m pip install --user --upgrade pip wheel
-        python -m pip install torch==2.2.0 torchvision==0.17.0
+        python -m pip install torch==2.4.1 torchvision==0.19.1
        cat "requirements-dev.txt"
        python -m pip install -r requirements-dev.txt
        python -m pip list
@@ -155,7 +155,7 @@ jobs:
        # install the latest pytorch for testing
        # however, "pip install monai*.tar.gz" will build cpp/cuda with an isolated
        # fresh torch installation according to pyproject.toml
-        python -m pip install torch>=2.2.0 torchvision
+        python -m pip install torch>=2.3.0 torchvision
    - name: Check packages
      run: |
        pip uninstall monai
diff --git a/docs/requirements.txt b/docs/requirements.txt
index c0faa47264..b314e10640 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,5 +1,5 @@
--f https://download.pytorch.org/whl/cpu/torch-2.2.0%2Bcpu-cp39-cp39-linux_x86_64.whl
-torch>=2.2.0
+-f https://download.pytorch.org/whl/cpu/torch-2.3.0%2Bcpu-cp39-cp39-linux_x86_64.whl
+torch>=2.3.0
 pytorch-ignite==0.4.11
 numpy>=1.20
 itk>=5.2
diff --git a/environment-dev.yml b/environment-dev.yml
index ba2fed6cdb..9358cdc83b 100644
--- a/environment-dev.yml
+++ b/environment-dev.yml
@@ -5,8 +5,8 @@ channels:
   - nvidia
   - conda-forge
 dependencies:
-  - numpy>=1.24,<2.0
-  - pytorch>=2.2.0
+  - numpy>=1.24,<3.0
+  - pytorch>=2.3.0
   - torchio
   - torchvision
   - pytorch-cuda>=11.6
diff --git a/pyproject.toml b/pyproject.toml
index 14e5702fa9..588d6d22d8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,7 +2,7 @@
 requires = [
     "wheel",
     "setuptools",
-    "torch>=2.2.0",
+    "torch>=2.3.0",
     "ninja",
     "packaging"
 ]
diff --git a/requirements.txt b/requirements.txt
index 0c35108be1..452a62adda 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,3 @@
-torch>=2.2.0,<2.6
-numpy>=1.24,<2.0
+torch>=2.3.0,<2.6; sys_platform != 'win32'
+torch>=2.4.1,<2.6; sys_platform == 'win32'
+numpy>=1.24,<3.0
diff --git a/setup.cfg b/setup.cfg
index b07684610e..2b06df64de 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -42,8 +42,9 @@ setup_requires =
     ninja
     packaging
 install_requires =
-    torch>=2.2.0
-    numpy>=1.24,<2.0
+    torch>=2.3.0; sys_platform != 'win32'
+    torch>=2.4.1; sys_platform == 'win32'
+    numpy>=1.24,<3.0

 [options.extras_require]
 all =
diff --git a/tests/integration/test_integration_bundle_run.py b/tests/integration/test_integration_bundle_run.py
index c60920a5fb..7f366d4745 100644
--- a/tests/integration/test_integration_bundle_run.py
+++ b/tests/integration/test_integration_bundle_run.py
@@ -76,7 +76,7 @@ def test_tiny(self):
         )
         with open(meta_file, "w") as f:
             json.dump(
-                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "2.2.0", "numpy_version": "1.22.2"}, f
+                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "2.3.0", "numpy_version": "1.22.2"}, f
             )
         cmd = ["coverage", "run", "-m", "monai.bundle"]
         # test both CLI entry "run" and "run_workflow"
@@ -113,7 +113,7 @@ def test_scripts_fold(self):
         )
         with open(meta_file, "w") as f:
             json.dump(
-                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "2.2.0", "numpy_version": "1.22.2"}, f
+                {"version": "0.1.0", "monai_version": "1.1.0", "pytorch_version": "2.3.0", "numpy_version": "1.22.2"}, f
             )

         os.mkdir(scripts_dir)
diff --git a/tests/nonconfig_workflow.py b/tests/nonconfig_workflow.py
index ddf03b70cb..bcbdc67b71 100644
--- a/tests/nonconfig_workflow.py
+++ b/tests/nonconfig_workflow.py
@@ -65,7 +65,7 @@ def initialize(self):
         self._monai_version = "1.1.0"

         if self._pytorch_version is None:
-            self._pytorch_version = "2.2.0"
+            self._pytorch_version = "2.3.0"

         if self._numpy_version is None:
             self._numpy_version = "1.22.2"

From 9b6f62219969b3069002875a22c002e36195678f Mon Sep 17 00:00:00 2001
From: James Butler
Date: Fri, 28 Feb 2025 09:40:52 -0500
Subject: [PATCH 3/3] Allow forgiving tolerance on some array comparison tests

This may have come with the numpy 2 change where the precision of scalars is
now preserved consistently. See
https://numpy.org/devdocs/numpy_2_0_migration_guide.html#changes-to-numpy-data-type-promotion

Observed testing results which failed only on the macOS runner:

Mismatched elements: 1 / 1 (100%)
Max absolute difference among violations: 2.74195588e-09
Max relative difference among violations: 3.36410326e-09
 ACTUAL: array([0.815063], dtype=float32)
 DESIRED: array(0.815063)
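
For reference, an illustrative sketch (not part of the patch): under the new
NEP 50 promotion rules a float64 value keeps its precision when combined with
a float32 array, which matches the observed output above (a float32 ACTUAL
compared against a float64 DESIRED). An exact comparison then trips over the
tiny rounding difference that a tolerance-based comparison absorbs.

    # Sketch only: float32 metric result vs. float64 reference value.
    import numpy as np

    metric = np.array([0.815063], dtype=np.float32)  # what the metric returns
    reference = np.nanmean(np.array([0.815063]))     # float64 reference value

    # numpy 2 (NEP 50): the float64 scalar is no longer demoted, so this is float64;
    # numpy 1 value-based casting would have produced float32 here.
    print((metric + np.float64(0.0)).dtype)

    try:
        np.testing.assert_array_equal(metric, reference)   # exact comparison
    except AssertionError:
        print("assert_array_equal fails on a ~1e-9 rounding difference")
    np.testing.assert_allclose(metric, reference)          # passes within default rtol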

Signed-off-by: James Butler
---
 tests/metrics/test_surface_dice.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/metrics/test_surface_dice.py b/tests/metrics/test_surface_dice.py
index 01f80bd01e..a3d03e9937 100644
--- a/tests/metrics/test_surface_dice.py
+++ b/tests/metrics/test_surface_dice.py
@@ -82,7 +82,7 @@ def test_tolerance_euclidean_distance_with_spacing(self):
         expected_res0[1, 1] = np.nan
         for b, c in np.ndindex(batch_size, n_class):
             np.testing.assert_allclose(expected_res0[b, c], res0[b, c].cpu())
-        np.testing.assert_array_equal(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
+        np.testing.assert_allclose(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
         np.testing.assert_equal(not_nans.cpu(), torch.tensor(2))

     def test_tolerance_euclidean_distance(self):
@@ -126,7 +126,7 @@ def test_tolerance_euclidean_distance(self):
         expected_res0[1, 1] = np.nan
         for b, c in np.ndindex(batch_size, n_class):
             np.testing.assert_allclose(expected_res0[b, c], res0[b, c].cpu())
-        np.testing.assert_array_equal(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
+        np.testing.assert_allclose(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
         np.testing.assert_equal(not_nans.cpu(), torch.tensor(2))

     def test_tolerance_euclidean_distance_3d(self):
@@ -173,7 +173,7 @@ def test_tolerance_euclidean_distance_3d(self):
         expected_res0[1, 1] = np.nan
         for b, c in np.ndindex(batch_size, n_class):
             np.testing.assert_allclose(expected_res0[b, c], res0[b, c].cpu())
-        np.testing.assert_array_equal(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
+        np.testing.assert_allclose(agg0.cpu(), np.nanmean(np.nanmean(expected_res0, axis=1), axis=0))
         np.testing.assert_equal(not_nans.cpu(), torch.tensor(2))

     def test_tolerance_all_distances(self):