diff --git a/monai/config/__init__.py b/monai/config/__init__.py
index 251be002f2..f1c7707d1f 100644
--- a/monai/config/__init__.py
+++ b/monai/config/__init__.py
@@ -18,4 +18,4 @@
     print_gpu_info,
     print_system_info,
 )
-from .type_definitions import IndexSelection, KeysCollection
+from .type_definitions import DtypeLike, IndexSelection, KeysCollection, NdarrayTensor
diff --git a/monai/config/deviceconfig.py b/monai/config/deviceconfig.py
index 9e448a9ac3..be77a1d975 100644
--- a/monai/config/deviceconfig.py
+++ b/monai/config/deviceconfig.py
@@ -162,7 +162,7 @@ def get_system_info() -> OrderedDict:
     _dict_append(
         output,
         "Avg. sensor temp. (Celsius)",
-        lambda: round(
+        lambda: np.round(
             np.mean([item.current for sublist in psutil.sensors_temperatures().values() for item in sublist], 1)
         ),
     )
diff --git a/monai/config/type_definitions.py b/monai/config/type_definitions.py
index ea0c72576c..daa9b10052 100644
--- a/monai/config/type_definitions.py
+++ b/monai/config/type_definitions.py
@@ -9,9 +9,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Collection, Hashable, Iterable, Union
+from typing import Collection, Hashable, Iterable, TypeVar, Union
 
-__all__ = ["KeysCollection", "IndexSelection"]
+import numpy as np
+import torch
+
+__all__ = ["KeysCollection", "IndexSelection", "DtypeLike", "NdarrayTensor"]
 
 """Commonly used concepts
 This module provides naming and type specifications for commonly used concepts
@@ -51,3 +54,16 @@
 The indices must be integers, and if a container of indices is specified, the
 container must be iterable.
 """
+
+DtypeLike = Union[
+    np.dtype,
+    type,
+    None,
+]
+"""Type of datatypes,
+adapted from https://github.com/numpy/numpy/blob/master/numpy/typing/_dtype_like.py
+"""
+
+# Generic type which can represent either a numpy.ndarray or a torch.Tensor
+# Unlike Union, this can create a dependence between parameter(s) / return(s)
+NdarrayTensor = TypeVar("NdarrayTensor", np.ndarray, torch.Tensor)
diff --git a/monai/data/csv_saver.py b/monai/data/csv_saver.py
index 5f5e415055..ec9ec562cd 100644
--- a/monai/data/csv_saver.py
+++ b/monai/data/csv_saver.py
@@ -75,7 +75,7 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict]
         """
         save_key = meta_data["filename_or_obj"] if meta_data else str(self._data_index)
         self._data_index += 1
-        if torch.is_tensor(data):
+        if isinstance(data, torch.Tensor):
             data = data.detach().cpu().numpy()
         if not isinstance(data, np.ndarray):
             raise AssertionError
diff --git a/monai/data/image_dataset.py b/monai/data/image_dataset.py
index 7dd55431af..1568e082ee 100644
--- a/monai/data/image_dataset.py
+++ b/monai/data/image_dataset.py
@@ -14,6 +14,7 @@
 import numpy as np
 from torch.utils.data import Dataset
 
+from monai.config import DtypeLike
 from monai.data.image_reader import ImageReader
 from monai.transforms import LoadImage, Randomizable, apply_transform
 from monai.utils import MAX_SEED, get_seed
@@ -36,7 +37,7 @@ def __init__(
         transform: Optional[Callable] = None,
         seg_transform: Optional[Callable] = None,
         image_only: bool = True,
-        dtype: Optional[np.dtype] = np.float32,
+        dtype: DtypeLike = np.float32,
         reader: Optional[Union[ImageReader, str]] = None,
         *args,
         **kwargs,
diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py
index 0fd784af05..d0f5f4aefc 100644
--- a/monai/data/image_reader.py
+++ b/monai/data/image_reader.py
@@ -16,7 +16,7 @@
 import numpy as np
 from torch.utils.data._utils.collate import np_str_obj_array_pattern
 
-from monai.config import KeysCollection
+from monai.config import DtypeLike, KeysCollection
 from monai.data.utils import correct_nifti_header_if_necessary
 from monai.utils import ensure_tuple, optional_import
@@ -244,7 +244,7 @@ def _get_affine(self, img) -> np.ndarray:
         affine = np.eye(direction.shape[0] + 1)
         affine[(slice(-1), slice(-1))] = direction @ np.diag(spacing)
         affine[(slice(-1), -1)] = origin
-        return affine
+        return np.asarray(affine)
 
     def _get_spatial_shape(self, img) -> np.ndarray:
         """
@@ -258,7 +258,7 @@ def _get_spatial_shape(self, img) -> np.ndarray:
             shape.reverse()
         return np.asarray(shape)
 
-    def _get_array_data(self, img) -> np.ndarray:
+    def _get_array_data(self, img):
         """
         Get the raw array data of the image, converted to Numpy array.
@@ -295,7 +295,7 @@ class NibabelReader(ImageReader):
 
     """
 
-    def __init__(self, as_closest_canonical: bool = False, dtype: Optional[np.dtype] = np.float32, **kwargs):
+    def __init__(self, as_closest_canonical: bool = False, dtype: DtypeLike = np.float32, **kwargs):
         super().__init__()
         self.as_closest_canonical = as_closest_canonical
         self.dtype = dtype
@@ -385,7 +385,7 @@ def _get_affine(self, img) -> np.ndarray:
             img: a Nibabel image object loaded from a image file.
 
         """
-        return img.affine.copy()
+        return np.array(img.affine, copy=True)
 
     def _get_spatial_shape(self, img) -> np.ndarray:
         """
diff --git a/monai/data/nifti_saver.py b/monai/data/nifti_saver.py
index f4781f82fd..db559f97f4 100644
--- a/monai/data/nifti_saver.py
+++ b/monai/data/nifti_saver.py
@@ -14,6 +14,7 @@
 import numpy as np
 import torch
 
+from monai.config import DtypeLike
 from monai.data.nifti_writer import write_nifti
 from monai.data.utils import create_file_basename
 from monai.utils import GridSampleMode, GridSamplePadMode
@@ -36,8 +37,8 @@ def __init__(
         mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
         padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
         align_corners: bool = False,
-        dtype: Optional[np.dtype] = np.float64,
-        output_dtype: Optional[np.dtype] = np.float32,
+        dtype: DtypeLike = np.float64,
+        output_dtype: DtypeLike = np.float32,
     ) -> None:
         """
         Args:
@@ -100,7 +101,7 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict]
         affine = meta_data.get("affine", None) if meta_data else None
         spatial_shape = meta_data.get("spatial_shape", None) if meta_data else None
 
-        if torch.is_tensor(data):
+        if isinstance(data, torch.Tensor):
             data = data.detach().cpu().numpy()
 
         filename = create_file_basename(self.output_postfix, filename, self.output_dir)
@@ -109,7 +110,7 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict]
         while len(data.shape) < 4:
             data = np.expand_dims(data, -1)
         # change data to "channel last" format and write to nifti format file
-        data = np.moveaxis(data, 0, -1)
+        data = np.moveaxis(np.asarray(data), 0, -1)
         write_nifti(
             data,
             file_name=filename,
diff --git a/monai/data/nifti_writer.py b/monai/data/nifti_writer.py
index 6837ebeb90..29dc62cdec 100644
--- a/monai/data/nifti_writer.py
+++ b/monai/data/nifti_writer.py
@@ -14,6 +14,7 @@
 import numpy as np
 import torch
 
+from monai.config import DtypeLike
 from monai.data.utils import compute_shape_offset, to_affine_nd
 from monai.networks.layers import AffineTransform
 from monai.utils import GridSampleMode, GridSamplePadMode, optional_import
@@ -27,12 +28,12 @@ def write_nifti(
     affine: Optional[np.ndarray] = None,
     target_affine: Optional[np.ndarray] = None,
     resample: bool = True,
-    output_spatial_shape: Optional[Sequence[int]] = None,
+    output_spatial_shape: Union[Sequence[int], np.ndarray, None] = None,
     mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
     padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
     align_corners: bool = False,
-    dtype: Optional[np.dtype] = np.float64,
-    output_dtype: Optional[np.dtype] = np.float32,
+    dtype: DtypeLike = np.float64,
+    output_dtype: DtypeLike = np.float32,
 ) -> None:
     """
     Write numpy data into NIfTI files to disk.  This function converts data
@@ -126,7 +127,7 @@ def write_nifti(
         transform = np.linalg.inv(_affine) @ target_affine
         if output_spatial_shape is None:
             output_spatial_shape, _ = compute_shape_offset(data.shape, _affine, target_affine)
-        output_spatial_shape_ = list(output_spatial_shape)
+        output_spatial_shape_ = list(output_spatial_shape) if output_spatial_shape is not None else []
         if data.ndim > 3:  # multi channel, resampling each channel
             while len(output_spatial_shape_) < 3:
                 output_spatial_shape_ = output_spatial_shape_ + [1]
diff --git a/monai/data/png_saver.py b/monai/data/png_saver.py
index 450e327d6b..8ed8b234f4 100644
--- a/monai/data/png_saver.py
+++ b/monai/data/png_saver.py
@@ -86,7 +86,7 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict]
         self._data_index += 1
         spatial_shape = meta_data.get("spatial_shape", None) if meta_data and self.resample else None
 
-        if torch.is_tensor(data):
+        if isinstance(data, torch.Tensor):
             data = data.detach().cpu().numpy()
 
         filename = create_file_basename(self.output_postfix, filename, self.output_dir)
@@ -95,12 +95,12 @@ def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict]
         if data.shape[0] == 1:
             data = data.squeeze(0)
         elif 2 < data.shape[0] < 5:
-            data = np.moveaxis(data, 0, -1)
+            data = np.moveaxis(np.asarray(data), 0, -1)
         else:
             raise ValueError(f"Unsupported number of channels: {data.shape[0]}, available options are [1, 3, 4]")
 
         write_png(
-            data,
+            np.asarray(data),
             file_name=filename,
             output_spatial_shape=spatial_shape,
             mode=self.mode,
diff --git a/monai/data/png_writer.py b/monai/data/png_writer.py
index d7baa6ea79..e6b9f1e8cf 100644
--- a/monai/data/png_writer.py
+++ b/monai/data/png_writer.py
@@ -65,10 +65,10 @@ def write_png(
             data = np.expand_dims(data, 0)  # make a channel
             data = xform(data)[0]  # first channel
         if mode != InterpolateMode.NEAREST:
-            data = np.clip(data, _min, _max)
+            data = np.clip(data, _min, _max)  # type: ignore
 
     if scale is not None:
-        data = np.clip(data, 0.0, 1.0)  # png writer only can scale data in range [0, 1]
+        data = np.clip(data, 0.0, 1.0)  # type: ignore  # png writer can only scale data in range [0, 1]
        if scale == np.iinfo(np.uint8).max:
             data = (scale * data).astype(np.uint8)
         elif scale == np.iinfo(np.uint16).max:
diff --git a/monai/data/utils.py b/monai/data/utils.py
index ca8f3b1017..acc6d2e97a 100644
--- a/monai/data/utils.py
+++ b/monai/data/utils.py
@@ -329,7 +329,7 @@ def rectify_header_sform_qform(img_nii):
     return img_nii
 
 
-def zoom_affine(affine: np.ndarray, scale: Sequence[float], diagonal: bool = True) -> np.ndarray:
+def zoom_affine(affine: np.ndarray, scale: Sequence[float], diagonal: bool = True):
     """
     To make column norm of `affine` the same as `scale`.  If diagonal is False,
     returns an affine that combines orthogonal rotation and the new scale.
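
A note on the `NdarrayTensor` TypeVar added to `type_definitions.py` above: unlike `Union[np.ndarray, torch.Tensor]`, a TypeVar lets a type checker tie a function's return type to its argument type. A minimal sketch of the effect, assuming only what the hunk above defines (the `scale_values` helper is hypothetical, not part of this patch):

    import numpy as np
    import torch

    from monai.config import NdarrayTensor


    def scale_values(img: NdarrayTensor, factor: float = 2.0) -> NdarrayTensor:
        # With a Union annotation, mypy would infer the union for the result;
        # with the TypeVar, an ndarray argument is known to give an ndarray
        # result and a Tensor argument a Tensor result.
        return img * factor

Under mypy, `scale_values(np.ones(3))` is typed as `np.ndarray` while `scale_values(torch.ones(3))` is typed as `torch.Tensor`.
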
@@ -379,7 +379,7 @@ def zoom_affine(affine: np.ndarray, scale: Sequence[float], diagonal: bool = Tru
 
 
 def compute_shape_offset(
-    spatial_shape: np.ndarray, in_affine: np.ndarray, out_affine: np.ndarray
+    spatial_shape: Union[np.ndarray, Sequence[int]], in_affine: np.ndarray, out_affine: np.ndarray
 ) -> Tuple[np.ndarray, np.ndarray]:
     """
     Given input and output affine, compute appropriate shapes
diff --git a/monai/engines/utils.py b/monai/engines/utils.py
index f603338097..8f5899f2a5 100644
--- a/monai/engines/utils.py
+++ b/monai/engines/utils.py
@@ -32,7 +32,7 @@ class IterationEvents(EventEnum):
     """
-    Addtional Events engine can register and trigger in the iteration process.
+    Additional Events engine can register and trigger in the iteration process.
     Refer to the example in ignite: https://github.com/pytorch/ignite/blob/master/ignite/engine/events.py#L146
     These Events can be triggered during training iteration:
     `FORWARD_COMPLETED` is the Event when `network(image, label)` completed.
diff --git a/monai/handlers/iteration_metric.py b/monai/handlers/iteration_metric.py
index bfc7252b2f..641efad243 100644
--- a/monai/handlers/iteration_metric.py
+++ b/monai/handlers/iteration_metric.py
@@ -96,7 +96,7 @@ def compute(self) -> Any:
         # save score of every image into engine.state for other components
         if self.save_details:
             if self._engine is None or self._name is None:
-                raise RuntimeError("plesae call the attach() function to connect expected engine first.")
+                raise RuntimeError("please call the attach() function to connect expected engine first.")
             self._engine.state.metric_details[self._name] = _scores
 
         result: torch.Tensor = torch.zeros(1)
@@ -108,7 +108,7 @@ def compute(self) -> Any:
             # broadcast result to all processes
             result = idist.broadcast(result, src=0)
 
-        return result.item() if torch.is_tensor(result) else result
+        return result.item() if isinstance(result, torch.Tensor) else result
 
     def _reduce(self, scores) -> Any:
         return do_metric_reduction(scores, MetricReduction.MEAN)[0]
diff --git a/monai/handlers/metrics_saver.py b/monai/handlers/metrics_saver.py
index f9deea35df..d67f0f6c39 100644
--- a/monai/handlers/metrics_saver.py
+++ b/monai/handlers/metrics_saver.py
@@ -53,7 +53,7 @@ class MetricsSaver:
             should be within this list: [`mean`, `median`, `max`, `min`, `90percent`, `std`].
             default to None.
         save_rank: only the handler on specified rank will save to files in multi-gpus validation, default to 0.
-        delimiter: the delimiter charactor in CSV file, default to "\t".
+        delimiter: the delimiter character in CSV file, default to "\t".
         output_type: expected output file type, supported types: ["csv"], default to "csv".
 
     """
diff --git a/monai/handlers/segmentation_saver.py b/monai/handlers/segmentation_saver.py
index c712ce9a9e..8321a49851 100644
--- a/monai/handlers/segmentation_saver.py
+++ b/monai/handlers/segmentation_saver.py
@@ -14,6 +14,7 @@
 
 import numpy as np
 
+from monai.config import DtypeLike
 from monai.data import NiftiSaver, PNGSaver
 from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, exact_version, optional_import
 
@@ -38,8 +39,8 @@ def __init__(
         mode: Union[GridSampleMode, InterpolateMode, str] = "nearest",
         padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
         scale: Optional[int] = None,
-        dtype: Optional[np.dtype] = np.float64,
-        output_dtype: Optional[np.dtype] = np.float32,
+        dtype: DtypeLike = np.float64,
+        output_dtype: DtypeLike = np.float32,
         batch_transform: Callable = lambda x: x,
         output_transform: Callable = lambda x: x,
         name: Optional[str] = None,
diff --git a/monai/handlers/stats_handler.py b/monai/handlers/stats_handler.py
index 007fbed413..24d844569f 100644
--- a/monai/handlers/stats_handler.py
+++ b/monai/handlers/stats_handler.py
@@ -196,10 +196,12 @@ def _default_iteration_print(self, engine: Engine) -> None:
                         " {}:{}".format(name, type(value))
                     )
                     continue  # not printing multi dimensional output
-                out_str += self.key_var_format.format(name, value.item() if torch.is_tensor(value) else value)
+                out_str += self.key_var_format.format(name, value.item() if isinstance(value, torch.Tensor) else value)
         else:
             if is_scalar(loss):  # not printing multi dimensional output
-                out_str += self.key_var_format.format(self.tag_name, loss.item() if torch.is_tensor(loss) else loss)
+                out_str += self.key_var_format.format(
+                    self.tag_name, loss.item() if isinstance(loss, torch.Tensor) else loss
+                )
             else:
                 warnings.warn(
                     "ignoring non-scalar output in StatsHandler,"
diff --git a/monai/handlers/tensorboard_handlers.py b/monai/handlers/tensorboard_handlers.py
index 15fa6a5eed..acdfb84c8c 100644
--- a/monai/handlers/tensorboard_handlers.py
+++ b/monai/handlers/tensorboard_handlers.py
@@ -159,9 +159,13 @@ def _default_iteration_writer(self, engine: Engine, writer: SummaryWriter) -> No
                         " {}:{}".format(name, type(value))
                     )
                     continue  # not plot multi dimensional output
-                writer.add_scalar(name, value.item() if torch.is_tensor(value) else value, engine.state.iteration)
+                writer.add_scalar(
+                    name, value.item() if isinstance(value, torch.Tensor) else value, engine.state.iteration
+                )
         elif is_scalar(loss):  # not printing multi dimensional output
-            writer.add_scalar(self.tag_name, loss.item() if torch.is_tensor(loss) else loss, engine.state.iteration)
+            writer.add_scalar(
+                self.tag_name, loss.item() if isinstance(loss, torch.Tensor) else loss, engine.state.iteration
+            )
         else:
             warnings.warn(
                 "ignoring non-scalar output in TensorBoardStatsHandler,"
@@ -261,7 +265,7 @@ def __call__(self, engine: Engine) -> None:
         """
         step = self.global_iter_transform(engine.state.epoch if self.epoch_level else engine.state.iteration)
         show_images = self.batch_transform(engine.state.batch)[0]
-        if torch.is_tensor(show_images):
+        if isinstance(show_images, torch.Tensor):
             show_images = show_images.detach().cpu().numpy()
         if show_images is not None:
             if not isinstance(show_images, np.ndarray):
@@ -274,7 +278,7 @@ def __call__(self, engine: Engine) -> None:
             )
 
         show_labels = self.batch_transform(engine.state.batch)[1]
-        if torch.is_tensor(show_labels):
+        if isinstance(show_labels, torch.Tensor):
             show_labels = show_labels.detach().cpu().numpy()
         if show_labels is not None:
             if not isinstance(show_labels, np.ndarray):
@@ -287,7 +291,7 @@ def __call__(self, engine: Engine) -> None:
             )
 
         show_outputs = self.output_transform(engine.state.output)
-        if torch.is_tensor(show_outputs):
+        if isinstance(show_outputs, torch.Tensor):
             show_outputs = show_outputs.detach().cpu().numpy()
         if show_outputs is not None:
             if not isinstance(show_outputs, np.ndarray):
diff --git a/monai/handlers/utils.py b/monai/handlers/utils.py
index ef652efe0a..a4b5c02f61 100644
--- a/monai/handlers/utils.py
+++ b/monai/handlers/utils.py
@@ -62,7 +62,7 @@ def evenly_divisible_all_gather(data: torch.Tensor) -> torch.Tensor:
         data: source tensor to pad and execute all_gather in distributed data parallel.
 
     """
-    if not torch.is_tensor(data):
+    if not isinstance(data, torch.Tensor):
         raise ValueError("input data must be PyTorch Tensor.")
 
     if idist.get_world_size() <= 1:
@@ -110,7 +110,7 @@ def write_metrics_reports(
             list of strings - generate summary report for every metric_details with specified operations, they
             should be within this list: [`mean`, `median`, `max`, `min`, `90percent`, `std`].
             default to None.
-        deli: the delimiter charactor in the file, default to "\t".
+        deli: the delimiter character in the file, default to "\t".
         output_type: expected output file type, supported types: ["csv"], default to "csv".
 
     """
@@ -127,7 +127,7 @@ def write_metrics_reports(
 
     if metric_details is not None and len(metric_details) > 0:
         for k, v in metric_details.items():
-            if torch.is_tensor(v):
+            if isinstance(v, torch.Tensor):
                 v = v.cpu().numpy()
             if v.ndim == 0:
                 # reshape to [1, 1] if no batch and class dims
@@ -162,5 +162,5 @@ def write_metrics_reports(
             with open(os.path.join(save_dir, f"{k}_summary.csv"), "w") as f:
                 f.write(f"class{deli}{deli.join(ops)}\n")
-                for i, c in enumerate(v.transpose()):
+                for i, c in enumerate(np.transpose(v)):
                     f.write(f"{class_labels[i]}{deli}{deli.join([f'{supported_ops[k](c):.4f}' for k in ops])}\n")
diff --git a/monai/losses/dice.py b/monai/losses/dice.py
index f14aa6955f..c284660cc6 100644
--- a/monai/losses/dice.py
+++ b/monai/losses/dice.py
@@ -508,7 +508,7 @@ def wasserstein_distance_map(self, flat_proba: torch.Tensor, flat_target: torch.
             flat_target: the target tensor.
         """
         # Turn the distance matrix to a map of identical matrix
-        m = torch.clone(self.m).to(flat_proba.device)
+        m = torch.clone(torch.as_tensor(self.m)).to(flat_proba.device)
         m_extended = torch.unsqueeze(m, dim=0)
         m_extended = torch.unsqueeze(m_extended, dim=3)
         m_extended = m_extended.expand((flat_proba.size(0), m_extended.size(1), m_extended.size(2), flat_proba.size(2)))
diff --git a/monai/metrics/hausdorff_distance.py b/monai/metrics/hausdorff_distance.py
index 8ecc19ec46..6570ace800 100644
--- a/monai/metrics/hausdorff_distance.py
+++ b/monai/metrics/hausdorff_distance.py
@@ -127,9 +127,10 @@ def compute_hausdorff_distance(
         y_pred=y_pred,
         y=y,
     )
-
-    y = y.float()
-    y_pred = y_pred.float()
+    if isinstance(y, torch.Tensor):
+        y = y.float()
+    if isinstance(y_pred, torch.Tensor):
+        y_pred = y_pred.float()
 
     if y.shape != y_pred.shape:
         raise ValueError("y_pred and y should have same shapes.")
diff --git a/monai/metrics/rocauc.py b/monai/metrics/rocauc.py
index 9f081d1698..80a6671dfa 100644
--- a/monai/metrics/rocauc.py
+++ b/monai/metrics/rocauc.py
@@ -10,7 +10,7 @@
 # limitations under the License.
 
 import warnings
-from typing import Callable, List, Optional, Union, cast
+from typing import Callable, Optional, Union, cast
 
 import numpy as np
 import torch
@@ -57,7 +57,7 @@ def compute_roc_auc(
     softmax: bool = False,
     other_act: Optional[Callable] = None,
     average: Union[Average, str] = Average.MACRO,
-) -> Union[np.ndarray, List[float], float]:
+):
     """Computes Area Under the Receiver Operating Characteristic Curve (ROC AUC).
 
     Referring to: `sklearn.metrics.roc_auc_score <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html>`_.
diff --git a/monai/metrics/surface_distance.py b/monai/metrics/surface_distance.py
index 9e2f130bd2..b605fdb88f 100644
--- a/monai/metrics/surface_distance.py
+++ b/monai/metrics/surface_distance.py
@@ -120,8 +120,10 @@ def compute_average_surface_distance(
         y=y,
     )
 
-    y = y.float()
-    y_pred = y_pred.float()
+    if isinstance(y, torch.Tensor):
+        y = y.float()
+    if isinstance(y_pred, torch.Tensor):
+        y_pred = y_pred.float()
 
     if y.shape != y_pred.shape:
         raise ValueError("y_pred and y should have same shapes.")
@@ -135,7 +137,7 @@ def compute_average_surface_distance(
             if surface_distance.shape == (0,):
                 avg_surface_distance = np.nan
             else:
-                avg_surface_distance = surface_distance.mean()
+                avg_surface_distance = surface_distance.mean()  # type: ignore
             if not symmetric:
                 asd[b, c] = avg_surface_distance
             else:
@@ -143,7 +145,7 @@ def compute_average_surface_distance(
                 if surface_distance_2.shape == (0,):
                     avg_surface_distance_2 = np.nan
                 else:
-                    avg_surface_distance_2 = surface_distance_2.mean()
+                    avg_surface_distance_2 = surface_distance_2.mean()  # type: ignore
                 asd[b, c] = np.mean((avg_surface_distance, avg_surface_distance_2))
 
     return torch.from_numpy(asd)
diff --git a/monai/metrics/utils.py b/monai/metrics/utils.py
index cc7049ff81..0a254d9901 100644
--- a/monai/metrics/utils.py
+++ b/monai/metrics/utils.py
@@ -26,8 +26,8 @@
 
 
 def ignore_background(
-    y_pred: torch.Tensor,
-    y: torch.Tensor,
+    y_pred: Union[np.ndarray, torch.Tensor],
+    y: Union[np.ndarray, torch.Tensor],
 ):
     """
     This function is used to remove background (the first channel) for `y_pred` and `y`.
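
The recurring `torch.is_tensor(x)` → `isinstance(x, torch.Tensor)` rewrite throughout this patch is driven by static analysis: mypy narrows a variable's type through `isinstance` checks, while `torch.is_tensor` gives it no information. A minimal sketch of the pattern (the `to_numpy` helper is illustrative, not MONAI code):

    import numpy as np
    import torch

    def to_numpy(data):
        # Inside this branch mypy knows `data` is a torch.Tensor, so the
        # .detach()/.cpu()/.numpy() chain type-checks; with torch.is_tensor
        # the same code would be flagged on union-typed inputs.
        if isinstance(data, torch.Tensor):
            data = data.detach().cpu().numpy()
        return np.asarray(data)

The runtime behavior is essentially unchanged; only the checker's view of the code improves.
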
@@ -138,9 +138,9 @@ def get_mask_edges(
     """
 
     # Get both labelfields as np arrays
-    if torch.is_tensor(seg_pred):
+    if isinstance(seg_pred, torch.Tensor):
         seg_pred = seg_pred.detach().cpu().numpy()
-    if torch.is_tensor(seg_gt):
+    if isinstance(seg_gt, torch.Tensor):
         seg_gt = seg_gt.detach().cpu().numpy()
 
     if seg_pred.shape != seg_gt.shape:
@@ -157,7 +157,7 @@ def get_mask_edges(
         return (np.zeros_like(seg_pred), np.zeros_like(seg_gt))
 
     seg_pred, seg_gt = np.expand_dims(seg_pred, 0), np.expand_dims(seg_gt, 0)
-    box_start, box_end = generate_spatial_bounding_box(seg_pred | seg_gt)
+    box_start, box_end = generate_spatial_bounding_box(np.asarray(seg_pred | seg_gt))
     cropper = SpatialCrop(roi_start=box_start, roi_end=box_end)
     seg_pred, seg_gt = np.squeeze(cropper(seg_pred)), np.squeeze(cropper(seg_gt))
 
@@ -192,7 +192,7 @@ def get_surface_distance(
     else:
         if not np.any(seg_pred):
             dis = np.inf * np.ones_like(seg_gt)
-            return dis[seg_gt]
+            return np.asarray(dis[seg_gt])
         if distance_metric == "euclidean":
             dis = distance_transform_edt(~seg_gt)
         elif distance_metric in ["chessboard", "taxicab"]:
@@ -200,4 +200,4 @@ def get_surface_distance(
     else:
         raise ValueError(f"distance_metric {distance_metric} is not implemented.")
 
-    return dis[seg_pred]
+    return np.asarray(dis[seg_pred])
diff --git a/monai/networks/layers/convutils.py b/monai/networks/layers/convutils.py
index c4f798699c..994ca05b85 100644
--- a/monai/networks/layers/convutils.py
+++ b/monai/networks/layers/convutils.py
@@ -57,7 +57,7 @@ def stride_minus_kernel_padding(
 
 
 def calculate_out_shape(
-    in_shape: Union[Sequence[int], int],
+    in_shape: Union[Sequence[int], int, np.ndarray],
     kernel_size: Union[Sequence[int], int],
     stride: Union[Sequence[int], int],
     padding: Union[Sequence[int], int],
@@ -104,7 +104,7 @@ def gaussian_1d(
         1D torch tensor
 
     """
-    sigma = torch.as_tensor(sigma, dtype=torch.float, device=sigma.device if torch.is_tensor(sigma) else None)
+    sigma = torch.as_tensor(sigma, dtype=torch.float, device=sigma.device if isinstance(sigma, torch.Tensor) else None)
     device = sigma.device
     if truncated <= 0.0:
         raise ValueError(f"truncated must be positive, got {truncated}.")
@@ -149,7 +149,7 @@ def polyval(coef, x) -> torch.Tensor:
     Returns:
         1D torch tensor
     """
-    device = x.device if torch.is_tensor(x) else None
+    device = x.device if isinstance(x, torch.Tensor) else None
     coef = torch.as_tensor(coef, dtype=torch.float, device=device)
     if coef.ndim == 0 or (len(coef) < 1):
         return torch.zeros(x.shape)
@@ -161,7 +161,7 @@ def polyval(coef, x) -> torch.Tensor:
 
 
 def _modified_bessel_0(x: torch.Tensor) -> torch.Tensor:
-    x = torch.as_tensor(x, dtype=torch.float, device=x.device if torch.is_tensor(x) else None)
+    x = torch.as_tensor(x, dtype=torch.float, device=x.device if isinstance(x, torch.Tensor) else None)
     if torch.abs(x) < 3.75:
         y = x * x / 14.0625
         return polyval([0.45813e-2, 0.360768e-1, 0.2659732, 1.2067492, 3.0899424, 3.5156229, 1.0], y)
@@ -182,7 +182,7 @@ def _modified_bessel_0(x: torch.Tensor) -> torch.Tensor:
 
 
 def _modified_bessel_1(x: torch.Tensor) -> torch.Tensor:
-    x = torch.as_tensor(x, dtype=torch.float, device=x.device if torch.is_tensor(x) else None)
+    x = torch.as_tensor(x, dtype=torch.float, device=x.device if isinstance(x, torch.Tensor) else None)
     if torch.abs(x) < 3.75:
         y = x * x / 14.0625
         _coef = [0.32411e-3, 0.301532e-2, 0.2658733e-1, 0.15084934, 0.51498869, 0.87890594, 0.5]
@@ -207,7 +207,7 @@ def _modified_bessel_1(x: torch.Tensor) -> torch.Tensor:
 def _modified_bessel_i(n: int, x: torch.Tensor) -> torch.Tensor:
     if n < 2:
         raise ValueError(f"n must be greater than 1, got n={n}.")
-    x = torch.as_tensor(x, dtype=torch.float, device=x.device if torch.is_tensor(x) else None)
+    x = torch.as_tensor(x, dtype=torch.float, device=x.device if isinstance(x, torch.Tensor) else None)
     if x == 0.0:
         return x
     device = x.device
diff --git a/monai/networks/layers/filtering.py b/monai/networks/layers/filtering.py
index 83a33bc609..1bec725c7e 100644
--- a/monai/networks/layers/filtering.py
+++ b/monai/networks/layers/filtering.py
@@ -62,7 +62,7 @@ class PHLFilter(torch.autograd.Function):
     """
     Filters input based on arbitrary feature vectors. Uses a permutohedral
     lattice data structure to efficiently approximate n-dimensional gaussian
-    filtering. Complexity is broadly independant of kernel size. Most applicable
+    filtering. Complexity is broadly independent of kernel size. Most applicable
     to higher filter dimensions and larger kernel sizes.
 
     See:
diff --git a/monai/networks/layers/simplelayers.py b/monai/networks/layers/simplelayers.py
index 285b0d629f..f560526db8 100644
--- a/monai/networks/layers/simplelayers.py
+++ b/monai/networks/layers/simplelayers.py
@@ -182,12 +182,12 @@ def separable_filtering(
         TypeError: When ``x`` is not a ``torch.Tensor``.
 
     """
-    if not torch.is_tensor(x):
+    if not isinstance(x, torch.Tensor):
         raise TypeError(f"x must be a torch.Tensor but is {type(x).__name__}.")
 
     spatial_dims = len(x.shape) - 2
     _kernels = [
-        torch.as_tensor(s, dtype=torch.float, device=s.device if torch.is_tensor(s) else None)
+        torch.as_tensor(s, dtype=torch.float, device=s.device if isinstance(s, torch.Tensor) else None)
         for s in ensure_tuple_rep(kernels, spatial_dims)
     ]
     _paddings = [cast(int, (same_padding(k.shape[0]))) for k in _kernels]
@@ -251,7 +251,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         """
 
         # Make input a real tensor on the CPU
-        x = torch.as_tensor(x, device=x.device if torch.is_tensor(x) else None)
+        x = torch.as_tensor(x, device=x.device if isinstance(x, torch.Tensor) else None)
         if torch.is_complex(x):
             raise ValueError("x must be real.")
         else:
@@ -317,7 +317,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         """
 
         # Make input a real tensor
-        x = torch.as_tensor(x, device=x.device if torch.is_tensor(x) else None)
+        x = torch.as_tensor(x, device=x.device if isinstance(x, torch.Tensor) else None)
         if torch.is_complex(x):
             raise ValueError("x must be real.")
         x = x.to(dtype=torch.float)
@@ -384,7 +384,7 @@ def __init__(
         super().__init__()
         self.sigma = [
             torch.nn.Parameter(
-                torch.as_tensor(s, dtype=torch.float, device=s.device if torch.is_tensor(s) else None),
+                torch.as_tensor(s, dtype=torch.float, device=s.device if isinstance(s, torch.Tensor) else None),
                 requires_grad=requires_grad,
             )
             for s in ensure_tuple_rep(sigma, int(spatial_dims))
diff --git a/monai/networks/layers/spatial_transforms.py b/monai/networks/layers/spatial_transforms.py
index c0f22502c8..175fd05694 100644
--- a/monai/networks/layers/spatial_transforms.py
+++ b/monai/networks/layers/spatial_transforms.py
@@ -487,7 +487,7 @@ def forward(
         """
         # validate `theta`
-        if not torch.is_tensor(theta):
+        if not isinstance(theta, torch.Tensor):
             raise TypeError(f"theta must be torch.Tensor but is {type(theta).__name__}.")
         if theta.dim() not in (2, 3):
             raise ValueError(f"theta must be Nxdxd or dxd, got {theta.shape}.")
@@ -504,7 +504,7 @@ def forward(
             raise ValueError(f"theta must be Nx3x3 or Nx4x4, got {theta.shape}.")
 
         # validate `src`
-        if not torch.is_tensor(src):
+        if not isinstance(src, torch.Tensor):
             raise TypeError(f"src must be torch.Tensor but is {type(src).__name__}.")
         sr = src.dim() - 2  # input spatial rank
         if sr not in (2, 3):
diff --git a/monai/networks/nets/localnet.py b/monai/networks/nets/localnet.py
index ea8abca185..e9df68104d 100644
--- a/monai/networks/nets/localnet.py
+++ b/monai/networks/nets/localnet.py
@@ -99,7 +99,7 @@ def forward(self, x) -> torch.Tensor:
         if size % (2 ** self.extract_max_level) != 0:
             raise ValueError(
                 f"given extract_max_level {self.extract_max_level}, "
-                f"all input spatial dimension must be devidable by {2 ** self.extract_max_level}, "
+                f"all input spatial dimensions must be divisible by {2 ** self.extract_max_level}, "
                 f"got input of size {image_size}"
             )
         mid_features = []  # 0 -> self.extract_max_level - 1
diff --git a/monai/networks/nets/regressor.py b/monai/networks/nets/regressor.py
index a1abadb6ba..d64ad2fc10 100644
--- a/monai/networks/nets/regressor.py
+++ b/monai/networks/nets/regressor.py
@@ -78,7 +78,7 @@ def __init__(
 
         padding = same_padding(kernel_size)
 
-        self.final_size = np.asarray(self.in_shape, np.int)
+        self.final_size = np.asarray(self.in_shape, dtype=int)
         self.reshape = Reshape(*self.out_shape)
 
         # encode stage
@@ -86,7 +86,7 @@ def __init__(
             layer = self._get_layer(echannel, c, s, i == len(channels) - 1)
             echannel = c  # use the output channel number as the input for the next loop
             self.net.add_module("layer_%i" % i, layer)
-            self.final_size = calculate_out_shape(self.final_size, kernel_size, s, padding)
+            self.final_size = calculate_out_shape(self.final_size, kernel_size, s, padding)  # type: ignore
 
         self.final = self._get_final_layer((echannel,) + self.final_size)
diff --git a/monai/networks/nets/varautoencoder.py b/monai/networks/nets/varautoencoder.py
index b68350e8b1..30ee806dbb 100644
--- a/monai/networks/nets/varautoencoder.py
+++ b/monai/networks/nets/varautoencoder.py
@@ -46,7 +46,7 @@ def __init__(
 
         self.in_channels, *self.in_shape = in_shape
         self.latent_size = latent_size
-        self.final_size = np.asarray(self.in_shape, np.int)
+        self.final_size = np.asarray(self.in_shape, dtype=int)
 
         super().__init__(
             dimensions,
@@ -68,7 +68,7 @@ def __init__(
         padding = same_padding(self.kernel_size)
 
         for s in strides:
-            self.final_size = calculate_out_shape(self.final_size, self.kernel_size, s, padding)
+            self.final_size = calculate_out_shape(self.final_size, self.kernel_size, s, padding)  # type: ignore
 
         linear_size = int(np.product(self.final_size)) * self.encoded_channels
         self.mu = nn.Linear(linear_size, self.latent_size)
diff --git a/monai/networks/utils.py b/monai/networks/utils.py
index 175d3d8b73..847bfc97c2 100644
--- a/monai/networks/utils.py
+++ b/monai/networks/utils.py
@@ -150,7 +150,7 @@ def to_norm_affine(
         ValueError: When ``src_size`` or ``dst_size`` dimensions differ from ``affine``.
 
     """
-    if not torch.is_tensor(affine):
+    if not isinstance(affine, torch.Tensor):
         raise TypeError(f"affine must be a torch.Tensor but is {type(affine).__name__}.")
     if affine.ndimension() != 3 or affine.shape[1] != affine.shape[2]:
         raise ValueError(f"affine must be Nxdxd, got {tuple(affine.shape)}.")
diff --git a/monai/optimizers/lr_finder.py b/monai/optimizers/lr_finder.py
index 6ad4132dd0..9e753a1ced 100644
--- a/monai/optimizers/lr_finder.py
+++ b/monai/optimizers/lr_finder.py
@@ -5,7 +5,6 @@
 import numpy as np
 import torch
 import torch.nn as nn
-from numpy.core.arrayprint import _none_or_positive_arg
 from torch.optim import Optimizer
 from torch.utils.data import DataLoader
 
@@ -363,7 +362,7 @@ def _set_learning_rate(self, new_lrs: Union[float, list]) -> None:
         for param_group, new_lr in zip(self.optimizer.param_groups, new_lrs):
             param_group["lr"] = new_lr
 
-    def _check_for_scheduler(self) -> _none_or_positive_arg:
+    def _check_for_scheduler(self):
         """Check optimizer doesn't already have scheduler."""
         for param_group in self.optimizer.param_groups:
             if "initial_lr" in param_group:
diff --git a/monai/transforms/croppad/array.py b/monai/transforms/croppad/array.py
index e59eb89ac7..b4444803a4 100644
--- a/monai/transforms/croppad/array.py
+++ b/monai/transforms/croppad/array.py
@@ -16,6 +16,7 @@
 from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
 
 import numpy as np
+import torch
 
 from monai.config import IndexSelection
 from monai.data.utils import get_random_patch, get_valid_patch_size
@@ -128,7 +129,7 @@ def __init__(
         self.spatial_border = spatial_border
         self.mode: NumpyPadMode = NumpyPadMode(mode)
 
-    def __call__(self, img: np.ndarray, mode: Optional[Union[NumpyPadMode, str]] = None) -> np.ndarray:
+    def __call__(self, img: np.ndarray, mode: Optional[Union[NumpyPadMode, str]] = None):
         """
         Args:
             img: data to be transformed, assuming `img` is channel-first and
@@ -219,10 +220,10 @@ class SpatialCrop(Transform):
 
     def __init__(
         self,
-        roi_center: Optional[Sequence[int]] = None,
-        roi_size: Optional[Sequence[int]] = None,
-        roi_start: Optional[Sequence[int]] = None,
-        roi_end: Optional[Sequence[int]] = None,
+        roi_center: Union[Sequence[int], np.ndarray, None] = None,
+        roi_size: Union[Sequence[int], np.ndarray, None] = None,
+        roi_start: Union[Sequence[int], np.ndarray, None] = None,
+        roi_end: Union[Sequence[int], np.ndarray, None] = None,
     ) -> None:
         """
         Args:
@@ -242,14 +243,14 @@ def __init__(
             self.roi_start = np.maximum(np.asarray(roi_start, dtype=np.int16), 0)
             self.roi_end = np.maximum(np.asarray(roi_end, dtype=np.int16), self.roi_start)
 
-    def __call__(self, img: np.ndarray) -> np.ndarray:
+    def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
         """
         Apply the transform to `img`, assuming `img` is channel-first and
         slicing doesn't apply to the channel dim.
         """
         sd = min(len(self.roi_start), len(self.roi_end), len(img.shape[1:]))  # spatial dims
         slices = [slice(None)] + [slice(s, e) for s, e in zip(self.roi_start[:sd], self.roi_end[:sd])]
-        return img[tuple(slices)]
+        return np.asarray(img[tuple(slices)])
 
 
 class CenterSpatialCrop(Transform):
@@ -264,7 +265,7 @@ class CenterSpatialCrop(Transform):
     def __init__(self, roi_size: Union[Sequence[int], int]) -> None:
         self.roi_size = roi_size
 
-    def __call__(self, img: np.ndarray) -> np.ndarray:
+    def __call__(self, img: np.ndarray):
         """
         Apply the transform to `img`, assuming `img` is channel-first and
         slicing doesn't apply to the channel dim.
@@ -306,7 +307,7 @@ def randomize(self, img_size: Sequence[int]) -> None:
         valid_size = get_valid_patch_size(img_size, self._size)
         self._slices = (slice(None),) + get_random_patch(img_size, valid_size, self.R)
 
-    def __call__(self, img: np.ndarray) -> np.ndarray:
+    def __call__(self, img: np.ndarray):
         """
         Apply the transform to `img`, assuming `img` is channel-first and
         slicing doesn't apply to the channel dim.
@@ -590,6 +591,8 @@ def __call__(
         """
         if label is None:
             label = self.label
+        if label is None:
+            raise ValueError("label should be provided.")
         if image is None:
             image = self.image
         if fg_indices is None or bg_indices is None:
@@ -602,7 +605,7 @@ def __call__(
         results: List[np.ndarray] = []
         if self.centers is not None:
             for center in self.centers:
-                cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)
+                cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)  # type: ignore
                 results.append(cropper(img))
 
         return results
diff --git a/monai/transforms/croppad/dictionary.py b/monai/transforms/croppad/dictionary.py
index 8bf33dd632..1faed25605 100644
--- a/monai/transforms/croppad/dictionary.py
+++ b/monai/transforms/croppad/dictionary.py
@@ -412,8 +412,8 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.nda
         box_start, box_end = generate_spatial_bounding_box(
             d[self.source_key], self.select_fn, self.channel_indices, self.margin
         )
-        d[self.start_coord_key] = box_start
-        d[self.end_coord_key] = box_end
+        d[self.start_coord_key] = np.asarray(box_start)
+        d[self.end_coord_key] = np.asarray(box_end)
         cropper = SpatialCrop(roi_start=box_start, roi_end=box_end)
         for key in self.keys:
             d[key] = cropper(d[key])
@@ -583,7 +583,7 @@ def __call__(self, data: Mapping[Hashable, np.ndarray]) -> List[Dict[Hashable, n
             if key in self.keys:
                 img = d[key]
                 for i, center in enumerate(self.centers):
-                    cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)
+                    cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)  # type: ignore
                     results[i][key] = cropper(img)
             else:
                 for i in range(self.num_samples):
diff --git a/monai/transforms/intensity/array.py b/monai/transforms/intensity/array.py
index 205b719246..87091f6237 100644
--- a/monai/transforms/intensity/array.py
+++ b/monai/transforms/intensity/array.py
@@ -20,6 +20,7 @@
 import numpy as np
 import torch
 
+from monai.config import DtypeLike
 from monai.networks.layers import GaussianFilter, HilbertTransform, SavitzkyGolayFilter
 from monai.transforms.compose import Randomizable, Transform
 from monai.transforms.utils import rescale_array
@@ -97,7 +98,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray:
         """
         Apply the transform to `img`.
         """
-        return (img + self.offset).astype(img.dtype)
+        return np.asarray((img + self.offset), dtype=img.dtype)
 
 
 class RandShiftIntensity(Randomizable, Transform):
@@ -165,9 +166,9 @@ def __call__(self, img: np.ndarray) -> np.ndarray:
 
         """
         if self.minv is not None and self.maxv is not None:
-            return rescale_array(img, self.minv, self.maxv, img.dtype)
+            return np.asarray(rescale_array(img, self.minv, self.maxv, img.dtype))
         if self.factor is not None:
-            return (img * (1 + self.factor)).astype(img.dtype)
+            return np.asarray(img * (1 + self.factor), dtype=img.dtype)
         raise ValueError("Incompatible values: minv=None or maxv=None and factor=None.")
@@ -229,11 +230,11 @@ class NormalizeIntensity(Transform):
 
     def __init__(
         self,
-        subtrahend: Optional[Sequence] = None,
-        divisor: Optional[Sequence] = None,
+        subtrahend: Union[Sequence, np.ndarray, None] = None,
+        divisor: Union[Sequence, np.ndarray, None] = None,
         nonzero: bool = False,
         channel_wise: bool = False,
-        dtype: np.dtype = np.float32,
+        dtype: DtypeLike = np.float32,
     ) -> None:
         self.subtrahend = subtrahend
         self.divisor = divisor
@@ -304,7 +305,9 @@ def __call__(self, img: np.ndarray) -> np.ndarray:
         """
         Apply the transform to `img`.
         """
-        return np.where(img > self.threshold if self.above else img < self.threshold, img, self.cval).astype(img.dtype)
+        return np.asarray(
+            np.where(img > self.threshold if self.above else img < self.threshold, img, self.cval), dtype=img.dtype
+        )
 
 
 class ScaleIntensityRange(Transform):
@@ -327,7 +330,7 @@ def __init__(self, a_min: float, a_max: float, b_min: float, b_max: float, clip:
         self.b_max = b_max
         self.clip = clip
 
-    def __call__(self, img: np.ndarray) -> np.ndarray:
+    def __call__(self, img: np.ndarray):
         """
         Apply the transform to `img`.
         """
@@ -338,8 +341,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray:
         img = (img - self.a_min) / (self.a_max - self.a_min)
         img = img * (self.b_max - self.b_min) + self.b_min
         if self.clip:
-            img = np.clip(img, self.b_min, self.b_max)
-
+            img = np.asarray(np.clip(img, self.b_min, self.b_max))
         return img
 
 
@@ -358,7 +360,7 @@ def __init__(self, gamma: float) -> None:
             raise AssertionError("gamma must be a float or int number.")
         self.gamma = gamma
 
-    def __call__(self, img: np.ndarray) -> np.ndarray:
+    def __call__(self, img: np.ndarray):
         """
         Apply the transform to `img`.
         """
@@ -483,7 +485,7 @@ def __init__(
         self.clip = clip
         self.relative = relative
 
-    def __call__(self, img: np.ndarray) -> np.ndarray:
+    def __call__(self, img: np.ndarray):
         """
         Apply the transform to `img`.
         """
@@ -500,7 +502,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray:
             img = scalar(img)
 
         if self.clip:
-            img = np.clip(img, self.b_min, self.b_max)
+            img = np.asarray(np.clip(img, self.b_min, self.b_max))
 
         return img
@@ -513,36 +515,44 @@ class MaskIntensity(Transform):
     data will be set to `0`, others will keep the original value.
 
     Args:
-        mask_data: if mask data is single channel, apply to evey channel
-            of input image. if multiple channels, the channel number must
-            match input data. mask_data will be converted to `bool` values
+        mask_data: if `mask_data` is single channel, apply to every channel
+            of input image. if multiple channels, the number of channels must
+            match the input data. `mask_data` will be converted to `bool` values
             by `mask_data > 0` before applying transform to input image.
 
     """
 
-    def __init__(self, mask_data: np.ndarray) -> None:
+    def __init__(self, mask_data: Optional[np.ndarray]) -> None:
         self.mask_data = mask_data
 
     def __call__(self, img: np.ndarray, mask_data: Optional[np.ndarray] = None) -> np.ndarray:
         """
         Args:
-            mask_data: if mask data is single channel, apply to evey channel
+            mask_data: if mask data is single channel, apply to every channel
                 of input image. if multiple channels, the channel number must
                 match input data. mask_data will be converted to `bool` values
                 by `mask_data > 0` before applying transform to input image.
 
         Raises:
-            ValueError: When ``mask_data`` and ``img`` channels differ and ``mask_data`` is not single channel.
-
-        """
-        mask_data_ = self.mask_data > 0 if mask_data is None else mask_data > 0
+            - ValueError: When both ``mask_data`` and ``self.mask_data`` are None.
+            - ValueError: When ``mask_data`` and ``img`` channels differ and ``mask_data`` is not single channel.
+
+        """
+        if self.mask_data is None and mask_data is None:
+            raise ValueError("Unknown mask_data.")
+        mask_data_ = np.array([[1]])
+        if self.mask_data is not None and mask_data is None:
+            mask_data_ = self.mask_data > 0
+        if mask_data is not None:
+            mask_data_ = mask_data > 0
+        mask_data_ = np.asarray(mask_data_)
         if mask_data_.shape[0] != 1 and mask_data_.shape[0] != img.shape[0]:
             raise ValueError(
                 "When mask_data is not single channel, mask_data channels must match img, "
                 f"got img={img.shape[0]} mask_data={mask_data_.shape[0]}."
             )
 
-        return img * mask_data_
+        return np.asarray(img * mask_data_)
 
 
 class SavitzkyGolaySmooth(Transform):
@@ -567,7 +577,7 @@ def __init__(self, window_length: int, order: int, axis: int = 1, mode: str = "z
         self.axis = axis
         self.mode = mode
 
-    def __call__(self, img: np.ndarray) -> np.ndarray:
+    def __call__(self, img: np.ndarray):
         """
         Args:
             img: numpy.ndarray containing input data. Must be real and in shape [channels, spatial1, spatial2, ...].
@@ -606,7 +616,7 @@ def __init__(self, axis: int = 1, n: Union[int, None] = None) -> None: self.axis = axis self.n = n - def __call__(self, img: np.ndarray) -> np.ndarray: + def __call__(self, img: np.ndarray): """ Args: @@ -641,7 +651,7 @@ def __init__(self, sigma: Union[Sequence[float], float] = 1.0, approx: str = "er self.sigma = sigma self.approx = approx - def __call__(self, img: np.ndarray) -> np.ndarray: + def __call__(self, img: np.ndarray): gaussian_filter = GaussianFilter(img.ndim - 1, self.sigma, approx=self.approx) input_data = torch.as_tensor(np.ascontiguousarray(img), dtype=torch.float).unsqueeze(0) return gaussian_filter(input_data).squeeze(0).detach().numpy() @@ -682,7 +692,7 @@ def randomize(self, data: Optional[Any] = None) -> None: self.y = self.R.uniform(low=self.sigma_y[0], high=self.sigma_y[1]) self.z = self.R.uniform(low=self.sigma_z[0], high=self.sigma_z[1]) - def __call__(self, img: np.ndarray) -> np.ndarray: + def __call__(self, img: np.ndarray): self.randomize() if not self._do_transform: return img @@ -729,7 +739,7 @@ def __init__( self.alpha = alpha self.approx = approx - def __call__(self, img: np.ndarray) -> np.ndarray: + def __call__(self, img: np.ndarray): gaussian_filter1 = GaussianFilter(img.ndim - 1, self.sigma1, approx=self.approx) gaussian_filter2 = GaussianFilter(img.ndim - 1, self.sigma2, approx=self.approx) input_data = torch.as_tensor(np.ascontiguousarray(img), dtype=torch.float).unsqueeze(0) @@ -796,7 +806,7 @@ def randomize(self, data: Optional[Any] = None) -> None: self.z2 = self.R.uniform(low=sigma2_z[0], high=sigma2_z[1]) self.a = self.R.uniform(low=self.alpha[0], high=self.alpha[1]) - def __call__(self, img: np.ndarray) -> np.ndarray: + def __call__(self, img: np.ndarray): self.randomize() if not self._do_transform: return img @@ -848,4 +858,6 @@ def __call__(self, img: np.ndarray) -> np.ndarray: img_min, img_max = img.min(), img.max() reference_control_points_scaled = self.reference_control_points * (img_max - img_min) + img_min floating_control_points_scaled = self.floating_control_points * (img_max - img_min) + img_min - return np.interp(img, reference_control_points_scaled, floating_control_points_scaled).astype(img.dtype) + return np.asarray( + np.interp(img, reference_control_points_scaled, floating_control_points_scaled), dtype=img.dtype + ) diff --git a/monai/transforms/intensity/dictionary.py b/monai/transforms/intensity/dictionary.py index 18e2250084..48f0657ab0 100644 --- a/monai/transforms/intensity/dictionary.py +++ b/monai/transforms/intensity/dictionary.py @@ -21,7 +21,7 @@ import numpy as np import torch -from monai.config import KeysCollection +from monai.config import DtypeLike, KeysCollection from monai.transforms.compose import MapTransform, Randomizable from monai.transforms.intensity.array import ( AdjustContrast, @@ -294,7 +294,7 @@ def __init__( divisor: Optional[np.ndarray] = None, nonzero: bool = False, channel_wise: bool = False, - dtype: np.dtype = np.float32, + dtype: DtypeLike = np.float32, ) -> None: super().__init__(keys) self.normalizer = NormalizeIntensity(subtrahend, divisor, nonzero, channel_wise, dtype) @@ -474,7 +474,7 @@ class MaskIntensityd(MapTransform): Args: keys: keys of the corresponding items to be transformed. See also: :py:class:`monai.transforms.compose.MapTransform` - mask_data: if mask data is single channel, apply to evey channel + mask_data: if mask data is single channel, apply to every channel of input image. if multiple channels, the channel number must match input data. 
mask_data will be converted to `bool` values by `mask_data > 0` before applying transform to input image. diff --git a/monai/transforms/io/array.py b/monai/transforms/io/array.py index 3b359cc460..772c7cf74f 100644 --- a/monai/transforms/io/array.py +++ b/monai/transforms/io/array.py @@ -17,6 +17,7 @@ import numpy as np +from monai.config import DtypeLike from monai.data.image_reader import ImageReader, ITKReader, NibabelReader, NumpyReader, PILReader from monai.transforms.compose import Transform from monai.utils import ensure_tuple, optional_import @@ -42,7 +43,7 @@ def __init__( self, reader: Optional[Union[ImageReader, str]] = None, image_only: bool = False, - dtype: np.dtype = np.float32, + dtype: DtypeLike = np.float32, *args, **kwargs, ) -> None: diff --git a/monai/transforms/io/dictionary.py b/monai/transforms/io/dictionary.py index 62ac4c8562..40737374cf 100644 --- a/monai/transforms/io/dictionary.py +++ b/monai/transforms/io/dictionary.py @@ -19,7 +19,7 @@ import numpy as np -from monai.config import KeysCollection +from monai.config import DtypeLike, KeysCollection from monai.data.image_reader import ImageReader from monai.transforms.compose import MapTransform from monai.transforms.io.array import LoadImage @@ -52,7 +52,7 @@ def __init__( self, keys: KeysCollection, reader: Optional[Union[ImageReader, str]] = None, - dtype: Optional[np.dtype] = np.float32, + dtype: DtypeLike = np.float32, meta_key_postfix: str = "meta_dict", overwriting: bool = False, *args, diff --git a/monai/transforms/spatial/array.py b/monai/transforms/spatial/array.py index 3e1ded4e94..75a25459e8 100644 --- a/monai/transforms/spatial/array.py +++ b/monai/transforms/spatial/array.py @@ -19,7 +19,7 @@ import numpy as np import torch -from monai.config import USE_COMPILED +from monai.config import USE_COMPILED, DtypeLike from monai.data.utils import compute_shape_offset, to_affine_nd, zoom_affine from monai.networks.layers import AffineTransform, GaussianFilter, grid_pull from monai.transforms.compose import Randomizable, Transform @@ -81,7 +81,7 @@ def __init__( mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR, padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER, align_corners: bool = False, - dtype: Optional[np.dtype] = np.float64, + dtype: DtypeLike = np.float64, ) -> None: """ Args: @@ -123,7 +123,7 @@ def __call__( mode: Optional[Union[GridSampleMode, str]] = None, padding_mode: Optional[Union[GridSamplePadMode, str]] = None, align_corners: Optional[bool] = None, - dtype: Optional[np.dtype] = None, + dtype: DtypeLike = None, ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Args: @@ -192,7 +192,7 @@ def __call__( torch.as_tensor(np.ascontiguousarray(transform).astype(_dtype)), spatial_size=output_shape, ) - output_data = output_data.squeeze(0).detach().cpu().numpy().astype(np.float32) + output_data = np.asarray(output_data.squeeze(0).detach().cpu().numpy(), dtype=np.float32) # type: ignore new_affine = to_affine_nd(affine, new_affine) return output_data, affine, new_affine @@ -372,7 +372,7 @@ def __call__( align_corners=self.align_corners if align_corners is None else align_corners, ) resized = resized.squeeze(0).detach().cpu().numpy() - return resized + return np.asarray(resized) class Rotate(Transform): @@ -404,7 +404,7 @@ def __init__( mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR, padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER, align_corners: bool = False, - dtype: Optional[np.dtype] = np.float64, + dtype: DtypeLike = np.float64, ) 
-> None: self.angle = angle self.keep_size = keep_size @@ -419,7 +419,7 @@ def __call__( mode: Optional[Union[GridSampleMode, str]] = None, padding_mode: Optional[Union[GridSamplePadMode, str]] = None, align_corners: Optional[bool] = None, - dtype: Optional[np.dtype] = None, + dtype: DtypeLike = None, ) -> np.ndarray: """ Args: @@ -457,7 +457,7 @@ def __call__( (len(im_shape), -1) ) corners = transform[:-1, :-1] @ corners - output_shape = (corners.ptp(axis=1) + 0.5).astype(int) + output_shape = np.asarray(corners.ptp(axis=1) + 0.5, dtype=int) shift_1 = create_translate(input_ndim, -(output_shape - 1) / 2) transform = shift @ transform @ shift_1 @@ -473,8 +473,7 @@ def __call__( torch.as_tensor(np.ascontiguousarray(transform).astype(_dtype)), spatial_size=output_shape, ) - output = output.squeeze(0).detach().cpu().numpy().astype(np.float32) - return output + return np.asarray(output.squeeze(0).detach().cpu().numpy(), dtype=np.float32) class Zoom(Transform): @@ -522,7 +521,7 @@ def __call__( mode: Optional[Union[InterpolateMode, str]] = None, padding_mode: Optional[Union[NumpyPadMode, str]] = None, align_corners: Optional[bool] = None, - ) -> np.ndarray: + ): """ Args: img: channel first array, must have shape: (num_channels, H[, W, ..., ]). @@ -670,7 +669,7 @@ def __init__( mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR, padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER, align_corners: bool = False, - dtype: Optional[np.dtype] = np.float64, + dtype: DtypeLike = np.float64, ) -> None: self.range_x = ensure_tuple(range_x) if len(self.range_x) == 1: @@ -706,7 +705,7 @@ def __call__( mode: Optional[Union[GridSampleMode, str]] = None, padding_mode: Optional[Union[GridSamplePadMode, str]] = None, align_corners: Optional[bool] = None, - dtype: Optional[np.dtype] = None, + dtype: DtypeLike = None, ) -> np.ndarray: """ Args: @@ -856,12 +855,15 @@ def __call__( # if 2 zoom factors provided for 3D data, use the first factor for H and W dims, second factor for D dim self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 2) + ensure_tuple(self._zoom[-1]) zoomer = Zoom(self._zoom, keep_size=self.keep_size) - return zoomer( - img, - mode=mode or self.mode, - padding_mode=padding_mode or self.padding_mode, - align_corners=self.align_corners if align_corners is None else align_corners, - ).astype(_dtype) + return np.asarray( + zoomer( + img, + mode=mode or self.mode, + padding_mode=padding_mode or self.padding_mode, + align_corners=self.align_corners if align_corners is None else align_corners, + ), + dtype=_dtype, + ) class AffineGrid(Transform): @@ -937,13 +939,15 @@ def __call__( affine = affine @ create_scale(spatial_dims, self.scale_params) affine = torch.as_tensor(np.ascontiguousarray(affine), device=self.device) - grid = torch.tensor(grid) if not torch.is_tensor(grid) else grid.detach().clone() + grid = torch.tensor(grid) if not isinstance(grid, torch.Tensor) else grid.detach().clone() if self.device: grid = grid.to(self.device) grid = (affine.float() @ grid.reshape((grid.shape[0], -1)).float()).reshape([-1] + list(grid.shape[1:])) + if grid is None or not isinstance(grid, torch.Tensor): + raise ValueError("Unknown grid.") if self.as_tensor_output: return grid - return grid.cpu().numpy() + return np.asarray(grid.cpu().numpy()) class RandAffineGrid(Randomizable, Transform): @@ -1069,7 +1073,7 @@ def randomize(self, grid_size: Sequence[int]) -> None: self.random_offset = self.R.normal(size=([len(grid_size)] + list(grid_size))).astype(np.float32) self.rand_mag = 
self.R.uniform(self.magnitude[0], self.magnitude[1]) - def __call__(self, spatial_size: Sequence[int]) -> Union[np.ndarray, torch.Tensor]: + def __call__(self, spatial_size: Sequence[int]): """ Args: spatial_size: spatial size of the grid. @@ -1129,11 +1133,11 @@ def __call__( See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample """ - if not torch.is_tensor(img): + if not isinstance(img, torch.Tensor): img = torch.as_tensor(np.ascontiguousarray(img)) if grid is None: raise AssertionError("Error, grid argument must be supplied as an ndarray or tensor ") - grid = torch.tensor(grid) if not torch.is_tensor(grid) else grid.detach().clone() + grid = torch.tensor(grid) if not isinstance(grid, torch.Tensor) else grid.detach().clone() if self.device: img = img.to(self.device) grid = grid.to(self.device) @@ -1173,8 +1177,8 @@ def __call__( align_corners=True, )[0] if self.as_tensor_output: - return out - return out.cpu().numpy() + return torch.as_tensor(out) + return np.asarray(out.cpu().numpy()) class Affine(Transform): @@ -1499,12 +1503,12 @@ def __call__( grid = self.rand_affine_grid(grid=grid) grid = torch.nn.functional.interpolate( # type: ignore recompute_scale_factor=True, - input=grid.unsqueeze(0), + input=torch.as_tensor(grid).unsqueeze(0), scale_factor=list(ensure_tuple(self.deform_grid.spacing)), mode=InterpolateMode.BICUBIC.value, align_corners=False, ) - grid = CenterSpatialCrop(roi_size=sp_size)(grid[0]) + grid = CenterSpatialCrop(roi_size=sp_size)(np.asarray(grid[0])) else: grid = create_grid(spatial_size=sp_size) return self.resampler(img, grid, mode=mode or self.mode, padding_mode=padding_mode or self.padding_mode) diff --git a/monai/transforms/spatial/dictionary.py b/monai/transforms/spatial/dictionary.py index 615a327d90..e612a25ef8 100644 --- a/monai/transforms/spatial/dictionary.py +++ b/monai/transforms/spatial/dictionary.py @@ -20,7 +20,7 @@ import numpy as np import torch -from monai.config import KeysCollection +from monai.config import DtypeLike, KeysCollection from monai.networks.layers.simplelayers import GaussianFilter from monai.transforms.compose import MapTransform, Randomizable from monai.transforms.croppad.array import CenterSpatialCrop @@ -120,7 +120,7 @@ def __init__( mode: GridSampleModeSequence = GridSampleMode.BILINEAR, padding_mode: GridSamplePadModeSequence = GridSamplePadMode.BORDER, align_corners: Union[Sequence[bool], bool] = False, - dtype: Optional[Union[Sequence[np.dtype], np.dtype]] = np.float64, + dtype: Optional[Union[Sequence[DtypeLike], DtypeLike]] = np.float64, meta_key_postfix: str = "meta_dict", ) -> None: """ @@ -152,7 +152,7 @@ def __init__( dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision. If None, use the data type of input data. To be compatible with other modules, the output data type is always ``np.float32``. - It also can be a sequence of np.dtype, each element corresponds to a key in ``keys``. + It also can be a sequence of dtypes, each element corresponds to a key in ``keys``. meta_key_postfix: use `key_{postfix}` to to fetch the meta data according to the key data, default is `meta_dict`, the meta data is a dictionary object. 
diff --git a/monai/transforms/utility/array.py b/monai/transforms/utility/array.py
index 5476e800f4..c0ae40de59 100644
--- a/monai/transforms/utility/array.py
+++ b/monai/transforms/utility/array.py
@@ -15,11 +15,12 @@
 import logging
 import time
-from typing import Callable, List, Optional, Sequence, Tuple, TypeVar, Union
+from typing import Callable, List, Optional, Sequence, Tuple, Union

 import numpy as np
 import torch

+from monai.config import DtypeLike, NdarrayTensor
 from monai.transforms.compose import Randomizable, Transform
 from monai.transforms.utils import extreme_points_to_image, get_extreme_points, map_binary_to_indices
 from monai.utils import ensure_tuple, min_version, optional_import
@@ -46,10 +47,6 @@
     "TorchVision",
 ]

-# Generic type which can represent either a numpy.ndarray or a torch.Tensor
-# Unlike Union can create a dependence between parameter(s) / return(s)
-NdarrayTensor = TypeVar("NdarrayTensor", np.ndarray, torch.Tensor)
-

 class Identity(Transform):
     """
@@ -135,7 +132,7 @@ class AddChannel(Transform):
         transforms.
     """

-    def __call__(self, img: NdarrayTensor) -> NdarrayTensor:
+    def __call__(self, img: NdarrayTensor):
         """
         Apply the transform to `img`.
         """
@@ -209,7 +206,7 @@ class CastToType(Transform):
     specified PyTorch data type.
     """

-    def __init__(self, dtype: Union[np.dtype, torch.dtype] = np.float32) -> None:
+    def __init__(self, dtype=np.float32) -> None:
         """
         Args:
             dtype: convert image to this data type, default is `np.float32`.
@@ -217,7 +214,7 @@ def __init__(self, dtype: Union[np.dtype, torch.dtype] = np.float32) -> None:
         self.dtype = dtype

     def __call__(
-        self, img: Union[np.ndarray, torch.Tensor], dtype: Optional[Union[np.dtype, torch.dtype]] = None
+        self, img: Union[np.ndarray, torch.Tensor], dtype: Optional[Union[DtypeLike, torch.dtype]] = None
     ) -> Union[np.ndarray, torch.Tensor]:
         """
         Apply the transform to `img`, assuming `img` is a numpy array or PyTorch Tensor.
@@ -230,8 +227,8 @@ def __call__(

         """
         if isinstance(img, np.ndarray):
-            return img.astype(self.dtype if dtype is None else dtype)
-        if torch.is_tensor(img):
+            return img.astype(self.dtype if dtype is None else dtype)  # type: ignore
+        if isinstance(img, torch.Tensor):
             return torch.as_tensor(img, dtype=self.dtype if dtype is None else dtype)
         raise TypeError(f"img must be one of (numpy.ndarray, torch.Tensor) but is {type(img).__name__}.")
@@ -245,7 +242,7 @@ def __call__(self, img: Union[np.ndarray, torch.Tensor]) -> torch.Tensor:
         """
         Apply the transform to `img` and make it contiguous.
         """
-        if torch.is_tensor(img):
+        if isinstance(img, torch.Tensor):
             return img.contiguous()
         return torch.as_tensor(np.ascontiguousarray(img))
@@ -259,7 +256,7 @@ def __call__(self, img: Union[List, Tuple, np.ndarray, torch.Tensor]) -> np.ndar
         """
         Apply the transform to `img` and make it contiguous.
         """
-        if torch.is_tensor(img):
+        if isinstance(img, torch.Tensor):
             img = img.detach().cpu().numpy()  # type: ignore
         return np.ascontiguousarray(img)
@@ -276,7 +273,7 @@ def __call__(self, img: np.ndarray) -> np.ndarray:
         """
         Apply the transform to `img`.
         """
-        return img.transpose(self.indices)
+        return img.transpose(self.indices)  # type: ignore


 class SqueezeDim(Transform):
@@ -303,7 +300,7 @@ def __call__(self, img: NdarrayTensor) -> NdarrayTensor:
         Args:
             img: numpy arrays with required dimension `dim` removed
         """
-        return img.squeeze(self.dim)
+        return img.squeeze(self.dim)  # type: ignore


 class DataStats(Transform):
@@ -372,7 +369,7 @@ def __call__(
         if self.value_range if value_range is None else value_range:
             if isinstance(img, np.ndarray):
                 lines.append(f"Value range: ({np.min(img)}, {np.max(img)})")
-            elif torch.is_tensor(img):
+            elif isinstance(img, torch.Tensor):
                 lines.append(f"Value range: ({torch.min(img)}, {torch.max(img)})")
             else:
                 lines.append(f"Value range: (not a PyTorch or Numpy array, type: {type(img)})")
@@ -497,7 +494,7 @@ def __init__(  # pytype: disable=annotation-type-mismatch

     def __call__(
         self, img: np.ndarray, select_labels: Optional[Union[Sequence[int], int]] = None, merge_channels: bool = False
-    ) -> np.ndarray:
+    ):
         """
         Args:
             select_labels: labels to generate mask from. for 1 channel label, the `select_labels`
@@ -617,7 +614,7 @@ def __call__(
         sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor] = 3.0,
         rescale_min: float = -1.0,
         rescale_max: float = 1.0,
-    ) -> np.ndarray:
+    ):
         """
         Args:
             img: the image that we want to add new channel to.
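The hunks above delete the locally defined `NdarrayTensor` in favour of the one imported from `monai.config`. What the constrained TypeVar buys over a plain Union is that input and output types stay linked; a standalone sketch:

    from typing import TypeVar, Union

    import numpy as np
    import torch

    NdarrayTensor = TypeVar("NdarrayTensor", np.ndarray, torch.Tensor)


    def scale(img: NdarrayTensor) -> NdarrayTensor:
        # the checker knows: ndarray in -> ndarray out, tensor in -> tensor out
        return img * 2


    def scale_union(img: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
        # with a Union, callers only learn "one of the two" and must narrow again
        return img * 2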
diff --git a/monai/transforms/utility/dictionary.py b/monai/transforms/utility/dictionary.py
index ef89dbe32d..951c9dd459 100644
--- a/monai/transforms/utility/dictionary.py
+++ b/monai/transforms/utility/dictionary.py
@@ -22,7 +22,7 @@
 import numpy as np
 import torch

-from monai.config import KeysCollection
+from monai.config import DtypeLike, KeysCollection, NdarrayTensor
 from monai.transforms.compose import MapTransform, Randomizable
 from monai.transforms.utility.array import (
     AddChannel,
@@ -127,7 +127,9 @@ def __init__(self, keys: KeysCollection) -> None:
         super().__init__(keys)
         self.identity = Identity()

-    def __call__(self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]) -> Dict[Hashable, np.ndarray]:
+    def __call__(
+        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
+    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
         d = dict(data)
         for key in self.keys:
             d[key] = self.identity(d[key])
@@ -192,9 +194,7 @@ def __init__(self, keys: KeysCollection) -> None:
         super().__init__(keys)
         self.adder = AddChannel()

-    def __call__(
-        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
-    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
+    def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]:
         d = dict(data)
         for key in self.keys:
             d[key] = self.adder(d[key])
@@ -279,14 +279,14 @@ class CastToTyped(MapTransform):
     def __init__(
         self,
         keys: KeysCollection,
-        dtype: Union[Sequence[Union[np.dtype, torch.dtype]], np.dtype, torch.dtype] = np.float32,
+        dtype: Union[Sequence[Union[DtypeLike, torch.dtype]], DtypeLike, torch.dtype] = np.float32,
     ) -> None:
         """
         Args:
             keys: keys of the corresponding items to be transformed.
                 See also: :py:class:`monai.transforms.compose.MapTransform`
             dtype: convert image to this data type, default is `np.float32`.
-                it also can be a sequence of np.dtype or torch.dtype,
+                it also can be a sequence of dtypes or torch.dtype,
                 each element corresponds to a key in ``keys``.

         """
@@ -318,7 +318,9 @@ def __init__(self, keys: KeysCollection) -> None:
         super().__init__(keys)
         self.converter = ToTensor()

-    def __call__(self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]) -> Dict[Hashable, torch.Tensor]:
+    def __call__(
+        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
+    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
         d = dict(data)
         for key in self.keys:
             d[key] = self.converter(d[key])
@@ -339,7 +341,9 @@ def __init__(self, keys: KeysCollection) -> None:
         super().__init__(keys)
         self.converter = ToNumpy()

-    def __call__(self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]) -> Dict[Hashable, np.ndarray]:
+    def __call__(
+        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
+    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
         d = dict(data)
         for key in self.keys:
             d[key] = self.converter(d[key])
@@ -382,9 +386,7 @@ def __init__(self, keys: KeysCollection, dim: int = 0) -> None:
         super().__init__(keys)
         self.converter = SqueezeDim(dim=dim)

-    def __call__(
-        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
-    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
+    def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]:
         d = dict(data)
         for key in self.keys:
             d[key] = self.converter(d[key])
@@ -435,9 +437,7 @@ def __init__(
         self.logger_handler = logger_handler
         self.printer = DataStats(logger_handler=logger_handler)

-    def __call__(
-        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
-    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
+    def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]:
         d = dict(data)
         for idx, key in enumerate(self.keys):
             d[key] = self.printer(
@@ -469,9 +469,7 @@ def __init__(self, keys: KeysCollection, delay_time: Union[Sequence[float], floa
         self.delay_time = ensure_tuple_rep(delay_time, len(self.keys))
         self.delayer = SimulateDelay()

-    def __call__(
-        self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]
-    ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]:
+    def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]:
         d = dict(data)
         for idx, key in enumerate(self.keys):
             d[key] = self.delayer(d[key], delay_time=self.delay_time[idx])
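`CastToTyped` mixes numpy and torch dtypes in one call, since each key's value may be an ndarray or a tensor. A usage sketch with illustrative keys and shapes:

    import numpy as np
    import torch
    from monai.transforms import CastToTyped

    data = {"image": np.zeros((1, 4, 4)), "label": torch.zeros(1, 4, 4)}
    # one dtype per key: numpy dtype for the ndarray, torch dtype for the tensor
    cast = CastToTyped(keys=["image", "label"], dtype=(np.float32, torch.int64))
    out = cast(data)  # image becomes a float32 ndarray, label an int64 tensor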
""" @@ -318,7 +318,9 @@ def __init__(self, keys: KeysCollection) -> None: super().__init__(keys) self.converter = ToTensor() - def __call__(self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]) -> Dict[Hashable, torch.Tensor]: + def __call__( + self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]] + ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]: d = dict(data) for key in self.keys: d[key] = self.converter(d[key]) @@ -339,7 +341,9 @@ def __init__(self, keys: KeysCollection) -> None: super().__init__(keys) self.converter = ToNumpy() - def __call__(self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]) -> Dict[Hashable, np.ndarray]: + def __call__( + self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]] + ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]: d = dict(data) for key in self.keys: d[key] = self.converter(d[key]) @@ -382,9 +386,7 @@ def __init__(self, keys: KeysCollection, dim: int = 0) -> None: super().__init__(keys) self.converter = SqueezeDim(dim=dim) - def __call__( - self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]] - ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]: + def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]: d = dict(data) for key in self.keys: d[key] = self.converter(d[key]) @@ -435,9 +437,7 @@ def __init__( self.logger_handler = logger_handler self.printer = DataStats(logger_handler=logger_handler) - def __call__( - self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]] - ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]: + def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]: d = dict(data) for idx, key in enumerate(self.keys): d[key] = self.printer( @@ -469,9 +469,7 @@ def __init__(self, keys: KeysCollection, delay_time: Union[Sequence[float], floa self.delay_time = ensure_tuple_rep(delay_time, len(self.keys)) self.delayer = SimulateDelay() - def __call__( - self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]] - ) -> Dict[Hashable, Union[np.ndarray, torch.Tensor]]: + def __call__(self, data: Mapping[Hashable, NdarrayTensor]) -> Dict[Hashable, NdarrayTensor]: d = dict(data) for idx, key in enumerate(self.keys): d[key] = self.delayer(d[key], delay_time=self.delay_time[idx]) diff --git a/monai/transforms/utils.py b/monai/transforms/utils.py index 23c6bd100a..e5e9f81cc6 100644 --- a/monai/transforms/utils.py +++ b/monai/transforms/utils.py @@ -17,7 +17,7 @@ import numpy as np import torch -from monai.config import IndexSelection +from monai.config import DtypeLike, IndexSelection from monai.networks.layers import GaussianFilter from monai.utils import ensure_tuple, ensure_tuple_rep, ensure_tuple_size, fall_back_tuple, min_version, optional_import @@ -58,7 +58,7 @@ def rand_choice(prob: float = 0.5) -> bool: return bool(random.random() <= prob) -def img_bounds(img: np.ndarray) -> np.ndarray: +def img_bounds(img: np.ndarray): """ Returns the minimum and maximum indices of non-zero lines in axis 0 of `img`, followed by that for axis 1. """ @@ -91,9 +91,7 @@ def zero_margins(img: np.ndarray, margin: int) -> bool: return not np.any(img[:, :margin, :]) and not np.any(img[:, -margin:, :]) -def rescale_array( - arr: np.ndarray, minv: float = 0.0, maxv: float = 1.0, dtype: Optional[np.dtype] = np.float32 -) -> np.ndarray: +def rescale_array(arr: np.ndarray, minv: float = 0.0, maxv: float = 1.0, dtype: DtypeLike = np.float32): """ Rescale the values of numpy array `arr` to be from `minv` to `maxv`. 
""" @@ -111,7 +109,7 @@ def rescale_array( def rescale_instance_array( - arr: np.ndarray, minv: float = 0.0, maxv: float = 1.0, dtype: np.dtype = np.float32 + arr: np.ndarray, minv: float = 0.0, maxv: float = 1.0, dtype: DtypeLike = np.float32 ) -> np.ndarray: """ Rescale each array slice along the first dimension of `arr` independently. @@ -123,12 +121,12 @@ def rescale_instance_array( return out -def rescale_array_int_max(arr: np.ndarray, dtype: np.dtype = np.uint16) -> np.ndarray: +def rescale_array_int_max(arr: np.ndarray, dtype: DtypeLike = np.uint16) -> np.ndarray: """ Rescale the array `arr` to be between the minimum and maximum values of the type `dtype`. """ info: np.iinfo = np.iinfo(dtype) - return rescale_array(arr, info.min, info.max).astype(dtype) + return np.asarray(rescale_array(arr, info.min, info.max), dtype=dtype) def copypaste_arrays( @@ -191,9 +189,7 @@ def copypaste_arrays( return tuple(srcslices), tuple(destslices) -def resize_center( - img: np.ndarray, *resize_dims: Optional[int], fill_value: float = 0.0, inplace: bool = True -) -> np.ndarray: +def resize_center(img: np.ndarray, *resize_dims: Optional[int], fill_value: float = 0.0, inplace: bool = True): """ Resize `img` by cropping or expanding the image from the center. The `resize_dims` values are the output dimensions (or None to use original dimension of `img`). If a dimension is smaller than that of `img` then the result will be @@ -208,7 +204,7 @@ def resize_center( srcslices, destslices = copypaste_arrays(img.shape, resize_dims, half_img_shape, half_dest_shape, resize_dims) if not inplace: - dest = np.full(resize_dims, fill_value, img.dtype) + dest = np.full(resize_dims, fill_value, img.dtype) # type: ignore dest[destslices] = img[srcslices] return dest return img[srcslices] @@ -271,8 +267,8 @@ def weighted_patch_samples( raise ValueError("w must be an ND array.") if r_state is None: r_state = np.random.RandomState() - img_size = np.asarray(w.shape, dtype=np.int) - win_size = np.asarray(fall_back_tuple(spatial_size, img_size), dtype=np.int) + img_size = np.asarray(w.shape, dtype=int) + win_size = np.asarray(fall_back_tuple(spatial_size, img_size), dtype=int) s = tuple(slice(w // 2, m - w + w // 2) if m > w else slice(m // 2, m // 2 + 1) for w, m in zip(win_size, img_size)) v = w[s] # weight map in the 'valid' mode @@ -287,7 +283,7 @@ def weighted_patch_samples( idx = v.searchsorted(r_state.random(n_samples) * v[-1], side="right") # compensate 'valid' mode diff = np.minimum(win_size, img_size) // 2 - return [np.unravel_index(i, v_size) + diff for i in np.asarray(idx, dtype=np.int)] + return [np.unravel_index(i, v_size) + diff for i in np.asarray(idx, dtype=int)] def generate_pos_neg_label_crop_centers( @@ -395,8 +391,8 @@ def create_grid( spatial_size: Sequence[int], spacing: Optional[Sequence[float]] = None, homogeneous: bool = True, - dtype: np.dtype = float, -) -> np.ndarray: + dtype: DtypeLike = float, +): """ compute a `spatial_size` mesh. 
@@ -395,8 +391,8 @@ def create_grid(
     spatial_size: Sequence[int],
     spacing: Optional[Sequence[float]] = None,
     homogeneous: bool = True,
-    dtype: np.dtype = float,
-) -> np.ndarray:
+    dtype: DtypeLike = float,
+):
     """
     compute a `spatial_size` mesh.
@@ -415,8 +411,8 @@ def create_grid(


 def create_control_grid(
-    spatial_shape: Sequence[int], spacing: Sequence[float], homogeneous: bool = True, dtype: np.dtype = float
-) -> np.ndarray:
+    spatial_shape: Sequence[int], spacing: Sequence[float], homogeneous: bool = True, dtype: DtypeLike = float
+):
     """
     control grid with two additional point in each direction
     """
@@ -461,11 +457,15 @@ def create_rotate(spatial_dims: int, radians: Union[Sequence[float], float]) ->
         )
         if len(radians) >= 2:
             sin_, cos_ = np.sin(radians[1]), np.cos(radians[1])
+            if affine is None:
+                raise ValueError("Affine should be a matrix.")
             affine = affine @ np.array(
                 [[cos_, 0.0, sin_, 0.0], [0.0, 1.0, 0.0, 0.0], [-sin_, 0.0, cos_, 0.0], [0.0, 0.0, 0.0, 1.0]]
             )
         if len(radians) >= 3:
             sin_, cos_ = np.sin(radians[2]), np.cos(radians[2])
+            if affine is None:
+                raise ValueError("Affine should be a matrix.")
             affine = affine @ np.array(
                 [[cos_, -sin_, 0.0, 0.0], [sin_, cos_, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
             )
@@ -504,7 +504,7 @@ def create_shear(spatial_dims: int, coefs: Union[Sequence[float], float]) -> np.
     raise NotImplementedError("Currently only spatial_dims in [2, 3] are supported.")


-def create_scale(spatial_dims: int, scaling_factor: Union[Sequence[float], float]) -> np.ndarray:
+def create_scale(spatial_dims: int, scaling_factor: Union[Sequence[float], float]):
     """
     create a scaling matrix
@@ -528,7 +528,7 @@ def create_translate(spatial_dims: int, shift: Union[Sequence[float], float]) ->
     affine = np.eye(spatial_dims + 1)
     for i, a in enumerate(shift[:spatial_dims]):
         affine[i, spatial_dims] = a
-    return affine
+    return np.asarray(affine)


 def generate_spatial_bounding_box(
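`create_translate` builds a homogeneous affine: an identity with the shift in the last column; the `np.asarray` only pins the return type for the checker. A worked 2-D instance of the matrix it returns:

    import numpy as np

    affine = np.eye(3)  # 2-D homogeneous identity
    affine[0, 2], affine[1, 2] = 2.0, 3.0
    # affine @ [x, y, 1] -> [x + 2, y + 3, 1]
    #     [[1. 0. 2.]
    #      [0. 1. 3.]
    #      [0. 0. 1.]]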
diff --git a/monai/utils/misc.py b/monai/utils/misc.py
index 2b31392a46..c5e8318db3 100644
--- a/monai/utils/misc.py
+++ b/monai/utils/misc.py
@@ -76,7 +76,7 @@ def issequenceiterable(obj: Any) -> bool:
     """
     Determine if the object is an iterable sequence and is not a string.
     """
-    if torch.is_tensor(obj):
+    if isinstance(obj, torch.Tensor):
         return int(obj.dim()) > 0  # a 0-d tensor is not iterable
     return isinstance(obj, collections.abc.Iterable) and not isinstance(obj, str)
@@ -130,7 +130,9 @@ def ensure_tuple_rep(tup: Any, dim: int) -> Tuple[Any, ...]:
     raise ValueError(f"Sequence must have length {dim}, got {len(tup)}.")


-def fall_back_tuple(user_provided: Any, default: Sequence, func: Callable = lambda x: x and x > 0) -> Tuple[Any, ...]:
+def fall_back_tuple(
+    user_provided: Any, default: Union[Sequence, np.ndarray], func: Callable = lambda x: x and x > 0
+) -> Tuple[Any, ...]:
     """
     Refine `user_provided` according to the `default`, and returns as a validated tuple.
@@ -175,13 +177,13 @@ def fall_back_tuple(user_provided: Any, default: Sequence, func: Callable = lamb


 def is_scalar_tensor(val: Any) -> bool:
-    if torch.is_tensor(val) and val.ndim == 0:
+    if isinstance(val, torch.Tensor) and val.ndim == 0:
         return True
     return False


 def is_scalar(val: Any) -> bool:
-    if torch.is_tensor(val) and val.ndim == 0:
+    if isinstance(val, torch.Tensor) and val.ndim == 0:
         return True
     return bool(np.isscalar(val))
@@ -287,7 +289,7 @@ def _parse_var(s):


 _torch_to_np_dtype = {
-    torch.bool: np.bool,
+    torch.bool: bool,
     torch.uint8: np.uint8,
     torch.int8: np.int8,
     torch.int16: np.int16,
diff --git a/monai/visualize/img2tensorboard.py b/monai/visualize/img2tensorboard.py
index 8f6eca5482..b02a7a80ea 100644
--- a/monai/visualize/img2tensorboard.py
+++ b/monai/visualize/img2tensorboard.py
@@ -96,7 +96,7 @@
     for it_i in range(min(max_out, list(image.shape)[0])):
         one_channel_img: Union[torch.Tensor, np.ndarray] = (
-            image[it_i, :, :, :].squeeze(dim=0) if torch.is_tensor(image) else image[it_i, :, :, :]
+            image[it_i, :, :, :].squeeze(dim=0) if isinstance(image, torch.Tensor) else image[it_i, :, :, :]
         )
         summary_op = _image3_animated_gif(tag + suffix.format(it_i), one_channel_img, scale_factor)
     return summary_op
@@ -182,7 +182,7 @@ def plot_2d_or_3d_image(
         max_frames: number of frames for 2D-t plot.
         tag: tag of the plotted image on TensorBoard.
     """
-    d = data[index].detach().cpu().numpy() if torch.is_tensor(data) else data[index]
+    d = data[index].detach().cpu().numpy() if isinstance(data, torch.Tensor) else data[index]

     if d.ndim == 2:
         d = rescale_array(d, 0, 1)
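The blanket `torch.is_tensor` -> `isinstance(..., torch.Tensor)` swap is behaviour-preserving (PyTorch documents `torch.is_tensor(x)` as simply doing `isinstance(x, Tensor)`), but only the `isinstance` form acts as a type-narrowing guard for mypy:

    import numpy as np
    import torch


    def to_numpy(x):
        if isinstance(x, torch.Tensor):  # mypy narrows x to torch.Tensor in this branch
            return x.detach().cpu().numpy()
        return np.asarray(x)  # torch.is_tensor(x) would not narrow the type here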
diff --git a/tests/test_affine.py b/tests/test_affine.py
index fbda818437..934473fc5c 100644
--- a/tests/test_affine.py
+++ b/tests/test_affine.py
@@ -79,8 +79,8 @@ class TestAffine(unittest.TestCase):
     def test_affine(self, input_param, input_data, expected_val):
         g = Affine(**input_param)
         result = g(**input_data)
-        self.assertEqual(torch.is_tensor(result), torch.is_tensor(expected_val))
-        if torch.is_tensor(result):
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor))
+        if isinstance(result, torch.Tensor):
             np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4)
         else:
             np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
diff --git a/tests/test_affine_grid.py b/tests/test_affine_grid.py
index c7caae29b4..2906cd18b6 100644
--- a/tests/test_affine_grid.py
+++ b/tests/test_affine_grid.py
@@ -93,8 +93,8 @@ class TestAffineGrid(unittest.TestCase):
     def test_affine_grid(self, input_param, input_data, expected_val):
         g = AffineGrid(**input_param)
         result = g(**input_data)
-        self.assertEqual(torch.is_tensor(result), torch.is_tensor(expected_val))
-        if torch.is_tensor(result):
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor))
+        if isinstance(result, torch.Tensor):
             np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4)
         else:
             np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
diff --git a/tests/test_crop_foregroundd.py b/tests/test_crop_foregroundd.py
index f4283514de..cacf990763 100644
--- a/tests/test_crop_foregroundd.py
+++ b/tests/test_crop_foregroundd.py
@@ -65,14 +65,14 @@ def test_value(self, argments, image, expected_data):
     @parameterized.expand([TEST_CASE_1])
     def test_foreground_position(self, argments, image, _):
         result = CropForegroundd(**argments)(image)
-        self.assertListEqual(result["foreground_start_coord"], [1, 1])
-        self.assertListEqual(result["foreground_end_coord"], [4, 4])
+        np.testing.assert_allclose(result["foreground_start_coord"], np.array([1, 1]))
+        np.testing.assert_allclose(result["foreground_end_coord"], np.array([4, 4]))

         argments["start_coord_key"] = "test_start_coord"
         argments["end_coord_key"] = "test_end_coord"
         result = CropForegroundd(**argments)(image)
-        self.assertListEqual(result["test_start_coord"], [1, 1])
-        self.assertListEqual(result["test_end_coord"], [4, 4])
+        np.testing.assert_allclose(result["test_start_coord"], np.array([1, 1]))
+        np.testing.assert_allclose(result["test_end_coord"], np.array([4, 4]))


 if __name__ == "__main__":
diff --git a/tests/test_detect_envelope.py b/tests/test_detect_envelope.py
index 08c699c84f..47b3a66305 100644
--- a/tests/test_detect_envelope.py
+++ b/tests/test_detect_envelope.py
@@ -98,7 +98,7 @@
 TEST_CASE_INVALID_DTYPE = [
     {},
-    np.expand_dims(np.array(hann_windowed_sine, dtype=np.complex), 0),  # complex numbers are invalid
+    np.expand_dims(np.array(hann_windowed_sine, dtype=complex), 0),  # complex numbers are invalid
     "__call__",  # method expected to raise exception
 ]
diff --git a/tests/test_patch_dataset.py b/tests/test_patch_dataset.py
index 59174123ca..3dadbe3d92 100644
--- a/tests/test_patch_dataset.py
+++ b/tests/test_patch_dataset.py
@@ -42,7 +42,7 @@ def test_shape(self):
     def test_loading_array(self):
         set_determinism(seed=1234)
         # image dataset
-        images = [np.arange(16, dtype=np.float).reshape(1, 4, 4), np.arange(16, dtype=np.float).reshape(1, 4, 4)]
+        images = [np.arange(16, dtype=float).reshape(1, 4, 4), np.arange(16, dtype=float).reshape(1, 4, 4)]
         # image patch sampler
         n_samples = 8
         sampler = RandSpatialCropSamples(roi_size=(3, 3), num_samples=n_samples, random_center=True, random_size=False)
diff --git a/tests/test_rand_affine.py b/tests/test_rand_affine.py
index 72fa772d96..68126f5c8e 100644
--- a/tests/test_rand_affine.py
+++ b/tests/test_rand_affine.py
@@ -74,8 +74,8 @@ def test_rand_affine(self, input_param, input_data, expected_val):
         g = RandAffine(**input_param)
         g.set_random_state(123)
         result = g(**input_data)
-        self.assertEqual(torch.is_tensor(result), torch.is_tensor(expected_val))
-        if torch.is_tensor(result):
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor))
+        if isinstance(result, torch.Tensor):
             np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4)
         else:
             np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
diff --git a/tests/test_rand_affine_grid.py b/tests/test_rand_affine_grid.py
index c3fe078afd..605d0a30ba 100644
--- a/tests/test_rand_affine_grid.py
+++ b/tests/test_rand_affine_grid.py
@@ -187,8 +187,8 @@ def test_rand_affine_grid(self, input_param, input_data, expected_val):
         g = RandAffineGrid(**input_param)
         g.set_random_state(123)
         result = g(**input_data)
-        self.assertEqual(torch.is_tensor(result), torch.is_tensor(expected_val))
-        if torch.is_tensor(result):
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor))
+        if isinstance(result, torch.Tensor):
             np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4)
         else:
             np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
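Every test edit from here on applies the same mechanical pattern: first assert that result and expectation agree on being tensors, then compare numerically on CPU. Condensed as a hypothetical helper (not part of the MONAI test suite):

    import numpy as np
    import torch


    def assert_matches(result, expected, rtol=1e-4, atol=1e-4):
        # both sides must be tensors, or both plain arrays
        assert isinstance(result, torch.Tensor) == isinstance(expected, torch.Tensor)
        if isinstance(result, torch.Tensor):
            result, expected = result.cpu().numpy(), expected.cpu().numpy()
        np.testing.assert_allclose(result, expected, rtol=rtol, atol=atol)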
diff --git a/tests/test_rand_affined.py b/tests/test_rand_affined.py
index 51bb59cd37..54d71ad8f7 100644
--- a/tests/test_rand_affined.py
+++ b/tests/test_rand_affined.py
@@ -146,8 +146,8 @@ def test_rand_affined(self, input_param, input_data, expected_val):
         for key in res:
             result = res[key]
             expected = expected_val[key] if isinstance(expected_val, dict) else expected_val
-            self.assertEqual(torch.is_tensor(result), torch.is_tensor(expected))
-            if torch.is_tensor(result):
+            self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected, torch.Tensor))
+            if isinstance(result, torch.Tensor):
                 np.testing.assert_allclose(result.cpu().numpy(), expected.cpu().numpy(), rtol=1e-4, atol=1e-4)
             else:
                 np.testing.assert_allclose(result, expected, rtol=1e-4, atol=1e-4)
diff --git a/tests/test_rand_deform_grid.py b/tests/test_rand_deform_grid.py
index 0b969f8f4b..7c12c263d2 100644
--- a/tests/test_rand_deform_grid.py
+++ b/tests/test_rand_deform_grid.py
@@ -129,8 +129,8 @@ def test_rand_deform_grid(self, input_param, input_data, expected_val):
         g = RandDeformGrid(**input_param)
         g.set_random_state(123)
         result = g(**input_data)
-        self.assertEqual(torch.is_tensor(result), torch.is_tensor(expected_val))
-        if torch.is_tensor(result):
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor))
+        if isinstance(result, torch.Tensor):
             np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4)
         else:
             np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
diff --git a/tests/test_rand_elastic_2d.py b/tests/test_rand_elastic_2d.py
index c9db225742..aa408f0fdc 100644
--- a/tests/test_rand_elastic_2d.py
+++ b/tests/test_rand_elastic_2d.py
@@ -95,8 +95,8 @@ def test_rand_2d_elastic(self, input_param, input_data, expected_val):
         g = Rand2DElastic(**input_param)
         g.set_random_state(123)
         result = g(**input_data)
-        self.assertEqual(torch.is_tensor(result), torch.is_tensor(expected_val))
-        if torch.is_tensor(result):
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor))
+        if isinstance(result, torch.Tensor):
             np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4)
         else:
             np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
diff --git a/tests/test_rand_elastic_3d.py b/tests/test_rand_elastic_3d.py
index f2b1669a46..8cd74c6be7 100644
--- a/tests/test_rand_elastic_3d.py
+++ b/tests/test_rand_elastic_3d.py
@@ -74,8 +74,8 @@ def test_rand_3d_elastic(self, input_param, input_data, expected_val):
         g = Rand3DElastic(**input_param)
         g.set_random_state(123)
         result = g(**input_data)
-        self.assertEqual(torch.is_tensor(result), torch.is_tensor(expected_val))
-        if torch.is_tensor(result):
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor))
+        if isinstance(result, torch.Tensor):
             np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4)
         else:
             np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
diff --git a/tests/test_rand_elasticd_2d.py b/tests/test_rand_elasticd_2d.py
index 054a0c2150..f8eb026088 100644
--- a/tests/test_rand_elasticd_2d.py
+++ b/tests/test_rand_elasticd_2d.py
@@ -144,8 +144,8 @@ def test_rand_2d_elasticd(self, input_param, input_data, expected_val):
         for key in res:
             result = res[key]
             expected = expected_val[key] if isinstance(expected_val, dict) else expected_val
-            self.assertEqual(torch.is_tensor(result), torch.is_tensor(expected))
-            if torch.is_tensor(result):
+            self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected, torch.Tensor))
+            if isinstance(result, torch.Tensor):
                 np.testing.assert_allclose(result.cpu().numpy(), expected.cpu().numpy(), rtol=1e-4, atol=1e-4)
             else:
                 np.testing.assert_allclose(result, expected, rtol=1e-4, atol=1e-4)
diff --git a/tests/test_rand_elasticd_3d.py b/tests/test_rand_elasticd_3d.py
index 97df8a43e3..47ab814882 100644
--- a/tests/test_rand_elasticd_3d.py
+++ b/tests/test_rand_elasticd_3d.py
@@ -115,8 +115,8 @@ def test_rand_3d_elasticd(self, input_param, input_data, expected_val):
         for key in res:
             result = res[key]
             expected = expected_val[key] if isinstance(expected_val, dict) else expected_val
-            self.assertEqual(torch.is_tensor(result), torch.is_tensor(expected))
-            if torch.is_tensor(result):
+            self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected, torch.Tensor))
+            if isinstance(result, torch.Tensor):
                 np.testing.assert_allclose(result.cpu().numpy(), expected.cpu().numpy(), rtol=1e-4, atol=1e-4)
             else:
                 np.testing.assert_allclose(result, expected, rtol=1e-4, atol=1e-4)
diff --git a/tests/test_resampler.py b/tests/test_resampler.py
index a4536967fa..2be94acebd 100644
--- a/tests/test_resampler.py
+++ b/tests/test_resampler.py
@@ -75,8 +75,8 @@ class TestResample(unittest.TestCase):
     def test_resample(self, input_param, input_data, expected_val):
         g = Resample(**input_param)
         result = g(**input_data)
-        self.assertEqual(torch.is_tensor(result), torch.is_tensor(expected_val))
-        if torch.is_tensor(result):
+        self.assertEqual(isinstance(result, torch.Tensor), isinstance(expected_val, torch.Tensor))
+        if isinstance(result, torch.Tensor):
             np.testing.assert_allclose(result.cpu().numpy(), expected_val.cpu().numpy(), rtol=1e-4, atol=1e-4)
         else:
             np.testing.assert_allclose(result, expected_val, rtol=1e-4, atol=1e-4)
diff --git a/tests/test_spacing.py b/tests/test_spacing.py
index bc491f2f82..9a1ee88679 100644
--- a/tests/test_spacing.py
+++ b/tests/test_spacing.py
@@ -19,19 +19,19 @@
 TEST_CASES = [
     [
-        {"pixdim": (1.0, 1.5, 1.0), "padding_mode": "zeros", "dtype": np.float},
+        {"pixdim": (1.0, 1.5, 1.0), "padding_mode": "zeros", "dtype": float},
         np.arange(4).reshape((1, 2, 2)) + 1.0,  # data
         {"affine": np.eye(4)},
         np.array([[[1.0, 1.0], [3.0, 2.0]]]),
     ],
     [
-        {"pixdim": 1.0, "padding_mode": "zeros", "dtype": np.float},
+        {"pixdim": 1.0, "padding_mode": "zeros", "dtype": float},
         np.ones((1, 2, 1, 2)),  # data
         {"affine": np.eye(4)},
         np.array([[[[1.0, 1.0]], [[1.0, 1.0]]]]),
     ],
     [
-        {"pixdim": (1.0, 1.0, 1.0), "padding_mode": "zeros", "dtype": np.float},
+        {"pixdim": (1.0, 1.0, 1.0), "padding_mode": "zeros", "dtype": float},
         np.ones((1, 2, 1, 2)),  # data
         {"affine": np.eye(4)},
         np.array([[[[1.0, 1.0]], [[1.0, 1.0]]]]),