
Commit fd985a3

craymichael authored and facebook-github-bot committed
Add stacklevel across captum to satisfy flake8 (#1382)
Summary: Pull Request resolved: #1382

Add stacklevel to silence flake8: Warning (FLAKE8) B028 "No explicit stacklevel argument found." The `warn` function from the warnings module has a stacklevel=1 default; stacklevel=2 is used in some spots where the user might need more context.

Reviewed By: vivekmig

Differential Revision: D64518463

fbshipit-source-id: 7ff6243f049157c809e4811cc833b995c4a1396b
1 parent 103a3fd commit fd985a3
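
For context, flake8-bugbear's B028 check fires on any `warnings.warn` call that omits an explicit `stacklevel`. A minimal sketch of the before/after pattern; the function names and messages below are illustrative, not taken from the diff:

import warnings

def deprecated_helper():
    # B028: flake8 flags this call because stacklevel is not explicit.
    warnings.warn("helper is deprecated")

def deprecated_helper_fixed():
    # stacklevel=1 is the explicit default and reports this line inside
    # the library; stacklevel=2 attributes the warning to the caller,
    # which gives the user more actionable context.
    warnings.warn("helper is deprecated", stacklevel=2)

deprecated_helper_fixed()  # reported warning location is this call site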

File tree

16 files changed, +64 -29 lines changed

captum/_utils/av.py

Lines changed: 2 additions & 1 deletion
@@ -330,7 +330,8 @@ def _manage_loading_layers(
             "Overwriting activations: load_from_disk is set to False. Removing all "
             f"activations matching specified parameters {{path: {path}, "
             f"model_id: {model_id}, layers: {layers}, identifier: {identifier}}} "
-            "before generating new activations."
+            "before generating new activations.",
+            stacklevel=1,
         )
         for layer in layers:
             files = glob.glob(

captum/_utils/progress.py

Lines changed: 2 additions & 1 deletion
@@ -214,7 +214,8 @@ def progress(
         warnings.warn(
             "Tried to show progress with tqdm "
             "but tqdm is not installed. "
-            "Fall back to simply print out the progress."
+            "Fall back to simply print out the progress.",
+            stacklevel=1,
         )
         return SimpleProgress(
             iterable, desc=desc, total=total, file=file, mininterval=mininterval

captum/attr/_core/guided_backprop_deconvnet.py

Lines changed: 2 additions & 1 deletion
@@ -72,7 +72,8 @@ def attribute(
         # set hooks for overriding ReLU gradients
         warnings.warn(
             "Setting backward hooks on ReLU activations."
-            "The hooks will be removed after the attribution is finished"
+            "The hooks will be removed after the attribution is finished",
+            stacklevel=1,
         )
         try:
             self.model.apply(self._register_hooks)

captum/attr/_core/guided_grad_cam.py

Lines changed: 2 additions & 1 deletion
@@ -225,7 +225,8 @@ def attribute(
                 warnings.warn(
                     "Couldn't appropriately interpolate GradCAM attributions for some "
                     "input tensors, returning empty tensor for corresponding "
-                    "attributions."
+                    "attributions.",
+                    stacklevel=1,
                 )
                 output_attr.append(torch.empty(0))

captum/attr/_core/lime.py

Lines changed: 7 additions & 3 deletions
@@ -715,7 +715,8 @@ def construct_feature_mask(
     if min_interp_features != 0:
         warnings.warn(
             "Minimum element in feature mask is not 0, shifting indices to"
-            " start at 0."
+            " start at 0.",
+            stacklevel=2,
         )
         feature_mask = tuple(
             single_mask - min_interp_features for single_mask in feature_mask

@@ -1157,7 +1158,8 @@ def _attribute_kwargs( # type: ignore
             "Attempting to construct interpretable model with > 10000 features."
             "This can be very slow or lead to OOM issues. Please provide a feature"
             "mask which groups input features to reduce the number of interpretable"
-            "features. "
+            "features. ",
+            stacklevel=1,
         )

         coefs: Tensor

@@ -1171,7 +1173,9 @@ def _attribute_kwargs( # type: ignore
             "You are providing multiple inputs for Lime / Kernel SHAP "
             "attributions. This trains a separate interpretable model "
             "for each example, which can be time consuming. It is "
-            "recommended to compute attributions for one example at a time."
+            "recommended to compute attributions for one example at a "
+            "time.",
+            stacklevel=1,
         )
         output_list = []
         for (
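
The `stacklevel=2` in `construct_feature_mask` above matches the note in the summary: when a warning concerns something the caller passed in (here, a user-supplied feature mask), reporting one frame up points at the user's call site rather than a line inside captum. A hedged sketch of that difference, using an illustrative validator function:

import warnings

def validate_mask(mask_min):
    if mask_min != 0:
        # stacklevel=1 would report this line inside the validator;
        # stacklevel=2 reports the caller's line, where the offending
        # mask was actually passed in.
        warnings.warn(
            "Minimum element in feature mask is not 0.",
            stacklevel=2,
        )

validate_mask(3)  # with stacklevel=2, the UserWarning points here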

captum/attr/_core/neuron/neuron_conductance.py

Lines changed: 2 additions & 1 deletion
@@ -283,7 +283,8 @@ def attribute(
             "The neuron_selector provided is a callable. Please ensure that this"
             " function only selects neurons from the given layer; aggregating"
             " or performing other operations on the tensor may lead to inaccurate"
-            " results."
+            " results.",
+            stacklevel=1,
         )
         # pyre-fixme[6]: For 1st argument expected `Tensor` but got
         # `TensorOrTupleOfTensorsGeneric`.

captum/attr/_core/shapley_value.py

Lines changed: 4 additions & 2 deletions
@@ -411,7 +411,8 @@ def attribute(
             warnings.warn(
                 "Feature mask is missing some integers between 0 and "
                 "num_features, for optimal performance, make sure each"
-                " consecutive integer corresponds to a feature."
+                " consecutive integer corresponds to a feature.",
+                stacklevel=1,
             )
         # modified_eval dimensions: 1D tensor with length
         # equal to #num_examples * #features in batch

@@ -858,7 +859,8 @@ def attribute(
             warnings.warn(
                 "You are attempting to compute Shapley Values with at least 10 "
                 "features, which will likely be very computationally expensive."
-                "Consider using Shapley Value Sampling instead."
+                "Consider using Shapley Value Sampling instead.",
+                stacklevel=1,
             )

         return super().attribute.__wrapped__(

captum/attr/_models/base.py

Lines changed: 2 additions & 1 deletion
@@ -211,7 +211,8 @@ def configure_interpretable_embedding_layer(
         "embeddings and compute attributions for each embedding dimension. "
         "The original embedding layer must be set "
         "back by calling `remove_interpretable_embedding_layer` function "
-        "after model interpretation is finished. "
+        "after model interpretation is finished. ",
+        stacklevel=1,
     )
     interpretable_emb = InterpretableEmbeddingBase(
         embedding_layer, embedding_layer_name

captum/attr/_utils/batching.py

Lines changed: 6 additions & 3 deletions
@@ -51,7 +51,8 @@ def _batch_attribution(
         warnings.warn(
             "Internal batch size cannot be less than the number of input examples. "
             "Defaulting to internal batch size of %d equal to the number of examples."
-            % num_examples
+            % num_examples,
+            stacklevel=1,
         )
         # Number of steps for each batch
         step_count = max(1, internal_batch_size // num_examples)

@@ -62,7 +63,8 @@ def _batch_attribution(
             "This method computes finite differences between evaluations at "
             "consecutive steps, so internal batch size must be at least twice "
             "the number of examples. Defaulting to internal batch size of %d"
-            " equal to twice the number of examples." % (2 * num_examples)
+            " equal to twice the number of examples." % (2 * num_examples),
+            stacklevel=1,
         )

     total_attr = None

@@ -161,7 +163,8 @@ def _batched_generator(
         warnings.warn(
             """It looks like that the attribution for a gradient-based method is
             computed in a `torch.no_grad` block or perhaps the inputs have no
-            requires_grad."""
+            requires_grad.""",
+            stacklevel=1,
         )
     if internal_batch_size is None:
         # pyre-fixme[7]: Expected `Iterator[Tuple[typing.Tuple[Tensor, ...], typing.A...

captum/influence/_core/similarity_influence.py

Lines changed: 5 additions & 1 deletion
@@ -297,7 +297,11 @@ def influence( # type: ignore[override]
                 "returned as a tensor with [inputs_idx, src_dataset_idx] pairs "
                 "which may have corrupted similarity scores."
             )
-            warnings.warn(zero_warning, RuntimeWarning)
+            warnings.warn(
+                zero_warning,
+                RuntimeWarning,
+                stacklevel=1,
+            )
             key = "-".join(["zero_acts", layer])
             influences[key] = zero_acts
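
The hunk above also passes an explicit warning category: `warnings.warn` accepts the message, then the category, then `stacklevel`. A minimal sketch of that call shape (the message text is illustrative, not the actual `zero_warning` string):

import warnings

warnings.warn(
    "Zero activations detected; similarity scores may be corrupted.",
    RuntimeWarning,  # category, as in the diff above
    stacklevel=1,    # explicit default, satisfying flake8 B028
)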

captum/influence/_core/tracincp.py

Lines changed: 4 additions & 2 deletions
@@ -178,7 +178,8 @@ def __init__(
                 "Unable to determine the number of batches in training dataset "
                 "`train_dataset`. Therefore, if showing the progress of computations, "
                 "only the number of batches processed can be displayed, and not the "
-                "percentage completion of the computation, nor any time estimates."
+                "percentage completion of the computation, nor any time estimates.",
+                stacklevel=1,
             )

     @property

@@ -1232,7 +1233,8 @@ def _self_influence_by_checkpoints(
                 "Therefore, if showing the progress of the computation of self "
                 "influence scores, only the number of batches processed can be "
                 "displayed, and not the percentage completion of the computation, "
-                "nor any time estimates."
+                "nor any time estimates.",
+                stacklevel=1,
             )

         # pyre-fixme[3]: Return type must be annotated.

captum/influence/_core/tracincp_fast_rand_proj.py

Lines changed: 6 additions & 3 deletions
@@ -579,7 +579,8 @@ def _self_influence_by_checkpoints(
                 "Therefore, if showing the progress of the computation of self "
                 "influence scores, only the number of batches processed can be "
                 "displayed, and not the percentage completion of the computation, "
-                "nor any time estimates."
+                "nor any time estimates.",
+                stacklevel=1,
             )

         # pyre-fixme[53]: Captured variable `inputs_len` is not annotated.

@@ -1040,7 +1041,8 @@ def __init__(
                 "each call to `influence` to compute influence scores or proponents "
                 "will be slower, but may avoid running out of memory."
             )
-            % "`TracInCPFast`"
+            % "`TracInCPFast`",
+            stacklevel=1,
         )

     # pyre-fixme[4]: Attribute must be annotated.

@@ -1230,7 +1232,8 @@ def self_influence(
                 "random projections results only in approximate self influence "
                 "scores, there is no reason to use `TracInCPFastRandProj` when "
                 "calculating self influence scores."
-            )
+            ),
+            stacklevel=1,
         )
         raise NotImplementedError

captum/influence/_utils/common.py

Lines changed: 14 additions & 6 deletions
@@ -420,7 +420,8 @@ def _self_influence_by_batches_helper(
             "Therefore, if showing the progress of the computation of self "
             "influence scores, only the number of batches processed can be "
             "displayed, and not the percentage completion of the computation, "
-            "nor any time estimates."
+            "nor any time estimates.",
+            stacklevel=1,
         )
         # then create the progress bar
         inputs_dataset = progress(

@@ -501,7 +502,8 @@ def _check_loss_fn(
             f'please set the reduction attribute of `{loss_fn_name}` to "mean", '
             f'i.e. `{loss_fn_name}.reduction = "mean"`. Note that if '
             "`sample_wise_grads_per_batch` is True, the implementation "
-            "assumes the reduction is either a sum or mean reduction."
+            "assumes the reduction is either a sum or mean reduction.",
+            stacklevel=1,
         )
         reduction_type = "sum"
     else:

@@ -510,7 +512,8 @@ def _check_loss_fn(
             "`sample_wise_grads_per_batch` is False, the implementation "
             f'assumes that `{loss_fn_name}` is a "per-example" loss function (see '
             f"documentation for `{loss_fn_name}` for details). Please ensure "
-            "that this is the case."
+            "that this is the case.",
+            stacklevel=1,
         )

     return reduction_type

@@ -531,7 +534,8 @@ def _set_active_parameters(model: Module, layers: List[str]) -> List[Module]:
             warnings.warn(
                 "Setting required grads for layer: {}, name: {}".format(
                     ".".join(layer), name
-                )
+                ),
+                stacklevel=1,
             )
             param.requires_grad = True
     return layer_modules

@@ -556,7 +560,8 @@ def _progress_bar_constructor(
             f"of the computation of {quantities_name}, "
             "only the number of batches processed can be "
             "displayed, and not the percentage completion of the computation, "
-            "nor any time estimates."
+            "nor any time estimates.",
+            stacklevel=1,
         )

     return progress(

@@ -989,7 +994,10 @@ def _compute_batch_loss_influence_function_base(
             "`reduction='sum'` loss function, or a `reduction='none'` "
             "and set `sample_grads_per_batch` to false."
         )
-        warnings.warn(msg)
+        warnings.warn(
+            msg,
+            stacklevel=1,
+        )
         return _loss * multiplier
     elif reduction_type == "sum":
         return _loss

captum/insights/example.py

Lines changed: 2 additions & 1 deletion
@@ -7,7 +7,8 @@
 from captum.insights.attr_vis.example import *  # noqa

 warnings.warn(
-    "Deprecated. Please import from captum.insights.attr_vis.example instead."
+    "Deprecated. Please import from captum.insights.attr_vis.example instead.",
+    stacklevel=1,
 )

captum/metrics/_utils/batching.py

Lines changed: 2 additions & 1 deletion
@@ -61,7 +61,8 @@ def _divide_and_aggregate_metrics(
                 "to compute the metrics, contains at least an instance of "
                 "the original example and doesn't exceed the number of "
                 "expanded n_perturb_samples."
-            ).format(max_examples_per_batch, bsz)
+            ).format(max_examples_per_batch, bsz),
+            stacklevel=1,
         )

     max_inps_per_batch = (

captum/robust/_core/metrics/attack_comparator.py

Lines changed: 2 additions & 1 deletion
@@ -399,7 +399,8 @@ def _check_and_evaluate(input_list, key_list):
         for key in attack.additional_args:
             if key not in kwargs:
                 warnings.warn(
-                    f"Additional sample arg {key} not provided for {attack_key}"
+                    f"Additional sample arg {key} not provided for {attack_key}",
+                    stacklevel=1,
                 )
             else:
                 additional_attack_args[key] = kwargs[key]
