Skip to content

anomalib

Visualizer()

Anomaly Visualization.

The visualizer object is responsible for collating all the images passed to it into a single image. This can then either be logged by accessing the figure attribute or can be saved directly by calling save() method.

Example

visualizer = Visualizer()
visualizer.add_image(image=image, title="Image")
visualizer.close()

Source code in quadra/callbacks/anomalib.py
31
32
33
34
35
def __init__(self) -> None:
    """Initialize an empty visualizer with no images queued."""
    # Images queued via add_image(); each entry is a dict with
    # "image", "title" and "color_map" keys.
    self.images: List[Dict] = []

    # Declared here for type checkers; both are assigned by generate()
    # when the figure is actually built.
    self.figure: matplotlib.figure.Figure
    self.axis: np.ndarray

add_image(image, title, color_map=None)

Add image to figure.

Parameters:

  • image (ndarray) –

    Image which should be added to the figure.

  • title (str) –

    Image title shown on the plot.

  • color_map (Optional[str], default: None ) –

    Name of matplotlib color map used to map scalar data to colours. Defaults to None.

Source code in quadra/callbacks/anomalib.py
37
38
39
40
41
42
43
44
45
46
def add_image(self, image: np.ndarray, title: str, color_map: Optional[str] = None):
    """Queue an image for inclusion in the collated figure.

    Args:
      image: Image which should be added to the figure.
      title: Image title shown on the plot.
      color_map: Name of matplotlib color map used to map scalar data to colours. Defaults to None.
    """
    self.images.append({"image": image, "title": title, "color_map": color_map})

close()

Close figure.

Source code in quadra/callbacks/anomalib.py
78
79
80
def close(self):
    """Release the matplotlib figure held by this visualizer."""
    figure_to_close = self.figure
    plt.close(figure_to_close)

generate()

Generate the image.

Source code in quadra/callbacks/anomalib.py
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
def generate(self):
    """Generate the image.

    Lays out every image queued via ``add_image`` as one row of subplots
    and stores the result in ``self.figure`` / ``self.axis``.
    """
    default_plt_backend = plt.get_backend()
    # Render off-screen with Agg; the caller's backend is restored below.
    plt.switch_backend("Agg")
    try:
        num_cols = len(self.images)
        figure_size = (num_cols * 3, 3)
        self.figure, self.axis = plt.subplots(1, num_cols, figsize=figure_size)
        self.figure.subplots_adjust(right=0.9)

        # With a single subplot, plt.subplots returns a bare Axes object
        # rather than an array, so wrap it for uniform iteration.
        axes = self.axis if len(self.images) > 1 else [self.axis]
        for axis, image_dict in zip(axes, self.images):
            axis.axes.xaxis.set_visible(False)
            axis.axes.yaxis.set_visible(False)
            axis.imshow(image_dict["image"], image_dict["color_map"], vmin=0, vmax=255)
            axis.title.set_text(image_dict["title"])
    finally:
        # Bug fix: previously an exception while building the figure left
        # the global matplotlib backend switched to Agg for the whole
        # process; always restore the original backend.
        plt.switch_backend(default_plt_backend)

save(filename)

Save image.

Parameters:

  • filename (Path) –

    Filename to save image

Source code in quadra/callbacks/anomalib.py
69
70
71
72
73
74
75
76
def save(self, filename: Path):
    """Write the generated figure to disk.

    Args:
      filename: Filename to save image; missing parent directories are created.
    """
    destination_dir = filename.parent
    destination_dir.mkdir(parents=True, exist_ok=True)
    self.figure.savefig(filename, dpi=100)

show()

Show image on a matplotlib figure.

Source code in quadra/callbacks/anomalib.py
65
66
67
def show(self):
    """Display the collated figure in a matplotlib window."""
    figure = self.figure
    figure.show()

VisualizerCallback(task='segmentation', output_path='anomaly_output', inputs_are_normalized=True, threshold_type='pixel', disable=False, plot_only_wrong=False, plot_raw_outputs=False)

Bases: Callback

Callback that visualizes the inference results of a model.

The callback generates a figure showing the original image, the ground truth segmentation mask, the predicted error heat map, and the predicted segmentation mask. To save the images to the filesystem, add the 'local' keyword to the project.log_images_to parameter in the config.yaml file.

Parameters:

  • task (str, default: 'segmentation' ) –

    either 'segmentation' or 'classification'

  • output_path (str, default: 'anomaly_output' ) –

    location where the images will be saved.

  • inputs_are_normalized (bool, default: True ) –

    whether the input images are normalized (i.e. using the MinMax callback).

  • threshold_type (str, default: 'pixel' ) –

    Either 'pixel' or 'image'. If 'pixel', the threshold is computed at the pixel level.

  • disable (bool, default: False ) –

    whether to disable the callback.

  • plot_only_wrong (bool, default: False ) –

    whether to plot only the images that are not correctly predicted.

  • plot_raw_outputs (bool, default: False ) –

    whether to save the raw images of the segmentation and heatmap outputs.

Source code in quadra/callbacks/anomalib.py
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
def __init__(
    self,
    task: str = "segmentation",
    output_path: str = "anomaly_output",
    inputs_are_normalized: bool = True,
    threshold_type: str = "pixel",
    disable: bool = False,
    plot_only_wrong: bool = False,
    plot_raw_outputs: bool = False,
) -> None:
    """Store the callback configuration on the instance.

    All arguments are kept verbatim, in parameter order, for use by the
    ``on_test_*`` hooks; see the class docstring for their meaning.
    """
    self.task = task
    self.output_path = output_path
    self.inputs_are_normalized = inputs_are_normalized
    self.threshold_type = threshold_type
    self.disable = disable
    self.plot_only_wrong = plot_only_wrong
    self.plot_raw_outputs = plot_raw_outputs

on_test_batch_end(_trainer, pl_module, outputs, _batch, _batch_idx, _dataloader_idx)

Log images at the end of every batch.

Parameters:

  • _trainer (Trainer) –

    Pytorch lightning trainer object (unused).

  • pl_module (AnomalyModule) –

    Lightning modules derived from BaseAnomalyLightning object as currently only they support logging images.

  • outputs (Optional[STEP_OUTPUT]) –

    Outputs of the current test step.

  • _batch (Any) –

    Input batch of the current test step (unused).

  • _batch_idx (int) –

    Index of the current test batch (unused).

  • _dataloader_idx (int) –

    Index of the dataloader that yielded the current batch (unused).

Source code in quadra/callbacks/anomalib.py
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
def on_test_batch_end(
    self,
    _trainer: pl.Trainer,
    pl_module: AnomalyModule,
    outputs: Optional[STEP_OUTPUT],
    _batch: Any,
    _batch_idx: int,
    _dataloader_idx: int,
) -> None:
    """Log images at the end of every batch.

    Args:
        _trainer: Pytorch lightning trainer object (unused).
        pl_module: Lightning modules derived from BaseAnomalyLightning object as
            currently only they support logging images.
        outputs: Outputs of the current test step.
        _batch: Input batch of the current test step (unused).
        _batch_idx: Index of the current test batch (unused).
        _dataloader_idx: Index of the dataloader that yielded the current batch (unused).
    """
    if self.disable:
        return

    assert outputs is not None and isinstance(outputs, dict)

    # Every key listed here is read below; if any is missing nothing can
    # be visualized for this batch.
    if any(x not in outputs.keys() for x in ["image_path", "image", "mask", "anomaly_maps", "label"]):
        # I'm probably in the classification scenario so I can't use the visualizer
        return

    if self.inputs_are_normalized:
        normalize = False  # anomaly maps are already normalized
    else:
        normalize = True  # raw anomaly maps. Still need to normalize

    # Pick the F1 threshold from the pixel- or image-level metric,
    # according to how this callback was configured.
    if self.threshold_type == "pixel":
        if hasattr(pl_module.pixel_metrics.F1Score, "threshold"):
            threshold = pl_module.pixel_metrics.F1Score.threshold
        else:
            raise AttributeError("Metric has no threshold attribute")
    else:
        if hasattr(pl_module.image_metrics.F1Score, "threshold"):
            threshold = pl_module.image_metrics.F1Score.threshold
        else:
            raise AttributeError("Metric has no threshold attribute")

    # One figure per sample in the batch; tqdm only adds a progress bar.
    for filename, image, true_mask, anomaly_map, gt_label, pred_label, anomaly_score in tqdm(
        zip(
            outputs["image_path"],
            outputs["image"],
            outputs["mask"],
            outputs["anomaly_maps"],
            outputs["label"],
            outputs["pred_labels"],
            outputs["pred_scores"],
        )
    ):
        # Move tensors to CPU for plotting; Denormalize() presumably
        # reverses the input normalization for display — inferred from
        # its name, confirm against quadra's transforms.
        image = Denormalize()(image.cpu())
        true_mask = true_mask.cpu().numpy()
        anomaly_map = anomaly_map.cpu().numpy()

        # Correctly classified samples go under "ok", misclassified
        # ones under "wrong".
        output_label_folder = "ok" if pred_label == gt_label else "wrong"

        if self.plot_only_wrong and output_label_folder == "ok":
            continue

        heat_map = superimpose_anomaly_map(anomaly_map, image, normalize=normalize)
        if isinstance(threshold, float):
            pred_mask = compute_mask(anomaly_map, threshold)
        else:
            raise TypeError("Threshold should be float")
        # Outline the predicted anomalous regions on the original image.
        vis_img = mark_boundaries(image, pred_mask, color=(1, 0, 0), mode="thick")
        visualizer = Visualizer()

        if self.task == "segmentation":
            visualizer.add_image(image=image, title="Image")
            if "mask" in outputs:
                # Scale the 0/1 mask so it is visible with the gray color map.
                true_mask = true_mask * 255
                visualizer.add_image(image=true_mask, color_map="gray", title="Ground Truth")
            visualizer.add_image(image=heat_map, title="Predicted Heat Map")
            visualizer.add_image(image=pred_mask, color_map="gray", title="Predicted Mask")
            visualizer.add_image(image=vis_img, title="Segmentation Result")
        elif self.task == "classification":
            # Label the raw image with the ground truth, and the heat map
            # with the thresholded prediction plus its confidence score.
            gt_im = add_anomalous_label(image) if gt_label else add_normal_label(image)
            visualizer.add_image(gt_im, title="Image/True label")
            if anomaly_score >= threshold:
                image_classified = add_anomalous_label(heat_map, anomaly_score)
            else:
                image_classified = add_normal_label(heat_map, 1 - anomaly_score)
            visualizer.add_image(image=image_classified, title="Prediction")

        visualizer.generate()
        visualizer.figure.suptitle(
            f"F1 threshold: {threshold}, Mask_max: {anomaly_map.max():.3f}, Anomaly_score: {anomaly_score:.3f}"
        )
        filename = Path(filename)
        self._add_images(visualizer, filename, output_label_folder)
        visualizer.close()

        if self.plot_raw_outputs:
            # Additionally dump the bare heat map and segmentation overlay
            # (no titles or figure chrome) under a "raw_outputs" subfolder.
            for raw_output, raw_name in zip([heat_map, vis_img], ["heatmap", "segmentation"]):
                if raw_name == "segmentation":
                    # vis_img is assumed to be float-valued in [0, 1]
                    # (typical of mark_boundaries) — scaled to uint8 here.
                    raw_output = (raw_output * 255).astype(np.uint8)
                # OpenCV writes images in BGR channel order.
                raw_output = cv2.cvtColor(raw_output, cv2.COLOR_RGB2BGR)
                raw_filename = (
                    Path(self.output_path)
                    / "images"
                    / output_label_folder
                    / filename.parent.name
                    / "raw_outputs"
                    / Path(filename.stem + f"_{raw_name}.png")
                )
                raw_filename.parent.mkdir(parents=True, exist_ok=True)
                cv2.imwrite(str(raw_filename), raw_output)

on_test_end(_trainer, pl_module)

Sync logs.

Currently only AnomalibWandbLogger is called from this method. This is because logging as a single batch ensures that all images appear as part of the same step.

Parameters:

  • _trainer (Trainer) –

    Pytorch Lightning trainer (unused)

  • pl_module (LightningModule) –

    Anomaly module

Source code in quadra/callbacks/anomalib.py
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
def on_test_end(self, _trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
    """Flush pending image logs once testing completes.

    Only ``AnomalibWandbLogger`` is handled here: saving once at test end
    ensures all images appear as part of the same step.

    Args:
        _trainer: Pytorch Lightning trainer (unused)
        pl_module: Anomaly module
    """
    if self.disable:
        return

    logger = pl_module.logger
    # isinstance() already rejects None, so no separate None check is needed.
    if isinstance(logger, AnomalibWandbLogger):
        logger.save()