
anomaly

_initialize_patchcore_model(patchcore_model, coreset_sampling_ratio=0.1)

Initialize a Patchcore model by simulating a training step.

Parameters:

  • patchcore_model (PatchcoreModel) –

    Patchcore model to initialize

  • coreset_sampling_ratio (float, default: 0.1) –

    Coreset sampling ratio to use for the initialization

Returns:

  • PatchcoreModel

    Patchcore model with initialized memory bank

Source code in quadra/utils/tests/fixtures/models/anomaly.py
@torch.inference_mode()
def _initialize_patchcore_model(patchcore_model: PatchcoreModel, coreset_sampling_ratio: float = 0.1) -> PatchcoreModel:
    """Initialize a Patchcore model by simulating a training step.

    Args:
        patchcore_model: Patchcore model to initialize
        coreset_sampling_ratio: Coreset sampling ratio to use for the initialization

    Returns:
        Patchcore model with initialized memory bank
    """
    with torch.no_grad():
        random_input = torch.rand([1, 3, *patchcore_model.input_size])
        training_features = patchcore_model(random_input)

        patchcore_model.eval()
        patchcore_model.subsample_embedding(training_features, sampling_ratio=coreset_sampling_ratio)

        # Simulate a memory bank built from 5 images: at the current stage the Patchcore ONNX export does not
        # handle large memory banks well, so we use a small one for the benchmark
        memory_bank_number, memory_bank_n_features = patchcore_model.memory_bank.shape
        patchcore_model.memory_bank = torch.rand([5 * memory_bank_number, memory_bank_n_features])
        patchcore_model.train()

    return patchcore_model
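
For reference, a minimal usage sketch; the anomalib import path and the constructor arguments are assumptions that mirror the patchcore_resnet18 fixture below:

import torch
from anomalib.models.patchcore.torch_model import PatchcoreModel  # import path is an assumption

from quadra.utils.tests.fixtures.models.anomaly import _initialize_patchcore_model

# Build a small Patchcore model (arguments mirror the patchcore_resnet18 fixture below)
model = PatchcoreModel(
    input_size=[224, 224],
    backbone="resnet18",
    layers=["layer2", "layer3"],
    pre_trained=False,
)

# Populate the memory bank by simulating a training step
model = _initialize_patchcore_model(model, coreset_sampling_ratio=0.1)

# The returned model has a small (random) memory bank and can run a forward pass in eval mode
model.eval()
with torch.inference_mode():
    predictions = model(torch.rand([1, 3, 224, 224]))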

draem()

Yield a draem model.

Source code in quadra/utils/tests/fixtures/models/anomaly.py
@pytest.fixture
def draem():
    """Yield a draem model."""
    yield DraemModel()

efficient_ad_small()

Yield an EfficientAd small model.

Source code in quadra/utils/tests/fixtures/models/anomaly.py
@pytest.fixture
def efficient_ad_small():
    """Yield a draem model."""

    class EfficientAdForwardWrapper(EfficientAdModel):
        """Wrap the forward method to avoid passing optional parameters."""

        def forward(self, x):
            return super().forward(x, None)

    model = EfficientAdForwardWrapper(
        teacher_out_channels=384,
        input_size=[256, 256],  # TODO: This is hardcoded, which may not be a good idea
        pretrained_teacher_type="nelson",
    )

    yield model
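
A short illustrative sketch of exercising the wrapped forward; the test name and the input shape (matching the input_size above) are assumptions:

import torch


def test_efficient_ad_small_forward(efficient_ad_small):
    """Illustrative test: the wrapper lets the model be called with a single tensor."""
    efficient_ad_small.eval()
    with torch.inference_mode():
        output = efficient_ad_small(torch.rand([1, 3, 256, 256]))
    assert output is not None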

padim_resnet18()

Yield a padim model with resnet18 encoder.

Source code in quadra/utils/tests/fixtures/models/anomaly.py
@pytest.fixture
def padim_resnet18():
    """Yield a padim model with resnet18 encoder."""
    yield PadimModel(
        input_size=[224, 224],  # TODO: This is hardcoded, which may not be a good idea
        backbone="resnet18",
        layers=["layer1", "layer2", "layer3"],
        pretrained_weights=None,
        tied_covariance=False,
        pre_trained=False,
    )

patchcore_resnet18()

Yield a patchcore model with resnet18 encoder.

Source code in quadra/utils/tests/fixtures/models/anomaly.py
@pytest.fixture
def patchcore_resnet18():
    """Yield a patchcore model with resnet18 encoder."""
    model = PatchcoreModel(
        input_size=[224, 224],  # TODO: This is hardcoded, which may not be a good idea
        backbone="resnet18",
        layers=["layer2", "layer3"],
        pre_trained=False,
    )

    yield _initialize_patchcore_model(model)
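
A hedged sketch of consuming the fixture in a test: pytest injects the model by parameter name, and because the memory bank is already initialized the model can produce predictions directly. The test name and assertion are illustrative only:

import torch


def test_patchcore_resnet18_inference(patchcore_resnet18):
    """Illustrative test: the yielded model already has an initialized memory bank."""
    patchcore_resnet18.eval()
    with torch.inference_mode():
        predictions = patchcore_resnet18(torch.rand([1, 3, 224, 224]))
    assert predictions is not None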