Source code for kornia.feature.hardnet

from typing import Dict

import torch
import torch.nn as nn
import torch.nn.functional as F

urls: Dict[str, str] = dict()
urls["hardnet++"] = "https://github.com/DagnyT/hardnet/raw/master/pretrained/pretrained_all_datasets/HardNet++.pth"
urls["liberty_aug"] = "https://github.com/DagnyT/hardnet/raw/master/pretrained/train_liberty_with_aug/checkpoint_liberty_with_aug.pth"  # noqa pylint: disable


class HardNet(nn.Module):
    """Module which computes HardNet descriptors of given grayscale patches of 32x32.

    This is based on the original code from the paper "Working hard to know your
    neighbor's margins: Local descriptor learning loss". See :cite:`HardNet2017`
    for more details.

    Args:
        pretrained: (bool) Download and set pretrained weights to the model. Default: False.

    Returns:
        torch.Tensor: HardNet descriptor of the patches.

    Shape:
        - Input: (B, 1, 32, 32)
        - Output: (B, 128)

    Examples:
        >>> input = torch.rand(16, 1, 32, 32)
        >>> hardnet = kornia.feature.HardNet()
        >>> descs = hardnet(input)  # 16x128
    """

    def __init__(self, pretrained: bool = False) -> None:
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32, affine=False),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(32, affine=False),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(64, affine=False),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(64, affine=False),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(128, affine=False),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(128, affine=False),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Conv2d(128, 128, kernel_size=8, bias=False),
            nn.BatchNorm2d(128, affine=False),
        )
        # Use torch.hub to load the pretrained weights.
        if pretrained:
            pretrained_dict = torch.hub.load_state_dict_from_url(
                urls['liberty_aug'], map_location=lambda storage, loc: storage
            )
            self.load_state_dict(pretrained_dict['state_dict'], strict=True)

    @staticmethod
    def _normalize_input(x: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
        """Utility function that normalizes the input by batch."""
        sp, mp = torch.std_mean(x, dim=(-3, -2, -1), keepdim=True)
        # WARNING: we need to .detach() the input statistics, otherwise the gradients
        # produced by the patch extractor with F.grid_sample are very noisy, making
        # the detector training totally unstable.
        return (x - mp.detach()) / (sp.detach() + eps)

    def forward(self, input: torch.Tensor) -> torch.Tensor:  # type: ignore
        x_norm: torch.Tensor = self._normalize_input(input)
        x_features: torch.Tensor = self.features(x_norm)
        x_out = x_features.view(x_features.size(0), -1)
        return F.normalize(x_out, dim=1)
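

# A minimal usage sketch, not part of the original module: it instantiates the
# network, describes two batches of random 32x32 grayscale patches, and matches
# the resulting descriptor sets by mutual nearest neighbor with torch.cdist.
# The random inputs and pretrained=False are illustrative assumptions; real
# patches would come from a keypoint detector and patch extractor.
if __name__ == "__main__":
    hardnet = HardNet(pretrained=False).eval()  # pretrained=True downloads weights

    patches_a = torch.rand(16, 1, 32, 32)  # dummy patches for illustration
    patches_b = torch.rand(16, 1, 32, 32)

    with torch.no_grad():
        descs_a = hardnet(patches_a)  # (16, 128), L2-normalized
        descs_b = hardnet(patches_b)

    # Descriptors are unit-length, so Euclidean distance is a monotone function
    # of cosine similarity; smaller distance means a better match.
    dists = torch.cdist(descs_a, descs_b)  # (16, 16) pairwise distance matrix
    nn_ab = dists.argmin(dim=1)  # best match in B for each descriptor in A
    nn_ba = dists.argmin(dim=0)  # best match in A for each descriptor in B

    # Keep only mutual nearest neighbors (cross-check), a common matching filter.
    idx_a = torch.arange(descs_a.size(0))
    mutual = nn_ba[nn_ab] == idx_a
    matches = torch.stack([idx_a[mutual], nn_ab[mutual]], dim=1)
    print(f"{matches.size(0)} mutual matches out of {descs_a.size(0)} patches")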