
Source code for mmocr.models.textdet.detectors.single_stage_text_detector

# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmocr.models.builder import DETECTORS
from mmocr.models.common.detectors import SingleStageDetector


@DETECTORS.register_module()
class SingleStageTextDetector(SingleStageDetector):
    """The class for implementing single stage text detector."""

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        SingleStageDetector.__init__(self, backbone, neck, bbox_head,
                                     train_cfg, test_cfg, pretrained,
                                     init_cfg)

    def forward_train(self, img, img_metas, **kwargs):
        """
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys, see
                :class:`mmdet.datasets.pipelines.Collect`.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        x = self.extract_feat(img)
        preds = self.bbox_head(x)
        losses = self.bbox_head.loss(preds, **kwargs)
        return losses

    def simple_test(self, img, img_metas, rescale=False):
        x = self.extract_feat(img)
        outs = self.bbox_head(x)

        # early return to avoid post processing
        if torch.onnx.is_in_onnx_export():
            return outs

        if len(img_metas) > 1:
            boundaries = [
                self.bbox_head.get_boundary(*(outs[i].unsqueeze(0)),
                                            [img_metas[i]], rescale)
                for i in range(len(img_metas))
            ]
        else:
            boundaries = [
                self.bbox_head.get_boundary(*outs, img_metas, rescale)
            ]

        return boundaries
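
The listing above is the complete module. As a point of reference, the following is a minimal usage sketch and is not part of the mmocr source: it assumes an mmocr 0.x checkout with its configs/ directory available, uses the PANet ICDAR2015 config path only as an example, and fills img_metas with placeholder values matching the keys documented in forward_train. The ground-truth keyword arguments that forward_train requires depend on the concrete bbox_head and are normally produced by the training data pipeline, so they are only indicated in a comment.

# Minimal usage sketch (illustrative, not taken from the module above).
# Assumes the working directory is an mmocr 0.x repository root so the
# example config path resolves.
import torch
from mmcv import Config

from mmocr.models import build_detector

# Any single-stage text-detection config works here; the PANet config
# below is just one example shipped with mmocr.
cfg = Config.fromfile(
    'configs/textdet/panet/panet_r18_fpem_ffm_600e_icdar2015.py')
model = build_detector(cfg.model)
model.eval()

# simple_test takes a batched image tensor plus one img_meta dict per
# image; the keys mirror what mmdet's Collect pipeline would produce.
img = torch.rand(1, 3, 640, 640)  # dummy, unnormalized input
img_metas = [
    dict(
        filename='demo.jpg',
        ori_shape=(640, 640, 3),
        img_shape=(640, 640, 3),
        pad_shape=(640, 640, 3),
        scale_factor=1.0,
        flip=False)
]

with torch.no_grad():
    boundaries = model.simple_test(img, img_metas, rescale=False)
print(boundaries)

# Training instead goes through forward_train(img, img_metas, **gt_kwargs),
# where the ground-truth kwargs (e.g. the target maps expected by the
# head's loss) come from the training data pipeline.

In practice this construction and the accompanying test pipeline are usually wrapped by mmocr's inference helpers (see mmocr.apis) rather than being called directly as above.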