diff --git a/doctr/models/modules/transformer/pytorch.py b/doctr/models/modules/transformer/pytorch.py
index 63ad346c2f..190a12da63 100644
--- a/doctr/models/modules/transformer/pytorch.py
+++ b/doctr/models/modules/transformer/pytorch.py
@@ -37,7 +37,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         Returns:
             positional embeddings (batch, max_len, d_model)
         """
-        x = x + self.pe[:, : x.size(1)]  # type: ignore
+        x = x + self.pe[:, : x.size(1)]
         return self.dropout(x)
 
 
diff --git a/doctr/models/recognition/crnn/pytorch.py b/doctr/models/recognition/crnn/pytorch.py
index b1e50f1ad9..daf0e56e58 100644
--- a/doctr/models/recognition/crnn/pytorch.py
+++ b/doctr/models/recognition/crnn/pytorch.py
@@ -249,7 +249,7 @@ def _crnn(
         _cfg["input_shape"] = kwargs["input_shape"]
 
     # Build the model
-    model = CRNN(feat_extractor, cfg=_cfg, **kwargs)  # type: ignore[arg-type]
+    model = CRNN(feat_extractor, cfg=_cfg, **kwargs)
     # Load pretrained parameters
     if pretrained:
         # The number of classes is not the same as the number of classes in the pretrained model =>