Commit e060a26d authored by martynawiacek

Add try/catch clause for sentences with large number of wordpieces.

parent 0b63a2c3
Merge requests: !40, !41 (Add try/catch clause for sentences with large number of wordpieces.)
Pipeline #4283 failed
+import logging
+import sys
 from typing import Optional, Dict, Any, List, Tuple

 from allennlp import data
 from allennlp.data import token_indexers, tokenizers, IndexedTokenList, vocabulary
 from overrides import overrides
-from typing import List
+
+logger = logging.getLogger(__name__)


 @data.TokenIndexer.register("pretrained_transformer_mismatched_fixed")
 class PretrainedTransformerMismatchedIndexer(token_indexers.PretrainedTransformerMismatchedIndexer):
@@ -34,6 +35,7 @@ class PretrainedTransformerMismatchedIndexer(token_indexers.PretrainedTransformerMismatchedIndexer):
         The method is overridden to raise an error when the number of wordpieces needed to embed a sentence
         exceeds the maximal input length of the model.
         """
-        self._matched_indexer._add_encoding_to_vocabulary_if_needed(vocabulary)
-        wordpieces, offsets = self._allennlp_tokenizer.intra_word_tokenize(
+        try:
+            self._matched_indexer._add_encoding_to_vocabulary_if_needed(vocabulary)
+            wordpieces, offsets = self._allennlp_tokenizer.intra_word_tokenize(
@@ -57,6 +59,10 @@ class PretrainedTransformerMismatchedIndexer(token_indexers.PretrainedTransformerMismatchedIndexer):
-        return self._matched_indexer._postprocess_output(output)
+            return self._matched_indexer._postprocess_output(output)
+        except ValueError as value_error:
+            logger.error(value_error)
+            sys.exit(1)


 class PretrainedTransformerIndexer(token_indexers.PretrainedTransformerIndexer):
...
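Usage sketch (not part of the commit): the class registers under the type name "pretrained_transformer_mismatched_fixed", so it can be selected by that name in an AllenNLP configuration or instantiated directly. The snippet below is a minimal sketch assuming AllenNLP's standard PretrainedTransformerMismatchedIndexer constructor signature; the model name is only a placeholder, not one used by this repository.

from allennlp.data import Token, Vocabulary

# Assumes the PretrainedTransformerMismatchedIndexer subclass defined above is
# in scope; "bert-base-cased" is a placeholder model name.
indexer = PretrainedTransformerMismatchedIndexer(model_name="bert-base-cased")
vocab = Vocabulary()
tokens = [Token(word) for word in ["An", "ordinary", "sentence", "."]]

# Sentences that fit within the model's input limit are indexed as before.
indices = indexer.tokens_to_indices(tokens, vocab)

# A sentence whose wordpiece count exceeds the model's maximum now hits the
# new except-branch: the ValueError is logged and the process exits with
# status 1 instead of the exception propagating to the caller.

One design note: calling sys.exit(1) inside an indexer is a fail-fast choice. It aborts an entire training or preprocessing run on the first oversized sentence, which surfaces the problem immediately but prevents callers from skipping or truncating the offending instance.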