Commit 1abcab10 authored by martynawiacek

moved try/except to combo predictor

parent e060a26d
Merge request !41: Add try/catch clause for sentences with large number of wordpieces.
Pipeline #4333 passed
-import logging
-import sys
 from typing import Optional, Dict, Any, List, Tuple

 from allennlp import data
 from allennlp.data import token_indexers, tokenizers, IndexedTokenList, vocabulary
 from overrides import overrides

-logger = logging.getLogger(__name__)


 @data.TokenIndexer.register("pretrained_transformer_mismatched_fixed")
 class PretrainedTransformerMismatchedIndexer(token_indexers.PretrainedTransformerMismatchedIndexer):
@@ -35,7 +32,6 @@ class PretrainedTransformerMismatchedIndexer(token_indexers.PretrainedTransformerMismatchedIndexer):
         Method is overridden in order to raise an error when the number of tokens needed to embed a sentence exceeds the
         maximal input length of the model.
         """
-        try:
         self._matched_indexer._add_encoding_to_vocabulary_if_needed(vocabulary)
         wordpieces, offsets = self._allennlp_tokenizer.intra_word_tokenize(
@@ -59,10 +55,6 @@ class PretrainedTransformerMismatchedIndexer(token_indexers.PretrainedTransformerMismatchedIndexer):
         return self._matched_indexer._postprocess_output(output)
-        except ValueError as value_error:
-            logger.error(value_error)
-            sys.exit(1)

 class PretrainedTransformerIndexer(token_indexers.PretrainedTransformerIndexer):
...
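The body of the overridden indexing method between the two hunks is collapsed in the diff, so the length check itself is not visible here. As a rough, hypothetical sketch of the behaviour the docstring describes (the function name and the max_length parameter are assumptions for illustration, not the project's actual code), the idea is to refuse sentences whose wordpiece count exceeds the model's limit:

from typing import List, Optional

def check_wordpiece_count(wordpieces: List[str], max_length: Optional[int]) -> None:
    # Hypothetical guard, for illustration only: raise instead of silently
    # truncating when a sentence needs more wordpieces than the model accepts.
    if max_length is not None and len(wordpieces) > max_length:
        raise ValueError(
            f"Sentence requires {len(wordpieces)} wordpieces, "
            f"but the model accepts at most {max_length}."
        )

With the try/except removed from the indexer, such a ValueError now propagates out of the data pipeline instead of terminating the process from inside the indexer.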
 import logging
 import os
+import sys

 from typing import List, Union, Dict, Any

 import numpy as np
@@ -48,7 +49,12 @@ class COMBO(predictor.Predictor):
         :param sentence: sentence(s) representation
         :return: Sentence or List[Sentence] depending on the input
         """
+        try:
             return self.predict(sentence)
+        except Exception as e:
+            logger.error(e)
+            logger.error('Exiting.')
+            sys.exit(1)

     def predict(self, sentence: Union[str, List[str], List[List[str]], List[data.Sentence]]):
         if isinstance(sentence, str):
...
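Taken together, the two files move failure handling from the token indexer up to the user-facing predictor: the indexer now lets the ValueError escape, and the predictor logs it and exits. A minimal standalone sketch of the resulting call pattern (call_predictor is an illustrative helper, not the actual COMBO method, whose name falls outside the visible hunk):

import logging
import sys

logger = logging.getLogger(__name__)

def call_predictor(predictor, sentence):
    # Mirrors the wrapping added in this commit: any failure during prediction
    # (e.g. a ValueError for a sentence with too many wordpieces) is logged and
    # the program exits, instead of a raw traceback from the token indexer.
    try:
        return predictor.predict(sentence)
    except Exception as e:
        logger.error(e)
        logger.error('Exiting.')
        sys.exit(1)

Catching the error at the predictor level keeps the exit decision in one place and lets the indexer remain a plain data component.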