Skip to content
Snippets Groups Projects

Release 1.0.0b2.

3 files
+ 44
50
Compare changes
  • Side-by-side
  • Inline

Files

+ 6
5
@@ -29,8 +29,9 @@ class COMBO(predictor.Predictor):
super().__init__(model, dataset_reader)
self.batch_size = batch_size
self.vocab = model.vocab
self._dataset_reader.generate_labels = False
self._dataset_reader.lazy = True
self.dataset_reader = self._dataset_reader
self.dataset_reader.generate_labels = False
self.dataset_reader.lazy = True
self._tokenizer = tokenizer
self.without_sentence_embedding = False
self.line_to_conllu = line_to_conllu
@@ -112,7 +113,7 @@ class COMBO(predictor.Predictor):
tokens = sentence
else:
raise ValueError("Input must be either string or list of strings.")
return self._dataset_reader.text_to_instance(tokens2conllu(tokens))
return self.dataset_reader.text_to_instance(tokens2conllu(tokens))
@overrides
def load_line(self, line: str) -> common.JsonDict:
@@ -125,7 +126,7 @@ class COMBO(predictor.Predictor):
if self.without_sentence_embedding:
outputs.sentence_embedding = []
if self.line_to_conllu:
return sentence2conllu(outputs, keep_semrel=self._dataset_reader.use_sem).serialize()
return sentence2conllu(outputs, keep_semrel=self.dataset_reader.use_sem).serialize()
else:
return outputs.to_json()
@@ -134,7 +135,7 @@ class COMBO(predictor.Predictor):
return {"sentence": sentence}
def _to_input_instance(self, sentence: data.Sentence) -> allen_data.Instance:
    """Convert a `Sentence` into an AllenNLP `Instance` via the dataset reader.

    The sentence is first serialized to CoNLL-U form with `sentence2conllu`,
    then handed to the reader's `text_to_instance`.
    """
    # NOTE(review): the scraped diff showed both the pre-rename
    # (`self._dataset_reader`) and post-rename (`self.dataset_reader`)
    # return lines with no +/- markers; only the post-change line is kept,
    # consistent with the rename applied throughout this commit
    # (see the aliasing `self.dataset_reader = self._dataset_reader` hunk).
    return self.dataset_reader.text_to_instance(sentence2conllu(sentence))
def _predictions_as_tree(self, predictions: Dict[str, Any], instance: allen_data.Instance):
tree = instance.fields["metadata"]["input"]
Loading