diff --git a/combo/data/vocabulary.py b/combo/data/vocabulary.py index 3cf5fed37405635090686edbed331ae7e1ee554e..1c0a3843f2f4d555de04580f15b0184b8e094cc3 100644 --- a/combo/data/vocabulary.py +++ b/combo/data/vocabulary.py @@ -300,7 +300,6 @@ class Vocabulary(FromParameters): filename = os.path.join(directory, namespace_filename) vocab.set_from_file(filename, is_padded, namespace=namespace, oov_token=oov_token) - get_slices_if_not_provided(vocab) vocab.constructed_from = 'from_files' return vocab diff --git a/combo/main.py b/combo/main.py index a426b10e88f1f801a64f7e1d83b192a834c832b5..aeb1dc8ab7c5a834690cb579b0b8f163ed4b1e30 100755 --- a/combo/main.py +++ b/combo/main.py @@ -92,25 +92,11 @@ flags.DEFINE_enum(name="predictor_name", default="combo-lambo", help="Use predictor with whitespace, spacy or lambo (recommended) tokenizer.") -def get_saved_model(parameters) -> ComboModel: - return ComboModel.load(os.path.join(FLAGS.model_path), - config=parameters, - weights_file=os.path.join(FLAGS.model_path, 'best.th'), - cuda_device=FLAGS.cuda_device) - - def get_predictor() -> COMBO: - # Check for GPU - # allen_checks.check_for_gpu(FLAGS.cuda_device) checks.file_exists(FLAGS.model_path) - with open(os.path.join(FLAGS.model_path, 'params.json'), 'r') as f: - serialized = json.load(f) - model = get_saved_model(serialized) - if 'dataset_reader' in serialized: - dataset_reader = resolve(serialized['dataset_reader']) - else: - dataset_reader = default_ud_dataset_reader() - return COMBO(model, dataset_reader) + arch = load_archive(FLAGS.model_path) + dataset_reader = default_ud_dataset_reader() + return COMBO(arch.model, dataset_reader) def run(_):