diff --git a/new_experiment/add_to_queue_pipeline.py b/new_experiment/add_to_queue_pipeline.py
index ef3bf2f4c970732960815ac379735f4c1332bc14..55a4f9bcf0dad0b1b2140b03d8e2a06eaa9689d9 100644
--- a/new_experiment/add_to_queue_pipeline.py
+++ b/new_experiment/add_to_queue_pipeline.py
@@ -88,8 +88,8 @@ def main():
     connection = pika.BlockingConnection(parameters=parameters)
     channel = connection.channel()
     # add_whisper(channel)
-    add_facebook_hf_wav2vec2_asr(channel)
-    # add_facebook_hf_wav2vec2_pipeline(channel)
+    # add_facebook_hf_wav2vec2_asr(channel)
+    add_facebook_hf_wav2vec2_pipeline(channel)
     connection.close()
 
 
diff --git a/new_experiment/download_dataset_to_cache.py b/new_experiment/download_dataset_to_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..88b2e7e6b89dff289e2e1dd8d33479863243c449
--- /dev/null
+++ b/new_experiment/download_dataset_to_cache.py
@@ -0,0 +1,21 @@
+import argparse
+
+from new_experiment.new_dependency_provider import get_experiment_repository, get_minio_audio_record_repository
+from new_experiment.utils.loaded_remote_dataset_helper import LoadedRemoteDatasetHelper
+
+
+def download_dataset_to_cache(dataset_name: str):
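+    """Fetch every record of the dataset so later runs read the audio from the local cache."""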
+    repository = get_experiment_repository(dataset_name)
+    record_provider = LoadedRemoteDatasetHelper(repository, get_minio_audio_record_repository(), dataset_name)
+    for record_id in record_provider.get_all_records():
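+        # get_path is called for its side effect: it downloads the record into the local cache.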
+        record_provider.get_path(record_id)
+
+
+if __name__ == '__main__':
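+    # Example: python download_dataset_to_cache.py --dataset en_voxpopuli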
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--dataset", required=True)
+    args = parser.parse_args()
+    download_dataset_to_cache(args.dataset)
diff --git a/new_experiment/hf_asr/import_nvidia_nemo_asr_result.py b/new_experiment/hf_asr/import_nvidia_nemo_asr_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..caefe511c3bd9c98e77ac92693bbc7833ce63705
--- /dev/null
+++ b/new_experiment/hf_asr/import_nvidia_nemo_asr_result.py
@@ -0,0 +1,44 @@
+import json
+from pathlib import Path
+
+from new_experiment.new_dependency_provider import get_experiment_repository
+from sziszapangma.model.model_creators import create_new_word
+
+
+def load_nemo_asr_results(dataset_name: str):
+    repository = get_experiment_repository(dataset_name)
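+    # The cached file is JSONL: one prediction object per line.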
+    jsonl_path = Path.home() / f'.cache/asr_benchmarks/{dataset_name}.json'
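+    # Dataset names are prefixed with a two-letter language code, e.g. 'de_voxpopuli' -> 'de'.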
+    language_code = dataset_name[:2]
+    with open(jsonl_path, 'r') as reader:
+        lines = reader.read().splitlines(keepends=False)
+        for json_line in lines:
+            parsed_json = json.loads(json_line)
+            file_name = parsed_json['audio_filepath'].split('/')[-1]
+            print(file_name)
+            # The record id is the file name with its 4-character extension (e.g. '.wav') stripped.
+            record_id = file_name[:-4]
+            print(record_id)
+            transcript = parsed_json['pred_text']
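+            # The manifest provides only the predicted text, so words_time_alignment stays None.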
+            asr_result = {
+                "transcription": [create_new_word(it) for it in transcript.split()],
+                "full_text": transcript,
+                "words_time_alignment": None
+            }
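+            # Property name matches the NVIDIA NeMo Conformer-Transducer Large model for this language.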
+            property_name = f'nvidia_stt_{language_code}_conformer_transducer_large'
+            repository.update_property_for_key(record_id, property_name, asr_result)
+
+
+if __name__ == '__main__':
+    # Import results for every language/dataset pair used in the experiments.
+    for dataset in [
+        'de_google_fleurs', 'de_minds14', 'de_voxpopuli',
+        'en_google_fleurs', 'en_minds14', 'en_voxpopuli',
+        'es_google_fleurs', 'es_minds14', 'es_voxpopuli',
+        'fr_google_fleurs', 'fr_minds14', 'fr_voxpopuli',
+        'it_google_fleurs', 'it_minds14', 'it_voxpopuli',
+    ]:
+        load_nemo_asr_results(dataset)