diff --git a/experiment/pipeline_process_wikineural_ner.py b/experiment/pipeline_process_wikineural_ner.py
index 8526f53c512cef4971e2bf4e67f539340bf68b9d..c0ff45f70b81a0084995b2d5cd8bee9a94c72c01 100644
--- a/experiment/pipeline_process_wikineural_ner.py
+++ b/experiment/pipeline_process_wikineural_ner.py
@@ -24,8 +24,7 @@ def run_word_wer_pipeline(dataset_name: str, asr_name: str):
                 require_update=False
             )
         ],
-        experiment_repository=get_repository(dataset_name),
-        relation_manager_provider=record_provider
+        experiment_repository=get_repository(dataset_name)
     )
     experiment_processor.process()
 
diff --git a/new_experiment/hf_asr/__init__.py b/new_experiment/hf_asr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/new_experiment/hf_asr/dutch.py b/new_experiment/hf_asr/dutch.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ff11f0f8dcfc88aa4212e8578fe99f3a4244daa
--- /dev/null
+++ b/new_experiment/hf_asr/dutch.py
@@ -0,0 +1,30 @@
+from transformers import (Wav2Vec2ForCTC, Wav2Vec2Processor)
+import torch
+
+from scipy.io import wavfile
+
+from new_experiment.hf_asr.wav2vec2_hf import Wav2Vec2AsrProcessor
+
+if __name__ == '__main__':
+    # print('start dutch')
+    # model_name = "facebook/wav2vec2-large-xlsr-53-dutch"
+    # # device = "cuda"
+    # device = "cpu"
+    # chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"]'  # noqa: W605
+    #
+    # model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
+    # print('model ready')
+    # processor = Wav2Vec2Processor.from_pretrained(model_name)
+    # print('processor ready')
+    #
+    # samplerate, data = wavfile.read('/Users/marcinwatroba/Desktop/001dffb13ab67884b30e75d670bd23e4a4c8e5ca.wav')
+    #
+    # features = processor(data, sampling_rate=samplerate, padding=True, return_tensors="pt")
+    # input_values = features.input_values.to(device)
+    # attention_mask = features.attention_mask.to(device)
+    # with torch.no_grad():
+    #     logits = model(input_values, attention_mask=attention_mask).logits
+    # pred_ids = torch.argmax(logits, dim=-1)
+    # result = processor.batch_decode(pred_ids)
+    processor = Wav2Vec2AsrProcessor()  # smoke test: verifies model/processor download and construction
+
diff --git a/new_experiment/hf_asr/wav2vec2_hf.py b/new_experiment/hf_asr/wav2vec2_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9e06a0422872a5fc964242e39f127f2ef0fc945
--- /dev/null
+++ b/new_experiment/hf_asr/wav2vec2_hf.py
@@ -0,0 +1,38 @@
+from typing import Dict, Any
+
+import librosa
+import soundfile
+import torch
+from scipy.io import wavfile
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+
+from sziszapangma.integration.asr_processor import AsrProcessor
+from sziszapangma.model.model_creators import create_new_word
+
+
+class Wav2Vec2AsrProcessor(AsrProcessor):
+    _model: Wav2Vec2ForCTC
+    _wav2vec2_processor: Wav2Vec2Processor
+    _device: str
+
+    def __init__(self, model_name: str = "facebook/wav2vec2-large-xlsr-53-dutch"):
+        self._device = 'cuda' if torch.cuda.is_available() else 'cpu'
+        self._model = Wav2Vec2ForCTC.from_pretrained(model_name).to(self._device)
+        self._wav2vec2_processor = Wav2Vec2Processor.from_pretrained(model_name)
+
+    def call_recognise(self, file_path: str) -> Dict[str, Any]:
+        # samplerate, data = wavfile.read(file_path)
+        # data, samplerate = soundfile.read(file_path)
+        data, samplerate = librosa.load(file_path, sr=16_000)  # wav2vec2 XLSR expects 16 kHz; librosa's default sr=22050 would mismatch
+        features = self._wav2vec2_processor(data, sampling_rate=samplerate, padding=True, return_tensors="pt")
+        input_values = features.input_values.to(self._device)
+        attention_mask = features.attention_mask.to(self._device)
+        with torch.no_grad():
+            logits = self._model(input_values, attention_mask=attention_mask).logits
+        pred_ids = torch.argmax(logits, dim=-1)
+        result = self._wav2vec2_processor.batch_decode(pred_ids)[0]
+        return {
+            "transcription": [create_new_word(it) for it in result.split()],
+            "full_text": result,
+            "words_time_alignment": None
+        }
diff --git a/new_experiment/pipeline/pipeline_process_asr.py b/new_experiment/pipeline/pipeline_process_asr.py
new file mode 100644
index 0000000000000000000000000000000000000000..79fe1cb2021faf4ac79104d92eb1e0144dcfb00e
--- /dev/null
+++ b/new_experiment/pipeline/pipeline_process_asr.py
@@ -0,0 +1,38 @@
+from new_experiment.hf_asr.wav2vec2_hf import Wav2Vec2AsrProcessor
+from new_experiment.new_dependency_provider import get_experiment_repository, get_minio_audio_record_repository
+from new_experiment.utils.loaded_remote_dataset_helper import LoadedRemoteDatasetHelper
+from new_experiment.utils.property_helper import PropertyHelper
+from sziszapangma.integration.asr_processor import AsrProcessor
+from sziszapangma.integration.experiment_manager import ExperimentManager
+from sziszapangma.integration.task.asr_task import AsrTask
+
+
+def get_asr_processor(asr_name: str) -> AsrProcessor:
+    if asr_name == 'facebook_wav2vec2_large_xlsr_53_dutch':
+        return Wav2Vec2AsrProcessor('facebook/wav2vec2-large-xlsr-53-dutch')
+    raise ValueError(f'AsrProcessor not found for name: {asr_name}')
+
+
+def run_asr_pipeline(dataset_name: str, asr_name: str):
+    repository = get_experiment_repository(dataset_name)
+    record_provider = LoadedRemoteDatasetHelper(repository, get_minio_audio_record_repository(), dataset_name)
+    experiment_processor = ExperimentManager(
+        record_id_iterator=record_provider,
+        processing_tasks=[
+            AsrTask(
+                asr_property_name=PropertyHelper.asr_result(asr_name),
+                task_name=f'AsrTask___{dataset_name}___{asr_name}',
+                require_update=False,
+                asr_processor=get_asr_processor(asr_name),
+                record_path_provider=record_provider
+            )
+        ],
+        experiment_repository=repository,
+    )
+    experiment_processor.process()
+
+
+if __name__ == '__main__':
+    run_asr_pipeline('nl_minds14', 'facebook_wav2vec2_large_xlsr_53_dutch')
+    run_asr_pipeline('nl_google_fleurs', 'facebook_wav2vec2_large_xlsr_53_dutch')
+    run_asr_pipeline('nl_voxpopuli', 'facebook_wav2vec2_large_xlsr_53_dutch')
diff --git a/new_experiment/pipeline/pipeline_process_wikineural_ner_wer.py b/new_experiment/pipeline/pipeline_process_wikineural_ner_wer.py
new file mode 100644
index 0000000000000000000000000000000000000000..761abb4315150b7fdd9e47c4f531250b038c4507
--- /dev/null
+++ b/new_experiment/pipeline/pipeline_process_wikineural_ner_wer.py
@@ -0,0 +1,34 @@
+
+from experiment.sentence_wer_processor.wikineural_multilingual_ner_transformers_wer_processor_base import \
+    WikineuralMultilingualNerTransformersWerProcessorBase
+from new_experiment.new_dependency_provider import get_experiment_repository, get_minio_audio_record_repository
+from new_experiment.utils.loaded_remote_dataset_helper import LoadedRemoteDatasetHelper
+
+from new_experiment.utils.property_helper import PropertyHelper
+from sziszapangma.integration.experiment_manager import ExperimentManager
+
+_WIKINEURAL = 'wikineural'
+
+
+def run_wikineural_ner_pipeline(dataset_name: str, asr_name: str):
+    repository = get_experiment_repository(dataset_name)
+    record_provider = LoadedRemoteDatasetHelper(repository, get_minio_audio_record_repository(), dataset_name)
+    experiment_processor = ExperimentManager(
+        record_id_iterator=record_provider,
+        processing_tasks=[
+            WikineuralMultilingualNerTransformersWerProcessorBase(
+                gold_transcript_property_name=PropertyHelper.get_gold_transcript_raw(),
+                asr_property_name=PropertyHelper.asr_result(asr_name),
+                alignment_property_name=PropertyHelper.ner_alignment(asr_name, _WIKINEURAL),
+                wer_property_name=PropertyHelper.ner_metrics(asr_name, _WIKINEURAL),
+                task_name=f'WikineuralMultilingualNerTransformersWerProcessorBase___{dataset_name}___{asr_name}',
+                require_update=False
+            )
+        ],
+        experiment_repository=repository
+    )
+    experiment_processor.process()
+
+
+if __name__ == '__main__':
+    run_wikineural_ner_pipeline('de_minds14', 'whisper_tiny')
diff --git a/new_experiment/utils/loaded_remote_dataset_helper.py b/new_experiment/utils/loaded_remote_dataset_helper.py
index 2ead6e3ce754e57bfbef2ed6d017fb65df5fc388..74ffdfbedfd910629baef51c6e28f8ddb3260f42 100644
--- a/new_experiment/utils/loaded_remote_dataset_helper.py
+++ b/new_experiment/utils/loaded_remote_dataset_helper.py
@@ -23,8 +23,9 @@ class LoadedRemoteDatasetHelper(DatasetHelper):
         return self._experiment_repository.get_all_record_ids_for_property(PropertyHelper.get_gold_transcript_words())
 
     def get_path(self, record_id: str) -> str:
+        # Return the cached local path; download from MinIO first on a cache miss.
         record_path = Path.home() / f'.cache/asr_benchmark/{self._dataset_name}/{record_id}.wav'
         if record_path.exists():
             return record_path.as_posix()
-        self._minio_audio_record_repository.save_file(record_path, self._dataset_name, record_id)
+        self._minio_audio_record_repository.load_file(record_path, self._dataset_name, record_id)
         return record_path.as_posix()
diff --git a/new_experiment/utils/minio_audio_record_repository.py b/new_experiment/utils/minio_audio_record_repository.py
index 0c699c8f5c71566988b05312ae798d7eaca30866..366d83d4d78ce3fc61349a5b53961cca2d5a675e 100644
--- a/new_experiment/utils/minio_audio_record_repository.py
+++ b/new_experiment/utils/minio_audio_record_repository.py
@@ -21,6 +21,7 @@ class MinioAudioRecordRepository:
                                len(open(local_path, 'rb').read()))
 
     def load_file(self, local_path: Path, dataset_name: str, record_id: str):
+        # Fetch the audio object from MinIO and materialise it at local_path.
         record_response: HTTPResponse = self._minio.get_object(self._bucket,
                                                                self._get_record_path(dataset_name, record_id))
         local_path.parent.mkdir(parents=True, exist_ok=True)
diff --git a/new_experiment/worker.py b/new_experiment/worker.py
index bdc90229b37d4a6d1e4df6382dda35a8139a7ff3..70dd0b2b19d5db2c1476a60fca968fe2a2081731 100644
--- a/new_experiment/worker.py
+++ b/new_experiment/worker.py
@@ -1,7 +1,11 @@
 import json
 import os
 
+import functools
+import logging
 import pika
+import threading
+import time
 
 from new_experiment.pipeline.pipeline_process_spacy_dep_tag_wer import run_spacy_dep_tag_wer_pipeline
 from new_experiment.pipeline.pipeline_process_spacy_ner_wer import run_spacy_ner_wer_pipeline
@@ -20,7 +24,8 @@ _RABBIT_URL = get_param('RABBIT_URL',
 
 def main():
     parameters = pika.URLParameters(_RABBIT_URL)
-    parameters._heartbeat = 65535
+    parameters.heartbeat = 0  # 0 disables heartbeats; use the validating public property, not the private attribute
+    # parameters._heartbeat = 65535
     connection = pika.BlockingConnection(parameters=parameters)
     channel = connection.channel()
     channel.basic_qos(prefetch_count=1)
@@ -55,5 +60,71 @@ def main():
     connection.close()
 
 
+def new_main():
+    LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
+                  '-35s %(lineno) -5d: %(message)s')
+    LOGGER = logging.getLogger(__name__)
+
+    logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
+
+    def ack_message(channel, delivery_tag):
+        """Note that `channel` must be the same pika channel instance via which
+        the message being ACKed was retrieved (AMQP protocol constraint).
+        """
+        if channel.is_open:
+            channel.basic_ack(delivery_tag)
+        else:
+            # Channel is already closed, so we can't ACK this message;
+            # log and/or do something that makes sense for your app in this case.
+            pass
+
+    def do_work(connection, channel, delivery_tag, body):
+        thread_id = threading.get_ident()
+        fmt1 = 'Thread id: {} Delivery tag: {} Message body: {}'
+        LOGGER.info(fmt1.format(thread_id, delivery_tag, body))
+        # Sleeping to simulate 10 seconds of work
+        time.sleep(10)
+        cb = functools.partial(ack_message, channel, delivery_tag)
+        connection.add_callback_threadsafe(cb)
+
+    def on_message(channel, method_frame, header_frame, body, args):
+        (connection, threads) = args
+        delivery_tag = method_frame.delivery_tag
+        t = threading.Thread(target=do_work, args=(connection, channel, delivery_tag, body))
+        t.start()
+        threads.append(t)
+
+    credentials = pika.PlainCredentials('guest', 'guest')
+    # Note: sending a short heartbeat to prove that heartbeats are still
+    # sent even though the worker simulates long-running work
+    parameters = pika.ConnectionParameters('localhost', credentials=credentials, heartbeat=5)
+    connection = pika.BlockingConnection(parameters)
+
+    channel = connection.channel()
+    channel.exchange_declare(exchange="test_exchange", exchange_type="direct", passive=False, durable=True,
+                             auto_delete=False)
+    channel.queue_declare(queue="standard", auto_delete=True)
+    channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key")
+    # Note: prefetch is set to 1 here as an example only and to keep the number of threads created
+    # to a reasonable amount. In production you will want to test with different prefetch values
+    # to find which one provides the best performance and usability for your solution
+    channel.basic_qos(prefetch_count=1)
+
+    threads = []
+    on_message_callback = functools.partial(on_message, args=(connection, threads))
+    channel.basic_consume('standard', on_message_callback)
+
+    try:
+        channel.start_consuming()
+    except KeyboardInterrupt:
+        channel.stop_consuming()
+
+    # Wait for all to complete
+    for thread in threads:
+        thread.join()
+
+    connection.close()
+
+
 if __name__ == '__main__':
     main()
diff --git a/poetry.lock b/poetry.lock
index 570402e851cf0d9435e7ee8296bcfd7389e113b7..e465bba9f32102d8173879e751568b55d06c3fab 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -5357,6 +5357,57 @@ webencodings = ">=0.4"
 doc = ["sphinx", "sphinx_rtd_theme"]
 test = ["flake8", "isort", "pytest"]
 
+[[package]]
+name = "tokenizers"
+version = "0.13.2"
+description = "Fast and Customizable Tokenizers"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+    {file = "tokenizers-0.13.2-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:a6f36b1b499233bb4443b5e57e20630c5e02fba61109632f5e00dab970440157"},
+    {file = "tokenizers-0.13.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:bc6983282ee74d638a4b6d149e5dadd8bc7ff1d0d6de663d69f099e0c6bddbeb"},
+    {file = "tokenizers-0.13.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16756e6ab264b162f99c0c0a8d3d521328f428b33374c5ee161c0ebec42bf3c0"},
+    {file = "tokenizers-0.13.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b10db6e4b036c78212c6763cb56411566edcf2668c910baa1939afd50095ce48"},
+    {file = "tokenizers-0.13.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:238e879d1a0f4fddc2ce5b2d00f219125df08f8532e5f1f2ba9ad42f02b7da59"},
+    {file = "tokenizers-0.13.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47ef745dbf9f49281e900e9e72915356d69de3a4e4d8a475bda26bfdb5047736"},
+    {file = "tokenizers-0.13.2-cp310-cp310-win32.whl", hash = "sha256:96cedf83864bcc15a3ffd088a6f81a8a8f55b8b188eabd7a7f2a4469477036df"},
+    {file = "tokenizers-0.13.2-cp310-cp310-win_amd64.whl", hash = "sha256:eda77de40a0262690c666134baf19ec5c4f5b8bde213055911d9f5a718c506e1"},
+    {file = "tokenizers-0.13.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a689654fc745135cce4eea3b15e29c372c3e0b01717c6978b563de5c38af9811"},
+    {file = "tokenizers-0.13.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3606528c07cda0566cff6cbfbda2b167f923661be595feac95701ffcdcbdbb21"},
+    {file = "tokenizers-0.13.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:41291d0160946084cbd53c8ec3d029df3dc2af2673d46b25ff1a7f31a9d55d51"},
+    {file = "tokenizers-0.13.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7892325f9ca1cc5fca0333d5bfd96a19044ce9b092ce2df625652109a3de16b8"},
+    {file = "tokenizers-0.13.2-cp311-cp311-win32.whl", hash = "sha256:93714958d4ebe5362d3de7a6bd73dc86c36b5af5941ebef6c325ac900fa58865"},
+    {file = "tokenizers-0.13.2-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:da521bfa94df6a08a6254bb8214ea04854bb9044d61063ae2529361688b5440a"},
+    {file = "tokenizers-0.13.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a739d4d973d422e1073989769723f3b6ad8b11e59e635a63de99aea4b2208188"},
+    {file = "tokenizers-0.13.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cac01fc0b868e4d0a3aa7c5c53396da0a0a63136e81475d32fcf5c348fcb2866"},
+    {file = "tokenizers-0.13.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0901a5c6538d2d2dc752c6b4bde7dab170fddce559ec75662cfad03b3187c8f6"},
+    {file = "tokenizers-0.13.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ba9baa76b5a3eefa78b6cc351315a216232fd727ee5e3ce0f7c6885d9fb531b"},
+    {file = "tokenizers-0.13.2-cp37-cp37m-win32.whl", hash = "sha256:a537061ee18ba104b7f3daa735060c39db3a22c8a9595845c55b6c01d36c5e87"},
+    {file = "tokenizers-0.13.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c82fb87b1cbfa984d8f05b2b3c3c73e428b216c1d4f0e286d0a3b27f521b32eb"},
+    {file = "tokenizers-0.13.2-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:ce298605a833ac7f81b8062d3102a42dcd9fa890493e8f756112c346339fe5c5"},
+    {file = "tokenizers-0.13.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a51b93932daba12ed07060935978a6779593a59709deab04a0d10e6fd5c29e60"},
+    {file = "tokenizers-0.13.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6969e5ea7ccb909ce7d6d4dfd009115dc72799b0362a2ea353267168667408c4"},
+    {file = "tokenizers-0.13.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:92f040c4d938ea64683526b45dfc81c580e3b35aaebe847e7eec374961231734"},
+    {file = "tokenizers-0.13.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d3bc9f7d7f4c1aa84bb6b8d642a60272c8a2c987669e9bb0ac26daf0c6a9fc8"},
+    {file = "tokenizers-0.13.2-cp38-cp38-win32.whl", hash = "sha256:efbf189fb9cf29bd29e98c0437bdb9809f9de686a1e6c10e0b954410e9ca2142"},
+    {file = "tokenizers-0.13.2-cp38-cp38-win_amd64.whl", hash = "sha256:0b4cb2c60c094f31ea652f6cf9f349aae815f9243b860610c29a69ed0d7a88f8"},
+    {file = "tokenizers-0.13.2-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:b47d6212e7dd05784d7330b3b1e5a170809fa30e2b333ca5c93fba1463dec2b7"},
+    {file = "tokenizers-0.13.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:80a57501b61ec4f94fb7ce109e2b4a1a090352618efde87253b4ede6d458b605"},
+    {file = "tokenizers-0.13.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61507a9953f6e7dc3c972cbc57ba94c80c8f7f686fbc0876afe70ea2b8cc8b04"},
+    {file = "tokenizers-0.13.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c09f4fa620e879debdd1ec299bb81e3c961fd8f64f0e460e64df0818d29d845c"},
+    {file = "tokenizers-0.13.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:66c892d85385b202893ac6bc47b13390909e205280e5df89a41086cfec76fedb"},
+    {file = "tokenizers-0.13.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3e306b0941ad35087ae7083919a5c410a6b672be0343609d79a1171a364ce79"},
+    {file = "tokenizers-0.13.2-cp39-cp39-win32.whl", hash = "sha256:79189e7f706c74dbc6b753450757af172240916d6a12ed4526af5cc6d3ceca26"},
+    {file = "tokenizers-0.13.2-cp39-cp39-win_amd64.whl", hash = "sha256:486d637b413fddada845a10a45c74825d73d3725da42ccd8796ccd7a1c07a024"},
+    {file = "tokenizers-0.13.2.tar.gz", hash = "sha256:f9525375582fd1912ac3caa2f727d36c86ff8c0c6de45ae1aaff90f87f33b907"},
+]
+
+[package.extras]
+dev = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"]
+docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"]
+testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"]
+
 [[package]]
 name = "tomli"
 version = "2.0.1"
@@ -5512,6 +5563,72 @@ files = [
 docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
 test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"]
 
+[[package]]
+name = "transformers"
+version = "4.25.1"
+description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow"
+category = "main"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+    {file = "transformers-4.25.1-py3-none-any.whl", hash = "sha256:60f1be15e17e4a54373c787c713ec149dabcc63464131ac45611618fe7c2016e"},
+    {file = "transformers-4.25.1.tar.gz", hash = "sha256:6dad398b792d45dc04e9ee7e9e06bf758ab19dca2efc119065e661bb0f8f843b"},
+]
+
+[package.dependencies]
+filelock = "*"
+huggingface-hub = ">=0.10.0,<1.0"
+numpy = ">=1.17"
+packaging = ">=20.0"
+pyyaml = ">=5.1"
+regex = "!=2019.12.17"
+requests = "*"
+tokenizers = ">=0.11.1,<0.11.3 || >0.11.3,<0.14"
+tqdm = ">=4.27"
+
+[package.extras]
+accelerate = ["accelerate (>=0.10.0)"]
+all = ["Pillow", "accelerate (>=0.10.0)", "codecarbon (==1.2.0)", "flax (>=0.4.1)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8)", "optuna", "phonemizer", "protobuf (<=3.20.2)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.11)", "tensorflow-text", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.7,!=1.12.0)", "torchaudio"]
+audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
+codecarbon = ["codecarbon (==1.2.0)"]
+deepspeed = ["accelerate (>=0.10.0)", "deepspeed (>=0.6.5)"]
+deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.10.0)", "beautifulsoup4", "black (==22.3)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.6.5)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf (<=3.20.2)", "psutil", "pytest", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "timeout-decorator"]
+dev = ["GitPython (<3.1.19)", "Pillow", "accelerate (>=0.10.0)", "beautifulsoup4", "black (==22.3)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flake8 (>=3.8.3)", "flax (>=0.4.1)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.2)", "psutil", "pyctcdecode (>=0.4.0)", "pyknp (>=0.6.1)", "pytest", "pytest-timeout", "pytest-xdist", "ray[tune]", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorflow (>=2.4,<2.11)", "tensorflow-text", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.7,!=1.12.0)", "torchaudio", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"]
+dev-tensorflow = ["GitPython (<3.1.19)", "Pillow", "beautifulsoup4", "black (==22.3)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flake8 (>=3.8.3)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf (<=3.20.2)", "psutil", "pyctcdecode (>=0.4.0)", "pytest", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>=2.4,<2.11)", "tensorflow-text", "tf2onnx", "timeout-decorator", "tokenizers (>=0.11.1,!=0.11.3,<0.14)"]
+dev-torch = ["GitPython (<3.1.19)", "Pillow", "beautifulsoup4", "black (==22.3)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flake8 (>=3.8.3)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.2)", "psutil", "pyctcdecode (>=0.4.0)", "pyknp (>=0.6.1)", "pytest", "pytest-timeout", "pytest-xdist", "ray[tune]", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.7,!=1.12.0)", "torchaudio", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"]
+docs = ["Pillow", "accelerate (>=0.10.0)", "codecarbon (==1.2.0)", "flax (>=0.4.1)", "hf-doc-builder", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8)", "optuna", "phonemizer", "protobuf (<=3.20.2)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.11)", "tensorflow-text", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.7,!=1.12.0)", "torchaudio"]
+docs-specific = ["hf-doc-builder"]
+fairscale = ["fairscale (>0.3)"]
+flax = ["flax (>=0.4.1)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "optax (>=0.0.8)"]
+flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
+ftfy = ["ftfy"]
+integrations = ["optuna", "ray[tune]", "sigopt"]
+ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "pyknp (>=0.6.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"]
+modelcreation = ["cookiecutter (==1.7.3)"]
+natten = ["natten (>=0.14.4)"]
+onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"]
+onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"]
+optuna = ["optuna"]
+quality = ["GitPython (<3.1.19)", "black (==22.3)", "datasets (!=2.5.0)", "flake8 (>=3.8.3)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)"]
+ray = ["ray[tune]"]
+retrieval = ["datasets (!=2.5.0)", "faiss-cpu"]
+sagemaker = ["sagemaker (>=2.31.0)"]
+sentencepiece = ["protobuf (<=3.20.2)", "sentencepiece (>=0.1.91,!=0.1.92)"]
+serving = ["fastapi", "pydantic", "starlette", "uvicorn"]
+sigopt = ["sigopt"]
+sklearn = ["scikit-learn"]
+speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
+testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (==22.3)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf (<=3.20.2)", "psutil", "pytest", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "timeout-decorator"]
+tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.4,<2.11)", "tensorflow-text", "tf2onnx"]
+tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.4,<2.11)", "tensorflow-text", "tf2onnx"]
+tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
+timm = ["timm"]
+tokenizers = ["tokenizers (>=0.11.1,!=0.11.3,<0.14)"]
+torch = ["torch (>=1.7,!=1.12.0)"]
+torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
+torchhub = ["filelock", "huggingface-hub (>=0.10.0,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf (<=3.20.2)", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.7,!=1.12.0)", "tqdm (>=4.27)"]
+vision = ["Pillow"]
+
 [[package]]
 name = "typer"
 version = "0.7.0"
@@ -6025,4 +6142,4 @@ testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools"
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.8,<3.12"
-content-hash = "4ba6bc6633835103fe1fc4818d8bd408552ee78cde9c17b406e9be200dea28ee"
+content-hash = "db5e937e072440888cfbd5f39f302932dddf5b43ad94fdddc6c7c88841f89700"
diff --git a/pyproject.toml b/pyproject.toml
index 26750172549cb1d252679e44e3a543698d6e1c41..92a8240c33bbd05cad61b8a160f3dcf523f7e9e4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -34,6 +34,7 @@ pika = "^1.3.1"
 pyopenssl = "^23.0.0"
 nltk = "^3.8.1"
 jupyterlab = "^3.5.2"
+transformers = "^4.25.1"
 
 [tool.poetry.group.dev.dependencies]
 pytest = "^7.2.0"
diff --git a/sziszapangma/integration/task/asr_task.py b/sziszapangma/integration/task/asr_task.py
index 3c8f444426c7565037603c02c5e3f8d8d1201b8b..5700195d33f314ad1283b9b5d2ec87704ff2b196 100644
--- a/sziszapangma/integration/task/asr_task.py
+++ b/sziszapangma/integration/task/asr_task.py
@@ -32,8 +32,7 @@ class AsrTask(ProcessingTask):
     def run_single_process(
         self,
         record_id: str,
-        experiment_repository: ExperimentRepository,
-        relation_manager: RelationManager,
+        experiment_repository: ExperimentRepository
     ) -> None:
         file_record_path = self._record_path_provider.get_path(record_id)
         print('before call_recognise', flush=True)