diff --git a/combo/data/tokenizers/lambo_tokenizer.py b/combo/data/tokenizers/lambo_tokenizer.py
index e88f098393058ec3fa7ffabbd05ff6d22e528fa6..abb4e33ad9275bb71f72c584e994935b1e6e288d 100644
--- a/combo/data/tokenizers/lambo_tokenizer.py
+++ b/combo/data/tokenizers/lambo_tokenizer.py
@@ -84,6 +84,17 @@ class LamboTokenizer(Tokenizer):
                     _reset_idx()
                     sentence_tokens = []
                     for token in sentence.tokens:
+                        if len(token.subwords) > 0 and split_subwords:
+                            # TODO: quick workaround for a shortcoming of the LAMBO model:
+                            # for longer words split into multiwords, it tends to drop
+                            # the last letter of the final subword.
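+                            # e.g. a token with text "abcdef" might come back with
+                            # subwords ["abc", "de"] (hypothetical illustration)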
+
+                            # check if subwords in token.subwords are consistent with token.text
+                            if "".join(token.subwords) != token.text:
+                                fixed_subwords = fix_subwords(token)
+                                token.subwords = fixed_subwords
                         sentence_tokens.extend(_sentence_tokens(token, split_subwords))
                     tokens.append(sentence_tokens)
         else:
@@ -130,17 +139,7 @@ class LamboTokenizer(Tokenizer):
 
                         # check if subwords in token.subwords are consistent with token.text
                         if "".join(token.subwords) != token.text:
-                            fixed_subwords = []
-                            text_it = 0
-                            for i, subword in enumerate(token.subwords):
-                                if token.text[text_it:text_it + len(subword)] == subword:
-                                    if i == len(token.subwords) - 1 and (text_it + len(subword) < len(token.text)):
-                                        subword = token.text[text_it:]
-                                    fixed_subwords.append(subword)
-                                    text_it += len(subword)
-                                else:
-                                    fixed_subwords.append(token.text[text_it:text_it + len(subword)])
-                                    text_it += len(subword)
+                            fixed_subwords = fix_subwords(token)
                             token.subwords = fixed_subwords
                         # sentence_tokens.extend(_sentence_tokens(token, split_subwords))
                     # else:
@@ -151,3 +150,24 @@ class LamboTokenizer(Tokenizer):
                 sentences.append(sentence_tokens)
 
         return sentences
+
+
+def fix_subwords(token: Token):
+    """Rebuild token.subwords so that their concatenation matches token.text."""
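+    # Illustrative example (hypothetical values; assumes only the `text` and
+    # `subwords` attributes of Token that are used below):
+    #   token.text == "abcdef", token.subwords == ["abc", "de"]
+    #   fix_subwords(token) -> ["abc", "def"]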
+    fixed_subwords = []
+    text_it = 0
+    for i, subword in enumerate(token.subwords):
+        if token.text[text_it:text_it + len(subword)] != subword:
+            # Mismatched subword: substitute the corresponding slice of token.text.
+            subword = token.text[text_it:text_it + len(subword)]
+        elif i == len(token.subwords) - 1 and text_it + len(subword) < len(token.text):
+            # LAMBO sometimes truncates the last subword; extend it to cover
+            # the remainder of the token text.
+            subword = token.text[text_it:]
+        fixed_subwords.append(subword)
+        text_it += len(subword)
+    return fixed_subwords