diff --git a/example_inputs/marek_kowalski_pojechal_do_wroclawia.ccl b/example_inputs/marek_kowalski_pojechal_do_wroclawia.ccl
index b19c4001e36a636590731e21ea65858e61f31073..f1459bae1dfce8f4fe73951895dc5e2c8762d90f 100644
--- a/example_inputs/marek_kowalski_pojechal_do_wroclawia.ccl
+++ b/example_inputs/marek_kowalski_pojechal_do_wroclawia.ccl
@@ -7,39 +7,45 @@
     <orth>Marek</orth>
     <lex disamb="1"><base>Marek</base><ctag>subst:sg:nom:m1</ctag></lex>
     <lex disamb="1"><base>marek</base><ctag>subst:sg:nom:m1</ctag></lex>
-    <ann chan="nam_liv" head="1">1</ann>
-    <ann chan="nam_loc">0</ann>
+    <ann chan="person_first_nam" head="1">1</ann>
+    <ann chan="person_last_nam">0</ann>
+    <ann chan="city_nam">0</ann>
    </tok>
    <tok>
     <orth>Kowalski</orth>
     <lex disamb="1"><base>Kowalski</base><ctag>subst:sg:nom:m1</ctag></lex>
-    <ann chan="nam_liv">1</ann>
-    <ann chan="nam_loc">0</ann>
+    <ann chan="person_first_nam">0</ann>
+    <ann chan="person_last_nam" head="1">1</ann>
+    <ann chan="city_nam">0</ann>
    </tok>
    <tok>
     <orth>pojechał</orth>
     <lex disamb="1"><base>pojechać</base><ctag>praet:sg:m1:perf</ctag></lex>
-    <ann chan="nam_liv">0</ann>
-    <ann chan="nam_loc">0</ann>
+    <ann chan="person_first_nam">0</ann>
+    <ann chan="person_last_nam">0</ann>
+    <ann chan="city_nam">0</ann>
    </tok>
    <tok>
     <orth>do</orth>
     <lex disamb="1"><base>do</base><ctag>prep:gen</ctag></lex>
-    <ann chan="nam_liv">0</ann>
-    <ann chan="nam_loc">0</ann>
+    <ann chan="person_first_nam">0</ann>
+    <ann chan="person_last_nam">0</ann>
+    <ann chan="city_nam">0</ann>
    </tok>
    <tok>
     <orth>Wrocławia</orth>
     <lex disamb="1"><base>Wrocław</base><ctag>subst:sg:gen:m3</ctag></lex>
-    <ann chan="nam_liv">0</ann>
-    <ann chan="nam_loc" head="1">1</ann>
+    <ann chan="person_first_nam">0</ann>
+    <ann chan="person_last_nam">0</ann>
+    <ann chan="city_nam" head="1">1</ann>
    </tok>
    <ns/>
    <tok>
     <orth>.</orth>
     <lex disamb="1"><base>.</base><ctag>interp</ctag></lex>
-    <ann chan="nam_liv">0</ann>
-    <ann chan="nam_loc">0</ann>
+    <ann chan="person_first_nam">0</ann>
+    <ann chan="person_last_nam">0</ann>
+    <ann chan="city_nam">0</ann>
    </tok>
   </sentence>
  </chunk>
diff --git a/requirements.txt b/requirements.txt
index f7260eb14a49250794190d05e53a7b199e06a39d..3923df958b56e873598b5d291a1f3373d3d93824 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,5 @@
 --index-url https://pypi.clarin-pl.eu/simple/
 nlp-ws
 regex==2020.10.28
-Babel==2.8.0
\ No newline at end of file
+Babel==2.8.0
+bitarray==2.6.1
\ No newline at end of file
diff --git a/src/anonymizers/english_anonymizer.py b/src/anonymizers/english_anonymizer.py
index 61f29b1bedc60cdaec68b7fa3a8026cc2debb3b3..0961c241fac3d1c058056881498226d9a558552c 100644
--- a/src/anonymizers/english_anonymizer.py
+++ b/src/anonymizers/english_anonymizer.py
@@ -5,7 +5,7 @@ import random
 import regex
 
 
-from src.utils import consume
+from src.utils.utils import consume
 from src.ccl_handler import CCLHandler
 from src.base_anonymizer import BaseAnonymizer
 from src.generators import (generate_pseudo_email, generate_pseudo_phone_number,
diff --git a/src/anonymizers/polish_anonymizer.py b/src/anonymizers/polish_anonymizer.py
index 60f9c50a3f43c27d85d9020f4d69e0366e029444..f725254b8e444b1cbcf8d9a2a1fdabdfbfd8958c 100644
--- a/src/anonymizers/polish_anonymizer.py
+++ b/src/anonymizers/polish_anonymizer.py
@@ -4,7 +4,7 @@ import regex
 import random
 
 
-from src.utils import consume
+from src.utils.utils import consume
 from src.base_anonymizer import BaseAnonymizer
 from src.ccl_handler import CCLHandler
 from src.generators import (generate_pseudo_email, generate_pseudo_phone_number,
diff --git a/src/anonymizers/russian_anonymizer.py b/src/anonymizers/russian_anonymizer.py
index 32c0a917433a87804ed4cba9169d5a57376d5d5b..d9e6c073938ddab83583726054deaf5505cb0fdc 100644
--- a/src/anonymizers/russian_anonymizer.py
+++ b/src/anonymizers/russian_anonymizer.py
@@ -5,7 +5,7 @@ import random
 import regex
 
 
-from src.utils import consume
+from src.utils.utils import consume
 from src.ccl_handler import CCLHandler
 from src.base_anonymizer import BaseAnonymizer
 from src.generators import (generate_pseudo_email, generate_pseudo_phone_number,
diff --git a/src/ccl_parser.py b/src/ccl_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..41e69716e0d96ecb5d486d4fb27aed0510c16ba9
--- /dev/null
+++ b/src/ccl_parser.py
@@ -0,0 +1,70 @@
+from typing import Dict, Any, List, Tuple
+from lxml import etree
+from collections import defaultdict
+
+def parse_ccl(ccl: str) -> Tuple[str, Dict[str, List[Tuple[int, int, str]]]]:
+    """
+    Parses CCL XML format and returns original text and annotations.
+    
+    Annotations are returned as a dictionary with keys being annotation channels
+    and values being a list of tuples (start, end, word) where:
+    * start is an index of the first character in the word
+    * end is the index one past the last character of the word (exclusive end)
+    * word is a word or a group of words (in case of multiword tokens)
+    
+    :param ccl: CCL XML
+    :return: (text, annotations)
+    """
+    ccl_tree = etree.fromstring(ccl.strip().encode('utf-8'))
+    
+    results = defaultdict(list)
+    text = ""
+    
+    # First token is assumed to not have space before it
+    last_was_ns = True
+    
+    tokens = ccl_tree.xpath("//ns | //tok")
+    for token in tokens:                
+        if token.tag == 'tok':
+            if not last_was_ns:
+                text += " "
+
+            word = token.xpath('./orth')[0].text
+            start = len(text)
+            end = start + len(word)
+
+            for lex in token.xpath('./lex'):
+                if lex.attrib['disamb'] == "1":
+                    ctag = lex.xpath('./ctag')[0]
+                    results["ctag"].append((start, end, ctag.text))
+                    
+                    break
+                
+            for ann in token.xpath('./ann'):
+                is_present = int(ann.text) == 1
+                if not is_present:
+                    continue
+                
+                channel = ann.attrib['chan']
+                is_head = "head" in ann.attrib and ann.attrib['head'] == "1"
+                
+                if is_head:
+                    results[channel].append((start, end, word))
+                else:
+                    if last_was_ns:
+                        new_word = results[channel][-1][2] + word
+                    else:
+                        new_word = results[channel][-1][2] + " " + word
+                        
+                    old_start = results[channel][-1][0]
+                        
+                    results[channel][-1] = (old_start, end, new_word)
+                        
+            last_was_ns = False
+            text += word
+        elif token.tag == 'ns':
+            last_was_ns = True
+            
+    return text, results
+            
+    
\ No newline at end of file
diff --git a/src/detectors/date/date.py b/src/detectors/date/date.py
index 85e34b4dac03d9adfd0a32d679a50ce2ba0dce21..2f1f132c0323503d04afe8d59791373449b3687d 100644
--- a/src/detectors/date/date.py
+++ b/src/detectors/date/date.py
@@ -10,7 +10,7 @@ def find_dates(text: str, language: str = "en") -> List[Tuple[int, int, str]]:
     :type text: str
     :param language: the language of the text
     :type language: str
-    :return: a list of tuples containing (start, end, detected_date)
+    :return: a list of tuples containing (start, end, entity_type)
     :rtype: List[Tuple[int, int, str]]
     """
     
diff --git a/src/detectors/date/en.py b/src/detectors/date/en.py
index 594e663cb67fed4573da82741d2353c2aff7861d..a716bc1861cb07bf6c5c91da3dfed8555b80938c 100644
--- a/src/detectors/date/en.py
+++ b/src/detectors/date/en.py
@@ -1,5 +1,6 @@
 import regex as re
 from typing import List, Tuple
+from src.entity_types import EntityTypes
 
 EN_DATES_REGEX = re.compile(
     r'\b(?P<day_or_month_year>'
@@ -26,11 +27,11 @@ def detect_dates_en(text: str) -> List[Tuple[int, int, str]]:
     Detects English dates in the text.
     :param text: the text to be searched
     :type text: str
-    :return: a list of tuples containing (start, end, detected_date)
+    :return: a list of tuples containing (start, end, entity_type)
     :rtype: List[Tuple[int, int, str]]
     """
     matches = EN_DATES_REGEX.finditer(text)
     dates = []
     for match in matches:
-        dates.append((match.start(), match.end(), match.group()))
+        dates.append((match.start(), match.end(), EntityTypes.DATE))
     return dates
\ No newline at end of file
diff --git a/src/detectors/date/pl.py b/src/detectors/date/pl.py
index 7001b9f5b82f622dec9a03ca370740cc4a2ae9c0..02abfddbcc349919ce19319e1c925ba0d5683407 100644
--- a/src/detectors/date/pl.py
+++ b/src/detectors/date/pl.py
@@ -1,5 +1,6 @@
 import regex as re
 from typing import List, Tuple
+from src.entity_types import EntityTypes
 
 PL_DATES_REGEX = re.compile(
     r'\b(?P<day_or_month_year>'
@@ -29,11 +30,11 @@ def detect_dates_pl(text: str) -> List[Tuple[int, int, str]]:
     Detects Polish dates in the text.
     :param text: the text to be searched
     :type text: str
-    :return: a list of tuples containing (start, end, detected_date)
+    :return: a list of tuples containing (start, end, entity_type)
     :rtype: List[Tuple[int, int, str]]
     """
     matches = PL_DATES_REGEX.finditer(text)
     dates = []
     for match in matches:
-        dates.append((match.start(), match.end(), match.group()))
+        dates.append((match.start(), match.end(), EntityTypes.DATE))
     return dates
\ No newline at end of file
diff --git a/src/detectors/date/ru.py b/src/detectors/date/ru.py
index 91017c8c32e44cfd66d8bb94692cd207e6551309..4100717c1a0c9fb01704f3a76fcc994d6677bf55 100644
--- a/src/detectors/date/ru.py
+++ b/src/detectors/date/ru.py
@@ -1,5 +1,6 @@
 import regex as re
 from typing import List, Tuple
+from src.entity_types import EntityTypes
 
 RU_DATES_REGEX = re.compile(
     r'\b(?P<day_or_month_year>'
@@ -29,11 +30,11 @@ def detect_dates_ru(text: str) -> List[Tuple[int, int, str]]:
     Detects Russian dates in the text.
     :param text: the text to be searched
     :type text: str
-    :return: a list of tuples containing (start, end, detected_date)
+    :return: a list of tuples containing (start, end, entity_type)
     :rtype: List[Tuple[int, int, str]]
     """
     matches = RU_DATES_REGEX.finditer(text)
     dates = []
     for match in matches:
-        dates.append((match.start(), match.end(), match.group()))
+        dates.append((match.start(), match.end(), EntityTypes.DATE))
     return dates
\ No newline at end of file
diff --git a/src/detectors/email/email.py b/src/detectors/email/email.py
index a0637ec67652440e3e2fb7c961c78ff80d55ecfe..82e1756913f92b75d220b7536058d61db54fbf03 100644
--- a/src/detectors/email/email.py
+++ b/src/detectors/email/email.py
@@ -1,5 +1,6 @@
 import regex as re
 from typing import List, Tuple
+from src.entity_types import EntityTypes
 
 EMAIL_REGEX = re.compile(
     r'(?P<local_part>[a-z0-9!#$%&\'*+/=?^_`{|}~-]+'
@@ -16,11 +17,11 @@ def detect_emails(text: str, language: str) -> List[Tuple[int, int, str]]:
     :type text: str
     :param language: the language of the text
     :type language: str
-    :return: a list of tuples containing (start, end, detected_email)
+    :return: a list of tuples containing (start, end, entity_type)
     :rtype: List[Tuple[int, int, str]]
     """
     matches = EMAIL_REGEX.finditer(text)
     emails = []
     for match in matches:
-        emails.append((match.start(), match.end(), match.group()))
+        emails.append((match.start(), match.end(), EntityTypes.EMAIL))
     return emails
\ No newline at end of file
diff --git a/src/detectors/ner/__init__.py b/src/detectors/ner/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f8aefd5cd9364517e23a316e81a484529604695
--- /dev/null
+++ b/src/detectors/ner/__init__.py
@@ -0,0 +1 @@
+from src.detectors.ner.ner import detect_ner
\ No newline at end of file
diff --git a/src/detectors/ner/ner.py b/src/detectors/ner/ner.py
new file mode 100644
index 0000000000000000000000000000000000000000..18c562257735af09e3793cd4fb5484aa2be95dca
--- /dev/null
+++ b/src/detectors/ner/ner.py
@@ -0,0 +1,8 @@
+from typing import List, Tuple
+from src.detectors.ner.pl_liner_n5 import detect_ner_pl_liner_n5
+
+def detect_ner(ccl_annotations, language) -> List[Tuple[int, int, str]]:
+    if language == 'pl':
+        return detect_ner_pl_liner_n5(ccl_annotations)
+    else:
+        raise NotImplementedError
\ No newline at end of file
diff --git a/src/detectors/ner/pl_liner_n5.py b/src/detectors/ner/pl_liner_n5.py
new file mode 100644
index 0000000000000000000000000000000000000000..c494d13726aaeaec3e6e181c0285416575bbee1e
--- /dev/null
+++ b/src/detectors/ner/pl_liner_n5.py
@@ -0,0 +1,33 @@
+from typing import List, Tuple, Dict
+from src.utils.utils import subdict
+from src.entity_types import EntityTypes
+from src.utils.ner_pl_n5_mapping import NER_PL_N5_MAPPING
+
+def detect_ner_pl_liner_n5(
+    ccl_annotations: Dict[str, List[Tuple[int, int, str]]]
+) -> List[Tuple[int, int, str]]:
+    """
+    Detects NER entities in the text based on the liner_n5 NER ontology.
+
+    :param ccl_annotations: a dictionary of CCL NER annotations
+    :type ccl_annotations: Dict[str, List[Tuple[int, int, str]]]
+    :return: a list of tuples containing (start, end, entity_type)
+    :rtype: List[Tuple[int, int, str]]
+    """
+    names = subdict(
+        ccl_annotations,
+        [
+            "nam_liv_person",
+            "nam_liv_person_last",
+            "nam_fac_road",
+            "nam_loc_gpe_city",
+            "nam_org_group_team",
+        ],
+        all_must_be_present=False,
+    )
+
+    return [
+        (start, end, NER_PL_N5_MAPPING.get(entity_type, EntityTypes.OTHER))
+        for entity_type, entity in names.items()
+        for start, end, _ in entity
+    ]
diff --git a/src/detectors/phone/phone.py b/src/detectors/phone/phone.py
index 49abeb51e8467aa5beec9e579ca0bda1ffd88704..8ab3d65d1f1ca1125161fb16e09336476040bb68 100644
--- a/src/detectors/phone/phone.py
+++ b/src/detectors/phone/phone.py
@@ -1,5 +1,6 @@
 import regex as re
 from typing import List, Tuple
+from src.entity_types import EntityTypes
 
 PHONE_NUMBER_REGEX = re.compile(
     r'(?P<country_code>(00[1-9]\d?)|(\(?([+\d]{2,3})\)?)[- ]??)?'
@@ -14,11 +15,11 @@ def detect_phone_numbers(text: str, language: str) -> List[Tuple[int, int, str]]
     :type text: str
     :param language: the language of the text
     :type language: str
-    :return: a list of tuples containing (start, end, detected_date)
+    :return: a list of tuples containing (start, end, entity_type)
     :rtype: List[Tuple[int, int, str]]
     """
     matches = PHONE_NUMBER_REGEX.finditer(text)
     phone_numbers = []
     for match in matches:
-        phone_numbers.append((match.start(), match.end(), match.group()))
+        phone_numbers.append((match.start(), match.end(), EntityTypes.PHONE_NUMBER))
     return phone_numbers
\ No newline at end of file
diff --git a/src/detectors/url/url.py b/src/detectors/url/url.py
index 2ca1fec60b6c964670c3f41f7e659796390df6cb..70b8ba801f55062cdb26d94a1a2abac99df057d1 100644
--- a/src/detectors/url/url.py
+++ b/src/detectors/url/url.py
@@ -2,6 +2,7 @@ import regex as re
 from typing import List, Tuple
 from .pl import URL_REGEX_PL
 from .common import generate_url_regex
+from src.entity_types import EntityTypes
 
 def detect_urls(text: str, language: str) -> List[Tuple[int, int, str]]:
     """
@@ -10,7 +11,7 @@ def detect_urls(text: str, language: str) -> List[Tuple[int, int, str]]:
     :type text: str
     :param language: the language of the text
     :type language: str
-    :return: a list of tuples containing (start, end, detected_url)
+    :return: a list of tuples containing (start, end, entity_type)
     :rtype: List[Tuple[int, int, str]]
     """
     if language == "pl":
@@ -21,6 +22,6 @@ def detect_urls(text: str, language: str) -> List[Tuple[int, int, str]]:
     matches = url_regex.finditer(text)
     urls = []
     for match in matches:
-        urls.append((match.start(), match.end(), match.group()))
+        urls.append((match.start(), match.end(), EntityTypes.URL))
         
     return urls
\ No newline at end of file
diff --git a/src/detectors/user/user.py b/src/detectors/user/user.py
index 4d8f0352540682829b8cd154e227c48c38110e90..d588a25aa70565b426df4f65a18b5416b2504d4a 100644
--- a/src/detectors/user/user.py
+++ b/src/detectors/user/user.py
@@ -1,5 +1,6 @@
 import regex as re
 from typing import List, Tuple
+from src.entity_types import EntityTypes
 
 USER_REGEX = re.compile(r'\B(?P<username>\@[\w\-]+)')
 
@@ -10,11 +11,11 @@ def detect_users(text: str, language: str) -> List[Tuple[int, int, str]]:
     :type text: str
     :param language: the language of the text
     :type language: str
-    :return: a list of tuples containing (start, end, detected_user)
+    :return: a list of tuples containing (start, end, entity_type)
     :rtype: List[Tuple[int, int, str]]
     """
     matches = USER_REGEX.finditer(text)
     users = []
     for match in matches:
-        users.append((match.start(), match.end(), match.group()))
+        users.append((match.start(), match.end(), EntityTypes.USER))
     return users
\ No newline at end of file
diff --git a/src/dictionaries/__init__.py b/src/dictionaries/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dictionaries/pl_ner_replacements.py b/src/dictionaries/pl_ner_replacements.py
new file mode 100644
index 0000000000000000000000000000000000000000..77e7e87803547124235f4eea05d3ba88d9a96d8e
--- /dev/null
+++ b/src/dictionaries/pl_ner_replacements.py
@@ -0,0 +1,46 @@
+from typing import Dict, List, Optional
+from collections import defaultdict
+from src.entity_types import EntityTypes
+
+def load_pl_ner_replacements_dictionary(path: str, ner_mapping: Optional[Dict[str, str]] = None) -> Dict[str, Dict[str, Dict[str, str]]]:
+    """
+    Loads a dictionary that maps named entity tags to lemmas to part-of-speech tags to words.
+    
+    The dictionary is a nested defaultdict, so if a key is not found, an empty defaultdict is returned.
+    
+    The dictionary is stored in a tab-separated file, where each line has the following format:
+    
+    <ner_tag> <word> <lemma> <pos_tag>
+    
+    Example:
+    
+    OSOBA Andrzejowi Andrzej subst:sg:dat:m1
+    OSOBA Andrzej Andrzej subst:sg:m1:imperf
+    OSOBA Kasia Kasia subst:sg:f:imperf
+    MIEJSCE Wrocław Wrocław subst:sg:m2:imperf
+    MIEJSCE Warszawa Warszawa subst:sg:f:imperf
+    MIEJSCE Kraków Kraków subst:sg:m2:imperf
+    
+    Parameters
+    ----------
+    path : str
+        Path to the dictionary file.
+    
+    Returns
+    -------
+    Dict[str, Dict[str, Dict[str, str]]]
+        Nested defaultdict that maps named entity tags to lemmas to part-of-speech tags to words.
+    """
+    
+    replacement_dictionary = defaultdict(lambda: defaultdict(dict))
+    with open(path, "r", encoding="utf-8") as file:
+        for line in file:
+            line = line.strip()
+            ner_tag, word, lemma, pos_tag = line.split("\t")
+            
+            if ner_mapping is not None:
+                ner_tag = ner_mapping.get(ner_tag, EntityTypes.OTHER)
+                        
+            replacement_dictionary[ner_tag][lemma][pos_tag] = word
+            
+    return replacement_dictionary
\ No newline at end of file
diff --git a/src/entity_types.py b/src/entity_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed0496ba91e6f47ec9d3f54ad76aa3003aa45300
--- /dev/null
+++ b/src/entity_types.py
@@ -0,0 +1,14 @@
+class EntityTypes:
+    NAME = "name"
+    SURNAME = "surname"
+    STREET_NAME = "street_name"
+    CITY = "city"
+    COUNTRY = "country"
+    PHONE_NUMBER = "phone_number"
+    URL = "url"
+    USER = "user"
+    EMAIL = "email"
+    DATE = "date"
+    TIN = "tin" # Tax Identification Number
+    KRS = "krs" # National Court Register
+    OTHER = "other"
\ No newline at end of file
diff --git a/src/string_replacements.py b/src/string_replacements.py
new file mode 100644
index 0000000000000000000000000000000000000000..33c426db21a3f3d6062a517537e8778d08a90f6c
--- /dev/null
+++ b/src/string_replacements.py
@@ -0,0 +1,27 @@
+from typing import List, Tuple
+
+def replace(original_string: str, replacements: List[Tuple[int, int, str]]):
+    """
+    Replaces substrings in a string.
+    
+    Parameters
+    ----------
+    original_string : str
+        The original string.
+    replacements : List[Tuple[int, int, str]]
+        A list of tuples containing (start, end, replacement).
+    
+    Returns
+    -------
+    str
+        The string with replacements applied.
+    """
+    
+    replacements = sorted(replacements, key=lambda x: x[0])
+    
+    delta = 0
+    for replacement in replacements:
+        original_string = original_string[:replacement[0] + delta] + replacement[2] + original_string[replacement[1] + delta:]
+        delta += len(replacement[2]) - (replacement[1] - replacement[0])
+        
+    return original_string
\ No newline at end of file
diff --git a/src/suppressors/__init__.py b/src/suppressors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9cc16fb2c596e511388d7cee8698c89f1f44989
--- /dev/null
+++ b/src/suppressors/__init__.py
@@ -0,0 +1 @@
+from src.suppressors.order_based import suppress_order_based
\ No newline at end of file
diff --git a/src/suppressors/order_based.py b/src/suppressors/order_based.py
new file mode 100644
index 0000000000000000000000000000000000000000..8488465d3711469c466d5aaf161f35cab8686633
--- /dev/null
+++ b/src/suppressors/order_based.py
@@ -0,0 +1,27 @@
+from typing import List, Tuple, Dict
+from bitarray import bitarray
+
+def suppress_order_based(annotations: List[Tuple[int, int, str]]) -> List[Tuple[int, int, str]]:
+    """If two annotations overlap, the first one in the list is kept.
+
+    Args:
+        annotations (List[Tuple[int, int, str]]): List of annotations.
+
+    Returns:
+        List[Tuple[int, int, str]]: List of annotations with overlapping
+            annotations removed.
+
+    """
+    # Track already-covered character positions with a bitmask over the text span.
+    bitarray_size = max([end for _, end, _ in annotations])
+    bitarray_ = bitarray(bitarray_size)
+    bitarray_.setall(False)
+    
+    result = []
+    
+    for start, end, entity_type in annotations:
+        if not bitarray_[start:end].any():
+            bitarray_[start:end] = True
+            result.append((start, end, entity_type))
+            
+    return result
diff --git a/src/tag_anonimization.py b/src/tag_anonimization.py
new file mode 100644
index 0000000000000000000000000000000000000000..89e1a10ba44a1a71f504244f725d33ee0dadd8c8
--- /dev/null
+++ b/src/tag_anonimization.py
@@ -0,0 +1,40 @@
+from typing import List, Tuple
+from collections import defaultdict
+from src.entity_types import EntityTypes
+from src.string_replacements import replace
+
+def replace_with_tags(text: str, detections: List[Tuple[int, int, str]]) -> str:
+    """Replace entities with tags.
+    
+    Args:
+        text (str): Text to be processed.
+        detections (List[Tuple[int, int, str]]): List of detections.
+    
+    Returns:
+        str: Text with entities replaced with tags.
+    
+    """
+    
+    tags_map = {
+        EntityTypes.NAME: "[OSOBA]",
+        EntityTypes.SURNAME: "[OSOBA]",
+        EntityTypes.STREET_NAME: "[MIEJSCE]",
+        EntityTypes.CITY: "[MIEJSCE]",
+        EntityTypes.COUNTRY: "[MIEJSCE]",
+        EntityTypes.PHONE_NUMBER: "[DIGITS]",
+        EntityTypes.URL: "[WWW]",
+        EntityTypes.USER: "@[USER]",
+        EntityTypes.EMAIL: "[MAIL]",
+        EntityTypes.DATE: "[DATE]",
+        EntityTypes.TIN: "[DIGITS]",
+        EntityTypes.KRS: "[DIGITS]",
+    }
+    
+    result = [
+        (start, end, tags_map.get(entity_type, "[OTHER]"))
+        for start, end, entity_type in detections
+    ]
+    
+    return replace(text, result)
+    
+    
\ No newline at end of file
diff --git a/src/utils.py b/src/utils.py
deleted file mode 100644
index 81cc67f0bde02bc81b366edbbee4faf16f02029a..0000000000000000000000000000000000000000
--- a/src/utils.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""Module for useful functions."""
-
-import itertools
-
-
-def consume(iterative, n):
-    """Consume n elements from iterative object.
-
-    Args:
-        iterative (iter): Python iterative object.
-        n (int): Number of elements to consume.
-
-    """
-    next(itertools.islice(iterative, n - 1, n), None)
diff --git a/src/utils/__init__.py b/src/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8b0bd10ec3feb7dde48cd57a3048aaa6149d86c
--- /dev/null
+++ b/src/utils/__init__.py
@@ -0,0 +1 @@
+from src.utils.utils import *
\ No newline at end of file
diff --git a/src/utils/ner_pl_n5_mapping.py b/src/utils/ner_pl_n5_mapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b857b5d95fa3cad6ba4f2355cc34ba18ed91729
--- /dev/null
+++ b/src/utils/ner_pl_n5_mapping.py
@@ -0,0 +1,9 @@
+from src.entity_types import EntityTypes
+
+NER_PL_N5_MAPPING = {
+    "nam_liv_person": EntityTypes.NAME,
+    "nam_liv_person_last": EntityTypes.SURNAME,
+    "nam_fac_road": EntityTypes.STREET_NAME,
+    "nam_loc_gpe_city": EntityTypes.CITY,
+    "nam_org_group_team": EntityTypes.COUNTRY,
+}
\ No newline at end of file
diff --git a/src/utils/utils.py b/src/utils/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0035e6bb50275157ca80734a3f4a92990070dc1
--- /dev/null
+++ b/src/utils/utils.py
@@ -0,0 +1,33 @@
+"""Module for useful functions."""
+
+import itertools
+
+
+def consume(iterative, n):
+    """Consume n elements from iterative object.
+
+    Args:
+        iterative (iter): Python iterative object.
+        n (int): Number of elements to consume.
+
+    """
+    next(itertools.islice(iterative, n - 1, n), None)
+
+
+def subdict(dictionary, keys, all_must_be_present=True):
+    """Return a subdictionary of dictionary containing only keys.
+
+    Args:
+        dictionary (dict): Dictionary to take a subdictionary from.
+        keys (list): List of keys to take from dictionary.
+        all_must_be_present (bool): If True, all keys must be present in
+            dictionary. If False, only keys that are present are returned.
+
+    Returns:
+        dict: Subdictionary of dictionary containing only keys.
+
+    """
+    if all_must_be_present:
+        return {key: dictionary[key] for key in keys}
+    else:
+        return {key: dictionary[key] for key in keys if key in dictionary}
\ No newline at end of file
diff --git a/tests/detectors/date/test_en.py b/tests/detectors/date/test_en.py
index 429ee2ab0cd6ef71af64494cb34c4b467075323c..8104a8342a11c3c86dd03ca42034fcfa89016a55 100644
--- a/tests/detectors/date/test_en.py
+++ b/tests/detectors/date/test_en.py
@@ -1,16 +1,17 @@
 from src.detectors.date.en import detect_dates_en
+from src.entity_types import EntityTypes
+
 
 def test_detect_dates_en():
     # Check en-us
     text = "On 1.01.2022, I sold my cat. On April 5, 2021, I bought a dog."
     found_dates = detect_dates_en(text)
-    
-    assert found_dates == [(3,12,"1.01.2022"), (32,45, "April 5, 2021")]
-    
+
+    assert found_dates == [(3, 12, EntityTypes.DATE), (32, 45, EntityTypes.DATE)]
+
     # Check en-gb
     # TODO: Following test fails. Fix it.
     # text = "On 1.01.2022 I sold the cat. On 5th April 2021 I bought a dog."
     # found_dates = detect_dates_en(text)
-    
-    # assert found_dates == [(3,12,"1.01.2022"), (32,46, "5th April 2021")]
-    
\ No newline at end of file
+
+    # assert found_dates == [(3,12, EntityTypes.DATE), (32,46, EntityTypes.DATE)]
diff --git a/tests/detectors/date/test_pl.py b/tests/detectors/date/test_pl.py
index a441c3684f7a6fbf7721c5a4b79854f4f36a4106..2942163846888a02c5aa1eea8d7f5b52e19d3c6b 100644
--- a/tests/detectors/date/test_pl.py
+++ b/tests/detectors/date/test_pl.py
@@ -1,7 +1,9 @@
 from src.detectors.date.pl import detect_dates_pl
+from src.entity_types import EntityTypes
+
 
 def test_detect_dates_pl():
     text = "W dniu 1.01.2022 sprzedałem kota. 5 kwietnia 2021 roku kupiłem psa."
     found_dates = detect_dates_pl(text)
-    
-    assert found_dates == [(7,16,"1.01.2022"), (34,49, "5 kwietnia 2021")]
\ No newline at end of file
+
+    assert found_dates == [(7, 16, EntityTypes.DATE), (34, 49, EntityTypes.DATE)]
diff --git a/tests/detectors/date/test_ru.py b/tests/detectors/date/test_ru.py
index 44e9805ae7b6b0724cc3e70ee907b9a78a26e86c..5b90d293230e169bf517d36a52485ec4fbd42166 100644
--- a/tests/detectors/date/test_ru.py
+++ b/tests/detectors/date/test_ru.py
@@ -1,7 +1,9 @@
 from src.detectors.date.ru import detect_dates_ru
+from src.entity_types import EntityTypes
+
 
 def test_detect_dates_pl():
     text = "1.01.2022 я продал кошку. 5 апреля 2021 я купил собаку."
     found_dates = detect_dates_ru(text)
-    
-    assert found_dates == [(0,9,"1.01.2022"), (26,39, "5 апреля 2021")]
\ No newline at end of file
+
+    assert found_dates == [(0, 9, EntityTypes.DATE), (26, 39, EntityTypes.DATE)]
diff --git a/tests/detectors/email/test_email.py b/tests/detectors/email/test_email.py
index 05b3e63fc1fc1496e3dd468b6305d29040ff9077..6be224f436d60b6ca662d6d6a1547e19817b322c 100644
--- a/tests/detectors/email/test_email.py
+++ b/tests/detectors/email/test_email.py
@@ -1,7 +1,8 @@
 from src.detectors.email import detect_emails
+from src.entity_types import EntityTypes
 
 def test_detect_emails():
     text = "My email is arkadiusz@borek.pw. My friend's email is arkadiusz.dump@pwr.edu.pl"
     found_emails = detect_emails(text, "en")
     
-    assert found_emails == [(12, 30, "arkadiusz@borek.pw"), (53, 78, "arkadiusz.dump@pwr.edu.pl")]
\ No newline at end of file
+    assert found_emails == [(12, 30, EntityTypes.EMAIL), (53, 78, EntityTypes.EMAIL)]
\ No newline at end of file
diff --git a/tests/detectors/ner/__init__.py b/tests/detectors/ner/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/detectors/ner/test_pl_liner_n5.py b/tests/detectors/ner/test_pl_liner_n5.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab14e4192a7621644d56dfea549daa56d07c0415
--- /dev/null
+++ b/tests/detectors/ner/test_pl_liner_n5.py
@@ -0,0 +1,21 @@
+from src.detectors.ner.pl_liner_n5 import detect_ner_pl_liner_n5
+from src.entity_types import EntityTypes
+
+def test_detect_names_pl_liner_n5():
+    ccl_annotations = {
+        'nam_liv_person': [(10, 16, 'Marian'), (100, 109, 'Magdalena')],
+        'nam_liv_person_last': [(30, 35, 'Nowak')],
+        'nam_loc_gpe_city': [(50, 59, 'Wrocławiu')],
+        'some_other_annotation': [(120, 124, 'zowd')],
+    }
+    
+    result = detect_ner_pl_liner_n5(ccl_annotations)
+    
+    expected = [
+        (10, 16, EntityTypes.NAME), 
+        (100, 109, EntityTypes.NAME),
+        (30, 35, EntityTypes.SURNAME),
+        (50, 59, EntityTypes.CITY),
+    ]
+    
+    assert set(result) == set(expected)
\ No newline at end of file
diff --git a/tests/detectors/phone/test_phone.py b/tests/detectors/phone/test_phone.py
index b2efe2366531a3f183bf06b526343d1b431ed673..733f2630d51a26a051c0ea9d0c7939f31592b261 100644
--- a/tests/detectors/phone/test_phone.py
+++ b/tests/detectors/phone/test_phone.py
@@ -1,7 +1,8 @@
 from src.detectors.phone.phone import detect_phone_numbers
+from src.entity_types import EntityTypes
 
 def test_detect_phone_numbers():
     text = "My phone number is +48 123 456 789. My friend's number is 123456789."
     found_phone_numbers = detect_phone_numbers(text, "en")
     
-    assert found_phone_numbers == [(19, 34, '+48 123 456 789'), (58, 67, '123456789')]
\ No newline at end of file
+    assert found_phone_numbers == [(19, 34, EntityTypes.PHONE_NUMBER), (58, 67, EntityTypes.PHONE_NUMBER)]
\ No newline at end of file
diff --git a/tests/detectors/url/test_url.py b/tests/detectors/url/test_url.py
index ad22f6968833aa58da27d87dfea5ff363aa6f8b8..3d50e4dbc74a4ea10cfc89d8cecdc2052e5dee88 100644
--- a/tests/detectors/url/test_url.py
+++ b/tests/detectors/url/test_url.py
@@ -1,10 +1,11 @@
 from src.detectors.url import detect_urls
+from src.entity_types import EntityTypes
 
 def test_detect_urls():
     text = "This is a test for www.google.com. Make sure to go to https://www.google.com"
     found_urls = detect_urls(text, "en")
     
-    assert found_urls == [(19, 33, 'www.google.com'), (54, 76, 'https://www.google.com')]
+    assert found_urls == [(19, 33, EntityTypes.URL), (54, 76, EntityTypes.URL)]
     
 def test_detect_urls_pl():
     text = "m.in. https://www.google.com"  
@@ -12,5 +13,5 @@ def test_detect_urls_pl():
     found_urls_en = detect_urls(text, "en")
     
     # m.in. is a valid abbreviation for "między innymi" in Polish. It should not be detected as a URL.
-    assert found_urls_pl == [(6, 28, 'https://www.google.com')]
-    assert found_urls_en == [(0, 4, "m.in"), (6, 28, 'https://www.google.com')]
\ No newline at end of file
+    assert found_urls_pl == [(6, 28, EntityTypes.URL)]
+    assert found_urls_en == [(0, 4, EntityTypes.URL), (6, 28, EntityTypes.URL)]
\ No newline at end of file
diff --git a/tests/detectors/user/test_user.py b/tests/detectors/user/test_user.py
index b198f71653965c69d101c64d08941c67dad3c844..0ae3c9ed6e4d453410da0b37cf31e42145d83c11 100644
--- a/tests/detectors/user/test_user.py
+++ b/tests/detectors/user/test_user.py
@@ -1,7 +1,8 @@
 from src.detectors.user.user import detect_users
+from src.entity_types import EntityTypes
 
 def test_detect_users():
     text = "My username is @john_smith. My friend's username is @jane_doe."
     found_users = detect_users(text, "en")
     
-    assert found_users == [(15, 26, '@john_smith'), (52, 61, '@jane_doe')]
\ No newline at end of file
+    assert found_users == [(15, 26, EntityTypes.USER), (52, 61, EntityTypes.USER)]
\ No newline at end of file
diff --git a/tests/dictionaries/__init__.py b/tests/dictionaries/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/dictionaries/test_pl_ner_replacements.py b/tests/dictionaries/test_pl_ner_replacements.py
new file mode 100644
index 0000000000000000000000000000000000000000..a694d2edd0a4bafcf08ba22af7ce78d89420dcec
--- /dev/null
+++ b/tests/dictionaries/test_pl_ner_replacements.py
@@ -0,0 +1,38 @@
+from src.dictionaries.pl_ner_replacements import load_pl_ner_replacements_dictionary
+from tempfile import NamedTemporaryFile
+
+def test_load_pl_ner_replacements_dictionary():
+    with NamedTemporaryFile(mode="w", encoding="utf-8", delete=False) as file:
+        file.write("OSOBA\tAndrzejowi\tAndrzej\tsubst:sg:dat:m1\n")
+        file.write("OSOBA\tAndrzej\tAndrzej\tsubst:sg:m1:imperf\n")
+        file.write("OSOBA\tKasia\tKasia\tsubst:sg:f:imperf\n")
+        file.write("MIEJSCE\tWrocław\tWrocław\tsubst:sg:m2:imperf\n")
+        file.write("MIEJSCE\tWarszawa\tWarszawa\tsubst:sg:f:imperf\n")
+        file.write("MIEJSCE\tKraków\tKraków\tsubst:sg:m2:imperf\n")
+        
+        path = file.name
+        
+    dictionary = load_pl_ner_replacements_dictionary(path)
+    
+    assert dictionary == {
+        "OSOBA": {
+            "Andrzej": {
+                "subst:sg:dat:m1": "Andrzejowi",
+                "subst:sg:m1:imperf": "Andrzej"
+            },
+            "Kasia": {
+                "subst:sg:f:imperf": "Kasia"
+            }
+        },
+        "MIEJSCE": {
+            "Wrocław": {
+                "subst:sg:m2:imperf": "Wrocław"
+            },
+            "Warszawa": {
+                "subst:sg:f:imperf": "Warszawa"
+            },
+            "Kraków": {
+                "subst:sg:m2:imperf": "Kraków"
+            }
+        }
+    }
\ No newline at end of file
diff --git a/tests/suppressors/test_order_based.py b/tests/suppressors/test_order_based.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cf35b9e92809dd12327c9f55fda9bdd9e16459b
--- /dev/null
+++ b/tests/suppressors/test_order_based.py
@@ -0,0 +1,16 @@
+from src.suppressors.order_based import suppress_order_based
+
+def test_suppress_order_based():
+    annotations = [
+        (10, 16, "Marian"),
+        (10, 18, "Marianna"),
+        (30, 35, "Nowak"),
+        (50, 59, "Wrocławiu"),
+    ]
+    result = suppress_order_based(annotations)
+    expected = [
+        (10, 16, "Marian"),
+        (30, 35, "Nowak"),
+        (50, 59, "Wrocławiu"),
+    ]
+    assert set(result) == set(expected)
\ No newline at end of file
diff --git a/tests/test_ccl_parser.py b/tests/test_ccl_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..e140edce6a339af382a330542b547a07f378c617
--- /dev/null
+++ b/tests/test_ccl_parser.py
@@ -0,0 +1,60 @@
+from src.ccl_parser import parse_ccl
+
+example_ccl = """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE chunkList SYSTEM "ccl.dtd">
+<chunkList>
+ <chunk type="p" id="ch1">
+  <sentence id="s1">
+   <tok>
+    <orth>Marek</orth>
+    <lex disamb="1"><base>Marek</base><ctag>subst:sg:nom:m1</ctag></lex>
+    <lex disamb="1"><base>marek</base><ctag>subst:sg:nom:m1</ctag></lex>
+    <ann chan="nam_liv" head="1">1</ann>
+    <ann chan="nam_loc">0</ann>
+   </tok>
+   <tok>
+    <orth>Kowalski</orth>
+    <lex disamb="1"><base>Kowalski</base><ctag>subst:sg:nom:m1</ctag></lex>
+    <ann chan="nam_liv">1</ann>
+    <ann chan="nam_loc">0</ann>
+   </tok>
+   <tok>
+    <orth>pojechał</orth>
+    <lex disamb="1"><base>pojechać</base><ctag>praet:sg:m1:perf</ctag></lex>
+    <ann chan="nam_liv">0</ann>
+    <ann chan="nam_loc">0</ann>
+   </tok>
+   <tok>
+    <orth>do</orth>
+    <lex disamb="1"><base>do</base><ctag>prep:gen</ctag></lex>
+    <ann chan="nam_liv">0</ann>
+    <ann chan="nam_loc">0</ann>
+   </tok>
+   <tok>
+    <orth>Wrocławia</orth>
+    <lex disamb="1"><base>Wrocław</base><ctag>subst:sg:gen:m3</ctag></lex>
+    <ann chan="nam_liv">0</ann>
+    <ann chan="nam_loc" head="1">1</ann>
+   </tok>
+   <ns/>
+   <tok>
+    <orth>.</orth>
+    <lex disamb="1"><base>.</base><ctag>interp</ctag></lex>
+    <ann chan="nam_liv">0</ann>
+    <ann chan="nam_loc">0</ann>
+   </tok>
+  </sentence>
+ </chunk>
+</chunkList>
+"""
+
+def test_parse_ccl():
+    text, annotations = parse_ccl(example_ccl)
+    
+    assert text == "Marek Kowalski pojechał do Wrocławia."
+    
+    assert set(annotations.keys()) == set(["nam_liv", "nam_loc", "ctag"])
+    
+    assert annotations["nam_liv"] == [(0, 14, "Marek Kowalski")]
+    assert annotations["nam_loc"] == [(27, 36, "Wrocławia")]
+    assert annotations["ctag"] == [(0, 5, "subst:sg:nom:m1"), (6, 14, "subst:sg:nom:m1"), (15, 23, "praet:sg:m1:perf"), (24, 26, "prep:gen"), (27, 36, "subst:sg:gen:m3"), (36, 37, "interp")]
\ No newline at end of file
diff --git a/tests/test_string_replacements.py b/tests/test_string_replacements.py
new file mode 100644
index 0000000000000000000000000000000000000000..f44644d480c918895f39f093e6d11d8ff87234f7
--- /dev/null
+++ b/tests/test_string_replacements.py
@@ -0,0 +1,20 @@
+from src.string_replacements import replace
+
+def test_replace():
+    text = "Ala ma kota"
+    replacements = [(0, 3, "Andrzej"), (7, 11, "psa")]
+    
+    expected = "Andrzej ma psa"
+    
+    result = replace(text, replacements)
+    
+    assert result == expected
+    
+def test_replace_out_of_order():
+    text = "Ala ma kota"
+    replacements = [(7, 11, "psa"), (0, 3, "Andrzej")]
+    
+    expected = "Andrzej ma psa"
+    result = replace(text, replacements)
+    
+    assert result == expected
\ No newline at end of file
diff --git a/tests/test_tag_anonimization.py b/tests/test_tag_anonimization.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bfd374727b7d084b020a805d128a4be3c31ca6e
--- /dev/null
+++ b/tests/test_tag_anonimization.py
@@ -0,0 +1,17 @@
+
+from src.tag_anonimization import replace_with_tags
+from src.entity_types import EntityTypes
+
+def test_replace_with_tags():
+    text = "Ala Brzeszczot urodziła sie 05.05.2005 we Wrocławiu"
+    detections = [
+        (0, 3, EntityTypes.NAME), 
+        (4, 14, EntityTypes.SURNAME),
+        (28, 38, EntityTypes.DATE),
+        (42, 51, EntityTypes.CITY),
+    ]
+    
+    result = replace_with_tags(text, detections)
+    expected = "[OSOBA] [OSOBA] urodziła sie [DATE] we [MIEJSCE]"
+    
+    assert result == expected
\ No newline at end of file