Commit fb6d8f1b authored by Michał Pogoda

Merge branch 'better_coverage' into 'develop'

Fix infancy errors based on Magdalena's report

See merge request !9
parents 4243529d e745a827
2 merge requests: !10 Anonimizer v2, !9 Fix infancy errors based on Magdalena's report
Pipeline #9217 passed
Showing changes with 37634 additions and 38 deletions
@@ -7,6 +7,11 @@ Anonymizer works in 3 modes, when sensitive data is detected, it can perform ope
- tag - sensitive data is replaced by the category tag it belongs to
- pseudo (pseudonymization) - sensitive data is replaced by another object in the same category
## Running from CLI
```bash
python3 cli.py example_inputs/wiktorner_jsonl output.json --configuration wiktorner_jsonl --language pl --replace-method tag
```
## How does it work?
Anonymizer is a pipeline of modules. The overall pipeline is as follows:
@@ -33,3 +38,4 @@ The project uses hydra for configuration. You can find the configuration files i
Liner2 should use model 5nam.
text->any2txt->morphodita->liner2->anonymizer
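Since the project is configured with Hydra, each pipeline component is declared by a `_target_` path and built at runtime, as in the date-detector fragment below. A minimal sketch of that mechanism, using an inline config rather than the project's actual config files:

```python
from hydra.utils import instantiate
from omegaconf import OmegaConf

# Inline stand-in for the project's Hydra config files (illustrative only).
cfg = OmegaConf.create(
    {
        "language": "pl",
        "date": {"_target_": "src.detectors.date.DateDetector", "language": "${language}"},
    }
)

# instantiate() imports src.detectors.date.DateDetector and calls it with
# language="pl" (the ${language} interpolation resolves against the root config).
date_detector = instantiate(cfg.date)
```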
date:
_target_: src.detectors.date.DateDetector
- language: $language
+ language: ${language}
\ No newline at end of file
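The `$language` to `${language}` change above matters because OmegaConf (which Hydra uses underneath) only treats `${...}` as an interpolation; a bare `$language` is kept as a literal string. A quick check with made-up values:

```python
from omegaconf import OmegaConf

cfg = OmegaConf.create({"language": "pl", "old": "$language", "new": "${language}"})
print(cfg.old)  # -> "$language"  (literal string, what the previous config produced)
print(cfg.new)  # -> "pl"         (resolved interpolation, as after this fix)
```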
@@ -7,5 +7,76 @@ ner:
"nam_fac_road": "street_name"
"nam_loc_gpe_city": "city"
"nam_loc_gpe_country": "country"
- "nam_loc_gpe_admin1": "country" # TODO: Implement better mapping for this
- "nam_loc_historical_region": "country" # TODO: Implement better mapping for this
+ "nam_loc_gpe_subdivision": "location"
+ "nam_loc_gpe_admin1": "location"
"nam_loc_gpe_district": "location"
"nam_loc_gpe_admin3": "location"
"nam_loc_gpe_admin2": "location"
"nam_loc_gpe_conurbation": "location"
"nam_loc_country_region": "location"
"nam_loc": "location"
"nam_fac_bridge": "location"
"nam_fac_goe": "location"
"nam_loc_land_mountain": "location"
"nam_loc_land_island": "location"
"nam_loc_land": "location"
"nam_loc_land_peak": "location"
"nam_loc_land_continent": "location"
"nam_loc_land_region": "location"
"nam_loc_historical_region": "location"
"nam_fac_park": "location"
"nam_fac_square": "street"
"nam_fac_goe_stop": "street"
"nam_adj_country": "country"
"nam_num_phone": "phone_number"
"nam_num_house": "number"
"nam_liv_person": "name"
"nam_adj_person": "surname"
"nam_adj_city": "city"
"nam_pro_title_document": "title"
"nam_pro_title_book": "title"
"nam_pro_title_article": "title"
"nam_pro_title": "title"
"nam_pro_title_song": "title"
"nam_pro_title_treaty": "title"
"nam_pro_title_album": "title"
"nam_pro_title_tv": "title"
"nam_loc_hydronym": "hydronym"
"nam_loc_hydronym_sea": "hydronym"
"nam_loc_hydronym_lake": "hydronym"
"nam_loc_hydronym_river": "hydronym"
"nam_loc_hydronym_ocean": "hydronym"
"nam_oth_www": "url"
"nam_pro_award": "proper_name"
"nam_pro_media_radio": "proper_name"
"nam_pro_media_tv": "proper_name"
"nam_pro_media_web": "proper_name"
"nam_pro_software_game": "proper_name"
"nam_pro_model_car": "proper_name"
"nam_pro_media": "proper_name"
"nam_pro_vehicle": "proper_name"
"nam_pro_brand": "proper_name"
"nam_pro_media_periodic": "proper_name"
"nam_pro_software": "proper_name"
"nam_pro": "proper_name"
"nam_eve_human_cultural": "proper_name"
"nam_eve_human_holiday": "proper_name"
"nam_eve_human": "proper_name"
"nam_eve": "proper_name"
"nam_eve_human_sport": "proper_name"
"nam_fac_system": "proper_name"
"nam_oth_tech": "proper_name"
"nam_oth_currency": "proper_name"
"nam_oth_position": "proper_name"
"nam_oth": "serial_number"
"nam_oth_data_format": "serial_number"
"nam_oth_license": "serial_number"
"nam_org_group_team": "organization_name"
"nam_org_company": "organization_name"
"nam_org_group": "organization_name"
"nam_org_political_party": "organization_name"
"nam_org_group_band": "organization_name"
"nam_org_nation": "organization_name"
"nam_org_organization_sub": "organization_name"
"nam_org_institution": "organization_name"
"nam_org_organization": "organization_name"
@@ -2,5 +2,5 @@ ner:
_target_: src.replacers.ner_replacer.NERReplacer
dictionary:
_target_: src.dictionaries.morphosyntactic.ner_file.NERFileMorphosyntacticDictionary
- dictionary_path: ${paths.dictionaries_path}/pl_dict.txt
+ dictionary_path: ${paths.dictionaries_path}/pl_ext_dict.txt
\ No newline at end of file
This diff is collapsed.
"""Implementation of anonymizer service.""" """Implementation of anonymizer service."""
import argparse import argparse
from src.worker_old import Worker
from src.anonymizers.polish_anonymizer import PolishAnonymizer from src.anonymizers.polish_anonymizer import PolishAnonymizer
......
from lpmn_client_biz import Connection, IOType, upload, download, Task, delete
import json

# LPMN pipeline: morphological analysis (morphodita), conversion of the CCL
# output to JSON (posconverter), then NER with winer.
lpmn = [
    "morphodita",
    {"posconverter": {"input_fromat": "ccl", "output_fromat": "json"}},
    "winer",
]

_connection = Connection()

# Upload the local file, run the pipeline on it and download the result as text.
FILE = upload(_connection, "test.txt")
task = Task(lpmn, _connection)
output_file_id = task.run(FILE, IOType.FILE, verbose=True)
downloaded = download(_connection, output_file_id, IOType.TEXT)

# Save the parsed result, keeping Polish characters unescaped.
result = json.loads(downloaded)
with open("test.jsonl", "wt", encoding="utf8") as f:
    json.dump(result, f, ensure_ascii=False)
@@ -32,6 +32,35 @@ class SurnameDetection(MorphosyntacticInfoMixin, Detection):
    def __init__(self, morpho_tag: Optional[str] = None) -> None:
        super().__init__(morpho_tag=morpho_tag)


class LocationDetection(MorphosyntacticInfoMixin, Detection):
    TYPE_NAME = "location"

    def __init__(self, morpho_tag: Optional[str] = None) -> None:
        super().__init__(morpho_tag=morpho_tag)


class OrganizationNameDetection(MorphosyntacticInfoMixin, Detection):
    TYPE_NAME = "organization_name"

    def __init__(self, morpho_tag: Optional[str] = None) -> None:
        super().__init__(morpho_tag=morpho_tag)


class ProperNameDetection(MorphosyntacticInfoMixin, Detection):
    TYPE_NAME = "proper_name"

    def __init__(self, morpho_tag: Optional[str] = None) -> None:
        super().__init__(morpho_tag=morpho_tag)


class TitleDetection(MorphosyntacticInfoMixin, Detection):
    TYPE_NAME = "title"

    def __init__(self, morpho_tag: Optional[str] = None) -> None:
        super().__init__(morpho_tag=morpho_tag)


class HydronymDetection(MorphosyntacticInfoMixin, Detection):
    TYPE_NAME = "hydronym"

    def __init__(self, morpho_tag: Optional[str] = None) -> None:
        super().__init__(morpho_tag=morpho_tag)
class StreetNameDetection(MorphosyntacticInfoMixin, Detection):
    TYPE_NAME = "street_name"
@@ -47,6 +76,7 @@ class CityDetection(MorphosyntacticInfoMixin, Detection):
        super().__init__(morpho_tag=morpho_tag)


class CountryDetection(MorphosyntacticInfoMixin, Detection):
    TYPE_NAME = "country"
@@ -102,6 +132,11 @@ class KRSDetection(Detection):  # National Court Register
    def __init__(self) -> None:
        super().__init__()


class SerialNumberDetection(Detection):
    TYPE_NAME = "serial_number"

    def __init__(self) -> None:
        super().__init__()


class OtherDetection(Detection):  # Non standard entity
    TYPE_NAME = "other"
...
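The new detection classes differ only in their `TYPE_NAME` and in whether they accept a morphosyntactic tag. A short construction sketch; the tag value is an illustrative NKJP-style string, not taken from the repository:

```python
from src.detections import (
    LocationDetection,
    OrganizationNameDetection,
    SerialNumberDetection,
)

loc = LocationDetection(morpho_tag="sg:nom:f")  # carries morphosyntactic info
org = OrganizationNameDetection()               # morpho_tag is optional
serial = SerialNumberDetection()                # plain Detection, no morpho info
print(loc.TYPE_NAME, org.TYPE_NAME, serial.TYPE_NAME)
# -> location organization_name serial_number
```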
@@ -2,7 +2,7 @@ import regex as re
from typing import List, Tuple
from src.detections import DateDetection
- from src.detectors.date.utils import _parse_date_to_format
+ from src.detectors.date.utils import parse_date_to_format
EN_DATES_REGEX = re.compile(
    r"\b(?P<day_or_month_year>"
@@ -35,6 +35,6 @@ def detect_dates_en(text: str) -> List[Tuple[int, int, DateDetection]]:
    matches = EN_DATES_REGEX.finditer(text)
    dates = []
    for match in matches:
-         format = _parse_date_to_format(match.groupdict())
-         dates.append((match.start(), match.end(), DateDetection(format)))
+         date_format = parse_date_to_format(match.groupdict())
+         dates.append((match.start(), match.end(), DateDetection(date_format)))
    return dates
@@ -2,7 +2,7 @@ import regex as re
from typing import List, Tuple
from src.detections import DateDetection
- from src.detectors.date.utils import _parse_date_to_format
+ from src.detectors.date.utils import parse_date_to_format
PL_DATES_REGEX = re.compile(
    r"\b(?P<day_or_month_year>"
@@ -26,6 +26,10 @@ PL_DATES_REGEX = re.compile(
    re.I,
)
PL_YEAR_REGEX = re.compile(
    r"(?<year>\d+)\s*(?<addon>roku?)"
)
def detect_dates_pl(text: str) -> List[Tuple[int, int, DateDetection]]:
    """
@@ -35,10 +39,15 @@ def detect_dates_pl(text: str) -> List[Tuple[int, int, DateDetection]]:
    :return: a list of tuples containing (start, end, annotation)
    :rtype: List[Tuple[int, int, DateAnnotation]]
    """
    matches = PL_DATES_REGEX.finditer(text)
    dates = []
    for match in matches:
-         format = _parse_date_to_format(match.groupdict())
+         format = parse_date_to_format(match.groupdict())
        dates.append((match.start(), match.end(), DateDetection(format)))
    for match in PL_YEAR_REGEX.finditer(text):
        format = parse_date_to_format(match.groupdict())
        dates.append((match.start(), match.end(), DateDetection(format)))
    return dates
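The added `PL_YEAR_REGEX` catches bare year mentions such as "2005 roku", which the main Polish date pattern does not cover. A quick standalone check of the pattern (the sentence is made up):

```python
import regex as re  # the detectors use the third-party `regex` package

PL_YEAR_REGEX = re.compile(r"(?<year>\d+)\s*(?<addon>roku?)")

match = PL_YEAR_REGEX.search("Umowę podpisano w 2005 roku we Wrocławiu.")
print(match.group("year"), match.group("addon"))  # -> 2005 roku
```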
@@ -2,7 +2,7 @@ import regex as re
from typing import List, Tuple
from src.detections import DateDetection
- from src.detectors.date.utils import _parse_date_to_format
+ from src.detectors.date.utils import parse_date_to_format
RU_DATES_REGEX = re.compile(
    r"\b(?P<day_or_month_year>"
@@ -38,7 +38,7 @@ def detect_dates_ru(text: str) -> List[Tuple[int, int, DateDetection]]:
    matches = RU_DATES_REGEX.finditer(text)
    dates = []
    for match in matches:
-         format = _parse_date_to_format(match.groupdict())
+         format = parse_date_to_format(match.groupdict())
        dates.append((match.start(), match.end(), DateDetection(format)))
    return dates
@@ -32,7 +32,7 @@ def _parse_day_or_month(re_entry) -> List[Tuple[int, int, DateDetection]]:
            (DateDetection.AnnotationPart.TWO_DIGIT_MONTH, re_entry["day_month2"])
        )
-         result.append((DateDetection.AnnotationPart.OTHER, re_entry["punct1"]))
+         result.append((DateDetection.AnnotationPart.OTHER, re_entry["punct2"]))
    # elif "day_month2" in re_entry:
    #     if len(re_entry["day_month2"]) == 1:
    #         result.append(
@@ -157,15 +157,30 @@ def _parse_month_in_words(re_entry) -> List[Tuple[DateDetection.AnnotationPart,
    return result


def _parse_year_only(re_entry) -> List[Tuple[DateDetection.AnnotationPart, str]]:
    assert re_entry["addon"] is not None
    result = []
    if len(re_entry["year"]) == 2:
        result.append((DateDetection.AnnotationPart.TWO_DIGIT_YEAR, re_entry["year"]))
    else:
        result.append((DateDetection.AnnotationPart.FOUR_DIGIT_YEAR, re_entry["year"]))
    result.append((DateDetection.AnnotationPart.OTHER, re_entry["addon"]))
    return result


- def _parse_date_to_format(
+ def parse_date_to_format(
    re_entry,
) -> Optional[List[Tuple[DateDetection.AnnotationPart, str]]]:
-     if re_entry["day_or_month_year"] is not None:
+     if re_entry.get("day_or_month_year", None) is not None:
        result = _parse_day_or_month(re_entry)
-     elif re_entry["year_month_or_day"] is not None:
+     elif re_entry.get("year_month_or_day", None) is not None:
        result = _parse_year_month_or_day(re_entry)
-     elif re_entry["month_in_words"] is not None:
+     elif re_entry.get("month_in_words", None) is not None:
        result = _parse_month_in_words(re_entry)
    elif re_entry.get("addon", None) is not None:
        result = _parse_year_only(re_entry)
    else:
        result = None
...
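Switching from `re_entry[...]` to `re_entry.get(...)` lets the renamed `parse_date_to_format` accept group dictionaries that do not define every named group, which is exactly what `PL_YEAR_REGEX` produces. A hedged sketch of that path, with a hand-written stand-in for `match.groupdict()`:

```python
from src.detectors.date.utils import parse_date_to_format

# Only "year" and "addon" are present, so the .get() lookups fall through to the
# new _parse_year_only branch instead of raising KeyError.
entry = {"year": "2005", "addon": "roku"}
print(parse_date_to_format(entry))
# -> roughly [(FOUR_DIGIT_YEAR, '2005'), (OTHER, 'roku')]
```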
@@ -20,7 +20,7 @@ class NerDetector(Detector):
        for annotation in annotations:
            start, end, annotation = annotation
            if isinstance(annotation, MorphosyntacticAnnotation):
-                 morpho_tags[(start, end)] = annotation.morphosyntactic_tag
+                 morpho_tags[start] = annotation.morphosyntactic_tag
            elif isinstance(annotation, NerAnnotation):
                ner_type = annotation.ner_type
@@ -34,8 +34,8 @@ class NerDetector(Detector):
        for start, end, ner_detection in ner_detections:
            kwargs = dict()
            if issubclass(ner_detection, MorphosyntacticInfoMixin):
-                 if (start, end) in morpho_tags:
-                     kwargs["morpho_tag"] = morpho_tags[(start, end)]
+                 if start in morpho_tags:
+                     kwargs["morpho_tag"] = morpho_tags[start]
            result.append((start, end, ner_detection(**kwargs)))
...
@@ -4,7 +4,7 @@ from src.detections import NumberDetection
from src.detectors.interface import Detector
NUMBER_REGEX = re.compile(
-     r"\d+[^a-zA-Z\d]*\d*",
+     r"\d+",
    re.I,
)
@@ -16,10 +16,10 @@ class NumberDetector(Detector):
    def detect(
        self, text: str, annotations: Dict[str, List[Tuple[int, int, Any]]]
    ) -> List[Tuple[int, int, NumberDetection]]:
-         NUMBER_REGEX.finditer(text)
        numbers = []
-         for number in numbers:
+         for number in NUMBER_REGEX.finditer(text):
            numbers.append((number.start(), number.end(), NumberDetection()))
        return numbers
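Two fixes land here: the number pattern is narrowed to plain digit runs, and the loop now iterates over the regex matches (previously the `finditer` result was discarded and the empty `numbers` list was iterated, so no number was ever detected). A standalone check of the new pattern on a made-up string:

```python
import regex as re

NUMBER_REGEX = re.compile(r"\d+", re.I)

text = "Zadzwoń pod 71 123 45 67 po 2005 roku."
print([(m.start(), m.end(), m.group()) for m in NUMBER_REGEX.finditer(text)])
# -> [(12, 14, '71'), (15, 18, '123'), ...] one span per digit run
```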
@@ -44,13 +44,19 @@ class NERFileMorphosyntacticDictionary(MorphosyntacticDictionary):
        if (
            original_entry_type_name in self._dictionary
-             and morpho_tag in self._dictionary[original_entry_type_name]
        ):
            if morpho_tag in self._dictionary[original_entry_type_name]:
                result = random.choice(
                    list(
                        self._dictionary[original_entry_type_name][morpho_tag].values()
                    )
                )
            else:
                morpho_tag = result = random.choice(list(self._dictionary[original_entry_type_name].keys()))
                result = random.choice(
                    list(self._dictionary[original_entry_type_name][morpho_tag].keys())
                )
        if result is None and self._always_replace:
            random_type = random.choice(list(self._dictionary.keys()))
...
@@ -7,9 +7,13 @@ from src.dictionaries.morphosyntactic.ner_file import NERFileMorphosyntacticDict
class NERFileNKJPMorphosyntacticDictionary(NERFileMorphosyntacticDictionary):
    def __init__(
-         self, dictionary_path: Optional[str] = None, always_replace=True
+         self,
+         dictionary_path: Optional[str] = None,
+         always_replace=True,
+         remove_first_morpho_subtag=True
    ) -> None:
        super().__init__(dictionary_path, always_replace)
        self._remove_first_morpho_subtag = remove_first_morpho_subtag
    def get_random_replacement(self, original_entry: Detection) -> Optional[str]:
        original_entry_type = type(original_entry)
@@ -19,7 +23,10 @@ class NERFileNKJPMorphosyntacticDictionary(NERFileMorphosyntacticDictionary):
        if issubclass(original_entry_type, MorphosyntacticInfoMixin):
            # THAT IS A HACK FOR NOW FOR CORRUPTED NKJP TAGS IN DICTIONARY
            if self._remove_first_morpho_subtag:
                morpho_tag = ":".join(original_entry.morpho_tag.split(":")[1:])
            else:
                morpho_tag = original_entry.morpho_tag
            if (
                original_entry_type_name in self._dictionary
...
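The new `remove_first_morpho_subtag` flag makes the existing workaround for corrupted NKJP tags in the dictionary optional. Its effect on an illustrative NKJP-style tag (not taken from the dictionary file):

```python
morpho_tag = "subst:sg:nom:m1"  # illustrative value

# remove_first_morpho_subtag=True (default): drop the leading subtag
print(":".join(morpho_tag.split(":")[1:]))  # -> "sg:nom:m1"

# remove_first_morpho_subtag=False: use the tag as stored
print(morpho_tag)  # -> "subst:sg:nom:m1"
```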
@@ -2,7 +2,6 @@ from typing import Dict, List, Tuple
from lxml import etree
from collections import defaultdict
- # from src.annotation_types_old import
from src.input_parsers.interface import InputParser
from src.annotations import Annotation, MorphosyntacticAnnotation, NerAnnotation
...
@@ -4,6 +4,9 @@ from src.detections import (
    NameDetection,
    SurnameDetection,
    StreetNameDetection,
    LocationDetection,
    OrganizationNameDetection,
    ProperNameDetection,
    CityDetection,
    CountryDetection,
    PhoneNumberDetection,
@@ -12,7 +15,11 @@ from src.detections import (
    EmailDetection,
    DateDetection,
    TINDetection,
    TitleDetection,
    HydronymDetection,
    SerialNumberDetection,
    KRSDetection,
    NumberDetection
)
from src.string_replacements import replace
from src.replacers.interface import ReplacerInterface
@@ -24,15 +31,22 @@ class TagReplacer(ReplacerInterface):
        NameDetection: "[OSOBA]",
        SurnameDetection: "[OSOBA]",
        StreetNameDetection: "[MIEJSCE]",
        LocationDetection: "[MIEJSCE]",
        CityDetection: "[MIEJSCE]",
        CountryDetection: "[MIEJSCE]",
        OrganizationNameDetection: "[ORGANIZACJA]",
        ProperNameDetection: "[NAZWA WŁASNA]",
        TitleDetection: "[TYTUŁ]",
-         PhoneNumberDetection: "[DIGITS]",
+         PhoneNumberDetection: "[CYFRY]",
        HydronymDetection: "[NAZWA WODNA]",
        UrlDetection: "[WWW]",
        UserDetection: "@[USER]",
        EmailDetection: "[MAIL]",
-         DateDetection: "[DATE]",
-         TINDetection: "[DIGITS]",
-         KRSDetection: "[DIGITS]",
+         DateDetection: "[DATA]",
+         TINDetection: "[CYFRY]",
+         KRSDetection: "[CYFRY]",
        SerialNumberDetection: "[NUMER IDENTYFIKACYJNY]",
        NumberDetection: "[CYFRY]"
    }
    def replace(
...
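With the expanded table, tag mode now covers the new detection types and uses Polish placeholders throughout. A rough illustration of the lookup; the excerpt and the sample sentence are made up, and the real replacement goes through `src.string_replacements.replace`:

```python
from src.detections import DateDetection, PhoneNumberDetection

# Excerpt of the table above; the replacer keys its placeholders by detection class.
tags = {DateDetection: "[DATA]", PhoneNumberDetection: "[CYFRY]"}
print(tags[PhoneNumberDetection])  # -> "[CYFRY]" (was "[DIGITS]" before this MR)

# Illustration only: "Jan dzwonił 5 maja 2021 r. pod numer 601 234 567."
# would come out of tag mode roughly as "[OSOBA] dzwonił [DATA] pod numer [CYFRY]."
```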