Syntactic Tools / combo · Merge request !42: Release 1.0.5
Merged · Mateusz Klimaszewski requested to merge develop into master, 3 years ago
4 commits · 1 pipeline · 6 changes
Raise an error when too many tokens are processed by BERT.
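A minimal sketch of the failure mode this MR introduces: instead of silently truncating or crashing inside the transformer, the indexer now rejects sentences whose wordpieces exceed the model's limit. The model name and the synthetic 1000-token sentence below are illustrative assumptions, not part of the MR.

from allennlp.data import Token, Vocabulary

from combo.data.token_indexers.pretrained_transformer_mismatched_indexer import (
    PretrainedTransformerMismatchedIndexer,
)

# A sentence far above BERT's ~510-wordpiece single-sentence limit.
indexer = PretrainedTransformerMismatchedIndexer(model_name="bert-base-cased")  # assumed model
tokens = [Token("word")] * 1000
try:
    indexer.tokens_to_indices(tokens, Vocabulary())
except ValueError as error:
    print(error)  # lists the first 10 tokens plus the maximal and current input sizes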
Compare master (base) with latest version 4dc8c92f · 4 commits, 3 years ago · 6 files changed: +66 −15
combo/data/token_indexers/pretrained_transformer_mismatched_indexer.py (+33 −2)
from typing import Optional, Dict, Any, List, Tuple

from allennlp import data
from allennlp.data import token_indexers, tokenizers, IndexedTokenList, vocabulary
from overrides import overrides


@data.TokenIndexer.register("pretrained_transformer_mismatched_fixed")
class PretrainedTransformerMismatchedIndexer(token_indexers.PretrainedTransformerMismatchedIndexer):
    """TODO(mklimasz) Remove during next allennlp update, fixed on allennlp master."""

    def __init__(self, model_name: str, namespace: str = "tags", max_length: int = None,
                 tokenizer_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
@@ -24,6 +24,37 @@ class PretrainedTransformerMismatchedIndexer(token_indexers.PretrainedTransforme
        self._num_added_start_tokens = self._matched_indexer._num_added_start_tokens
        self._num_added_end_tokens = self._matched_indexer._num_added_end_tokens

    @overrides
    def tokens_to_indices(self, tokens, vocabulary: vocabulary.Vocabulary) -> IndexedTokenList:
        """
        Overridden in order to raise an error when the number of wordpiece tokens needed
        to embed a sentence exceeds the maximal input length of the model.
        """
        self._matched_indexer._add_encoding_to_vocabulary_if_needed(vocabulary)

        wordpieces, offsets = self._allennlp_tokenizer.intra_word_tokenize(
            [t.ensure_text() for t in tokens])

        if len(wordpieces) > self._tokenizer.max_len_single_sentence:
            raise ValueError("The following sentence consists of more wordpiece tokens than the model can process:\n" +
                             " ".join([str(x) for x in tokens[:10]]) + " ...\n" +
                             f"Maximal input: {self._tokenizer.max_len_single_sentence}\n" +
                             f"Current input: {len(wordpieces)}")

        # Tokens that map to no wordpieces get a (-1, -1) sentinel offset.
        offsets = [x if x is not None else (-1, -1) for x in offsets]

        output: IndexedTokenList = {
            "token_ids": [t.text_id for t in wordpieces],
            "mask": [True] * len(tokens),  # for original tokens (i.e. word-level)
            "type_ids": [t.type_id for t in wordpieces],
            "offsets": offsets,
            "wordpiece_mask": [True] * len(wordpieces),  # for wordpieces (i.e. subword-level)
        }

        return self._matched_indexer._postprocess_output(output)


class PretrainedTransformerIndexer(token_indexers.PretrainedTransformerIndexer):
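Because the class is registered under "pretrained_transformer_mismatched_fixed", configs can pick up the fixed indexer by name through allennlp's standard Registrable machinery. A minimal sketch, assuming the combo module above has been imported so the registration decorator has run; the model name is again an assumption:

from allennlp.data import TokenIndexer

# Resolve the indexer through the registry, exactly as a jsonnet config with
# "type": "pretrained_transformer_mismatched_fixed" would.
indexer_cls = TokenIndexer.by_name("pretrained_transformer_mismatched_fixed")
indexer = indexer_cls(model_name="bert-base-cased")  # assumed model

Registering under a new name rather than overwriting the upstream "pretrained_transformer_mismatched" key keeps the stock allennlp indexer available, which matches the TODO's plan to drop this class once the fix lands upstream.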