# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

from transformers.models.mluke.tokenization_mluke import MLukeTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow
from transformers.tokenization_utils_sentencepiece import SentencePieceExtractor

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_ENTITY_VOCAB = get_tests_dir("fixtures/test_entity_vocab.json")


# TODO: (Ita / Arthur) FIXME
@unittest.skip("Skip for now as this fails after #40936")
class MLukeTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "studio-ousia/mluke-base"
    tokenizer_class = MLukeTokenizer
    from_pretrained_kwargs = {"cls_token": "<s>"}

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.from_pretrained_id = "studio-ousia/mluke-base"
        cls.tokenizer_class = MLukeTokenizer

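        # `<ent>` / `<ent2>` are the mention markers used by the task-specific input formats
        # (see the entity classification integration tests below).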
        cls.special_tokens_map = {"entity_token_1": "<ent>", "entity_token_2": "<ent2>"}

    @classmethod
    def get_tokenizer(cls, pretrained_name=None, task=None, **kwargs):
        kwargs.update(cls.special_tokens_map)
        if "task" not in kwargs or task is not None:
            kwargs.update({"task": task})
        # TokenizerTesterMixin passes `pretrained_name` as the first positional argument; keep using fixtures here.

        extractor = SentencePieceExtractor(SAMPLE_VOCAB)
        vocab_ids, vocab_scores, merges = extractor.extract()
        tokenizer = MLukeTokenizer(vocab=vocab_scores, entity_vocab_file=SAMPLE_ENTITY_VOCAB, **kwargs)
        return tokenizer

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

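    # Helper, not collected by pytest (the name lacks the `test_` prefix).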
    def mluke_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [35378, 8999, 38])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [35378, 8999, 38, 33273, 11676, 604, 365, 21392, 201, 1819],
        )

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("hf-internal-testing/tiny-random-mluke")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
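        # Building inputs manually should match encode(..., add_special_tokens=True) for both single sequences and pairs.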

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual(encoded_sentence, encoded_text_from_decode)
        self.assertEqual(encoded_pair, encoded_pair_from_decode)

    def get_clean_sequence(self, tokenizer, max_length=20) -> tuple[str, list]:
        txt = "Beyonce lives in Los Angeles"
        ids = tokenizer.encode(txt, add_special_tokens=False)
        return txt, ids

    @unittest.skip
    def test_pretokenized_inputs(self):
        pass

    def test_padding_entity_inputs(self):
        tokenizer = self.get_tokenizer()

        sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
        span = (15, 34)
        pad_id = tokenizer.entity_vocab["[PAD]"]
        mask_id = tokenizer.entity_vocab["[MASK]"]

        encoding = tokenizer([sentence, sentence], entity_spans=[[span], [span, span]], padding=True)
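        # Spans without a named entity are encoded as [MASK]; the shorter entity list is padded with [PAD] up to the batch maximum.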
        self.assertEqual(encoding["entity_ids"], [[mask_id, pad_id], [mask_id, mask_id]])

        # test with a sentence with no entity
        encoding = tokenizer([sentence, sentence], entity_spans=[[], [span, span]], padding=True)
        self.assertEqual(encoding["entity_ids"], [[pad_id, pad_id], [mask_id, mask_id]])

    # def test_if_tokenize_single_text_raise_error_with_invalid_inputs(self):
    #     tokenizer = self.get_tokenizer()

    #     sentence = "ISO 639-3 uses the code fas for the dialects spoken across Iran and Afghanistan."
    #     entities = ["DUMMY"]
    #     spans = [(0, 9)]

    #     with self.assertRaises(ValueError):
    #         tokenizer(sentence, entities=tuple(entities), entity_spans=spans)

    #     with self.assertRaises(ValueError):
    #         tokenizer(sentence, entities=entities, entity_spans=tuple(spans))

    #     with self.assertRaises(ValueError):
    #         tokenizer(sentence, entities=[0], entity_spans=spans)

    #     with self.assertRaises(ValueError):
    #         tokenizer(sentence, entities=entities, entity_spans=[0])

    #     with self.assertRaises(ValueError):
    #         tokenizer(sentence, entities=entities, entity_spans=spans + [(0, 9)])

    @slow
    def test_conversion_reversible(self):
        return super().test_conversion_reversible()

    @slow
    def test_jinja_loopcontrols(self):
        return super().test_jinja_loopcontrols()

    @slow
    def test_pad_token_initialization(self):
        return super().test_pad_token_initialization()


@slow
@require_torch
class MLukeTokenizerIntegrationTests(unittest.TestCase):
    tokenizer_class = MLukeTokenizer
    from_pretrained_kwargs = {"cls_token": "<s>"}

    @classmethod
    def setUpClass(cls):
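        # One tokenizer per downstream task; the task controls how entity spans are marked in the text
        # and which entity ids are produced.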
        cls.tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base", return_token_type_ids=True)
        cls.entity_classification_tokenizer = MLukeTokenizer.from_pretrained(
            "studio-ousia/mluke-base", return_token_type_ids=True, task="entity_classification"
        )
        cls.entity_pair_tokenizer = MLukeTokenizer.from_pretrained(
            "studio-ousia/mluke-base", return_token_type_ids=True, task="entity_pair_classification"
        )

        cls.entity_span_tokenizer = MLukeTokenizer.from_pretrained(
            "studio-ousia/mluke-base", return_token_type_ids=True, task="entity_span_classification"
        )

    def test_single_text_no_padding_or_truncation(self):
        tokenizer = self.tokenizer
        sentence = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
        entities = ["en:ISO 639-3", "DUMMY_ENTITY", "ja:アフガニスタン", "en:Afghanistan"]
        spans = [(0, 9), (59, 63), (68, 75), (77, 88)]

        encoding = tokenizer(sentence, entities=entities, entity_spans=spans, return_token_type_ids=True)

        self.assertEqual(
            tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
            "<s> ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン ( Afghanistan ).</s>",
        )
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][1:5], spaces_between_special_tokens=False), "ISO 639-3"
        )
        self.assertEqual(tokenizer.decode(encoding["input_ids"][17], spaces_between_special_tokens=False), "Iran")
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][19:25], spaces_between_special_tokens=False), "アフガニスタン"
        )
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][26], spaces_between_special_tokens=False), "Afghanistan"
        )

        self.assertEqual(
            encoding["entity_ids"],
            [
                tokenizer.entity_vocab["en:ISO 639-3"],
                tokenizer.entity_vocab["[UNK]"],
                tokenizer.entity_vocab["ja:アフガニスタン"],
                tokenizer.entity_vocab["en:Afghanistan"],
            ],
        )
        self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1, 1])
        self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0, 0])
        # fmt: off
        self.assertEqual(
            encoding["entity_position_ids"],
            [
                [1, 2, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [17, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [19, 20, 21, 22, 23, 24, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [26, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
            ]
        )
        # fmt: on

    def test_single_text_only_entity_spans_no_padding_or_truncation(self):
        tokenizer = self.tokenizer

        sentence = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
        entities = ["en:ISO 639-3", "DUMMY_ENTITY", "ja:アフガニスタン", "en:Afghanistan"]
        spans = [(0, 9), (59, 63), (68, 75), (77, 88)]

        encoding = tokenizer(sentence, entities=entities, entity_spans=spans, return_token_type_ids=True)

        self.assertEqual(
            tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
            "<s> ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン ( Afghanistan ).</s>",
        )
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][1:5], spaces_between_special_tokens=False), "ISO 639-3"
        )
        self.assertEqual(tokenizer.decode(encoding["input_ids"][17], spaces_between_special_tokens=False), "Iran")
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][19:25], spaces_between_special_tokens=False), "アフガニスタン"
        )
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][26], spaces_between_special_tokens=False), "Afghanistan"
        )

        mask_id = tokenizer.entity_vocab["[MASK]"]
        self.assertEqual(encoding["entity_ids"], [mask_id, mask_id, mask_id, mask_id])
        self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1, 1])
        self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0, 0])
        # fmt: off
        self.assertEqual(
            encoding["entity_position_ids"],
            [
                [1, 2, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [17, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [19, 20, 21, 22, 23, 24, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [26, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
            ]
        )
        # fmt: on

    def test_single_text_padding_pytorch_tensors(self):
        tokenizer = self.tokenizer

        sentence = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
        entities = ["en:ISO 639-3", "DUMMY_ENTITY", "ja:アフガニスタン", "en:Afghanistan"]
        spans = [(0, 9), (59, 63), (68, 75), (77, 88)]

        encoding = tokenizer(
            sentence,
            entities=entities,
            entity_spans=spans,
            return_token_type_ids=True,
            padding="max_length",
            max_length=30,
            max_entity_length=16,
            return_tensors="pt",
        )
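        # Word and entity sequences are padded independently: `max_length` pads the word side to 30,
        # `max_entity_length` pads the entity side to 16.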

        # test words
        self.assertEqual(encoding["input_ids"].shape, (1, 30))
        self.assertEqual(encoding["attention_mask"].shape, (1, 30))
        self.assertEqual(encoding["token_type_ids"].shape, (1, 30))

        # test entities
        self.assertEqual(encoding["entity_ids"].shape, (1, 16))
        self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16))
        self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16))
        self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length))

    def test_text_pair_no_padding_or_truncation(self):
        tokenizer = self.tokenizer

        sentence = "ISO 639-3 uses the code fas"
        sentence_pair = "for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
        entities = ["en:ISO 639-3"]
        entities_pair = ["DUMMY_ENTITY", "ja:アフガニスタン", "en:Afghanistan"]
        spans = [(0, 9)]
        spans_pair = [(31, 35), (40, 47), (49, 60)]
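        # Offsets in spans_pair are relative to sentence_pair; in the encoded pair, token positions shift
        # past the "</s></s>" separator (Iran sits at 19 here vs. 17 in the single-sentence case).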

        encoding = tokenizer(
            sentence,
            sentence_pair,
            entities=entities,
            entities_pair=entities_pair,
            entity_spans=spans,
            entity_spans_pair=spans_pair,
            return_token_type_ids=True,
        )

        self.assertEqual(
            tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
            "<s> ISO 639-3 uses the code fas</s></s> for the dialects spoken across Iran and アフガニスタン ( Afghanistan"
            " ).</s>",
        )
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][1:5], spaces_between_special_tokens=False), "ISO 639-3"
        )
        self.assertEqual(tokenizer.decode(encoding["input_ids"][19], spaces_between_special_tokens=False), "Iran")
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][21:27], spaces_between_special_tokens=False), "アフガニスタン"
        )
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][28], spaces_between_special_tokens=False), "Afghanistan"
        )

        self.assertEqual(
            encoding["entity_ids"],
            [
                tokenizer.entity_vocab["en:ISO 639-3"],
                tokenizer.entity_vocab["[UNK]"],
                tokenizer.entity_vocab["ja:アフガニスタン"],
                tokenizer.entity_vocab["en:Afghanistan"],
            ],
        )
        self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1, 1])
        self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0, 0])
        # fmt: off
        self.assertEqual(
            encoding["entity_position_ids"],
            [
                [1, 2, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [19, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [21, 22, 23, 24, 25, 26, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [28, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
            ]
        )
        # fmt: on

    def test_text_pair_only_entity_spans_no_padding_or_truncation(self):
        tokenizer = self.tokenizer

        sentence = "ISO 639-3 uses the code fas"
        sentence_pair = "for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
        entities = ["en:ISO 639-3"]
        entities_pair = ["DUMMY_ENTITY", "ja:アフガニスタン", "en:Afghanistan"]
        spans = [(0, 9)]
        spans_pair = [(31, 35), (40, 47), (49, 60)]

        encoding = tokenizer(
            sentence,
            sentence_pair,
            entities=entities,
            entities_pair=entities_pair,
            entity_spans=spans,
            entity_spans_pair=spans_pair,
            return_token_type_ids=True,
        )

        self.assertEqual(
            tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
            "<s> ISO 639-3 uses the code fas</s></s> for the dialects spoken across Iran and アフガニスタン ( Afghanistan"
            " ).</s>",
        )
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][1:5], spaces_between_special_tokens=False), "ISO 639-3"
        )
        self.assertEqual(tokenizer.decode(encoding["input_ids"][19], spaces_between_special_tokens=False), "Iran")
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][21:27], spaces_between_special_tokens=False), "アフガニスタン"
        )
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][28], spaces_between_special_tokens=False), "Afghanistan"
        )

        mask_id = tokenizer.entity_vocab["[MASK]"]
        self.assertEqual(encoding["entity_ids"], [mask_id, mask_id, mask_id, mask_id])
        # fmt: off
        self.assertEqual(
            encoding["entity_position_ids"],
            [
                [1, 2, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [19, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [21, 22, 23, 24, 25, 26, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [28, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
            ]
        )
        # fmt: on

    def test_text_pair_padding_pytorch_tensors(self):
        tokenizer = self.tokenizer

        sentence = "ISO 639-3 uses the code fas"
        sentence_pair = "for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
        entities = ["en:ISO 639-3"]
        entities_pair = ["DUMMY_ENTITY", "ja:アフガニスタン", "en:Afghanistan"]
        spans = [(0, 9)]
        spans_pair = [(31, 35), (40, 47), (49, 60)]

        encoding = tokenizer(
            sentence,
            sentence_pair,
            entities=entities,
            entities_pair=entities_pair,
            entity_spans=spans,
            entity_spans_pair=spans_pair,
            return_token_type_ids=True,
            padding="max_length",
            max_length=40,
            max_entity_length=16,
            return_tensors="pt",
        )

        # test words
        self.assertEqual(encoding["input_ids"].shape, (1, 40))
        self.assertEqual(encoding["attention_mask"].shape, (1, 40))
        self.assertEqual(encoding["token_type_ids"].shape, (1, 40))

        # test entities
        self.assertEqual(encoding["entity_ids"].shape, (1, 16))
        self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16))
        self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16))
        self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length))

    def test_entity_classification_no_padding_or_truncation(self):
        tokenizer = self.entity_classification_tokenizer

        sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
        span = (15, 34)

        encoding = tokenizer(sentence, entity_spans=[span], return_token_type_ids=True)
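        # The entity_classification task wraps the target mention with <ent> markers and encodes it as the single [MASK] entity.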

        # test words
        self.assertEqual(len(encoding["input_ids"]), 23)
        self.assertEqual(len(encoding["attention_mask"]), 23)
        self.assertEqual(len(encoding["token_type_ids"]), 23)
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
            "<s> Japanese is an<ent>East Asian language<ent>spoken by about 128 million people, primarily in"
            " Japan.</s>",
        )
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][4:9], spaces_between_special_tokens=False),
            "<ent>East Asian language<ent>",
        )

        # test entities
        mask_id = tokenizer.entity_vocab["[MASK]"]
        self.assertEqual(encoding["entity_ids"], [mask_id])
        self.assertEqual(encoding["entity_attention_mask"], [1])
        self.assertEqual(encoding["entity_token_type_ids"], [0])
        # fmt: off
        self.assertEqual(
            encoding["entity_position_ids"],
            [[4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]]
        )
        # fmt: on

    def test_entity_classification_padding_pytorch_tensors(self):
        tokenizer = self.entity_classification_tokenizer

        sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
        span = (15, 34)

        encoding = tokenizer(
            sentence, entity_spans=[span], return_token_type_ids=True, padding="max_length", return_tensors="pt"
        )

        # test words
        self.assertEqual(encoding["input_ids"].shape, (1, 512))
        self.assertEqual(encoding["attention_mask"].shape, (1, 512))
        self.assertEqual(encoding["token_type_ids"].shape, (1, 512))

        # test entities
        self.assertEqual(encoding["entity_ids"].shape, (1, 1))
        self.assertEqual(encoding["entity_attention_mask"].shape, (1, 1))
        self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 1))
        self.assertEqual(
            encoding["entity_position_ids"].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length)
        )

    def test_entity_pair_classification_no_padding_or_truncation(self):
        tokenizer = self.entity_pair_tokenizer

        sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
        # character spans of the head and tail entities
        spans = [(0, 8), (84, 89)]

        encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True)
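        # The entity_pair_classification task marks the head span with <ent> and the tail span with <ent2>,
        # encoded as the [MASK] and [MASK2] entities respectively.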

        self.assertEqual(
            tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
            "<s><ent>Japanese<ent>is an East Asian language spoken by about 128 million people, primarily"
            " in<ent2>Japan<ent2>.</s>",
        )
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][1:4], spaces_between_special_tokens=False),
            "<ent>Japanese<ent>",
        )
        self.assertEqual(
            tokenizer.decode(encoding["input_ids"][20:23], spaces_between_special_tokens=False), "<ent2>Japan<ent2>"
        )

        mask_id = tokenizer.entity_vocab["[MASK]"]
        mask2_id = tokenizer.entity_vocab["[MASK2]"]
        self.assertEqual(encoding["entity_ids"], [mask_id, mask2_id])
        self.assertEqual(encoding["entity_attention_mask"], [1, 1])
        self.assertEqual(encoding["entity_token_type_ids"], [0, 0])
        # fmt: off
        self.assertEqual(
            encoding["entity_position_ids"],
            [
                [1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [20, 21, 22, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
            ]
        )
        # fmt: on

    def test_entity_pair_classification_padding_pytorch_tensors(self):
        tokenizer = self.entity_pair_tokenizer

        sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
        # character spans of the head and tail entities
        spans = [(0, 8), (84, 89)]

        encoding = tokenizer(
            sentence,
            entity_spans=spans,
            return_token_type_ids=True,
            padding="max_length",
            max_length=30,
            return_tensors="pt",
        )

        # test words
        self.assertEqual(encoding["input_ids"].shape, (1, 30))
        self.assertEqual(encoding["attention_mask"].shape, (1, 30))
        self.assertEqual(encoding["token_type_ids"].shape, (1, 30))

        # test entities
        self.assertEqual(encoding["entity_ids"].shape, (1, 2))
        self.assertEqual(encoding["entity_attention_mask"].shape, (1, 2))
        self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 2))
        self.assertEqual(
            encoding["entity_position_ids"].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length)
        )

    def test_entity_span_classification_no_padding_or_truncation(self):
        tokenizer = self.entity_span_tokenizer

        sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
        spans = [(0, 8), (15, 34), (84, 89)]

        encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True)
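        # The entity_span_classification task inserts no markers into the text; it returns [MASK] entity ids
        # plus start/end word positions for each span.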

        self.assertEqual(
            tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False),
            "<s> Japanese is an East Asian language spoken by about 128 million people, primarily in Japan.</s>",
        )

        mask_id = tokenizer.entity_vocab["[MASK]"]
        self.assertEqual(encoding["entity_ids"], [mask_id, mask_id, mask_id])
        self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1])
        self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0])
        # fmt: off
        self.assertEqual(
            encoding["entity_position_ids"],
            [
                [1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [4, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [18, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1]]
        )
        # fmt: on
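        # entity_start_positions / entity_end_positions give the first and last word-token position of each span
        # (e.g. "East Asian language" covers positions 4-6).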
        self.assertEqual(encoding["entity_start_positions"], [1, 4, 18])
        self.assertEqual(encoding["entity_end_positions"], [1, 6, 18])

    def test_entity_span_classification_padding_pytorch_tensors(self):
        tokenizer = self.entity_span_tokenizer

        sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
        spans = [(0, 8), (15, 34), (84, 89)]

        encoding = tokenizer(
            sentence,
            entity_spans=spans,
            return_token_type_ids=True,
            padding="max_length",
            max_length=30,
            max_entity_length=16,
            return_tensors="pt",
        )

        # test words
        self.assertEqual(encoding["input_ids"].shape, (1, 30))
        self.assertEqual(encoding["attention_mask"].shape, (1, 30))
        self.assertEqual(encoding["token_type_ids"].shape, (1, 30))

        # test entities
        self.assertEqual(encoding["entity_ids"].shape, (1, 16))
        self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16))
        self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16))
        self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length))
        self.assertEqual(encoding["entity_start_positions"].shape, (1, 16))
        self.assertEqual(encoding["entity_end_positions"].shape, (1, 16))
