# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_tokenizers, slow

from .test_tokenization_common import TokenizerTesterMixin

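# Small SentencePiece model shipped with the test fixtures; it is assumed to hold 1,000 pieces
# (see test_vocab_size below) and serves as a lightweight vocabulary for these tests.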
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

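    # Run the shared TokenizerTesterMixin checks against both the Python ("slow") and Rust ("fast")
    # XLNet tokenizers, including the SentencePiece-specific tests enabled below.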
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
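        # Make sure all special tokens are registered in the vocabulary before the fixture
        # tokenizer is saved and reloaded by the common tests.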
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

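        # The fixture model itself has 1,000 pieces (see test_vocab_size); the extra entries in
        # get_vocab() are assumed to come from the special tokens added on top of it.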
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1_006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

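        # Pieces that are not in the fixture vocab ("9" and "é" above map to id 0) come back as
        # "<unk>" when the ids are converted to tokens again.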
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
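        # With do_lower_case=True the text is lowercased and, since keep_accents is not set, the
        # accent is stripped, so "falsé" ends in "se" rather than "s" + "é".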
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
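        # Casing is preserved here ("▁I" stays capitalized), but the accent of "falsé" is still
        # stripped because keep_accents defaults to False.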
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

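        # XLNet appends its special tokens at the end of the sequence: a <sep> (id 4) after each
        # segment and a single <cls> (id 3) at the very end.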
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
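        # The second and third sequences are shorter, so they are left-padded with the pad token
        # (id 5), masked out in the attention_mask and given the padding token_type_id 3; every
        # sequence ends with <sep> (4) and <cls> (3), the <cls> position getting token_type_id 2.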

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )