- # coding=utf-8
- # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ Testing suite for the PyTorch RoFormer model. """
-
-
- import unittest
-
- from transformers import is_torch_available
- from transformers.testing_utils import require_torch, slow, torch_device
-
- from .test_configuration_common import ConfigTester
- from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
-
-
- if is_torch_available():
- import torch
-
- from transformers import (
- RoFormerConfig,
- RoFormerForCausalLM,
- RoFormerForMaskedLM,
- RoFormerForMultipleChoice,
- RoFormerForQuestionAnswering,
- RoFormerForSequenceClassification,
- RoFormerForTokenClassification,
- RoFormerModel,
- )
- from transformers.models.roformer.modeling_roformer import (
- ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
- RoFormerSelfAttention,
- RoFormerSinusoidalPositionalEmbedding,
- )
-
-
- class RoFormerModelTester:
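- # Builds a small RoFormer config together with dummy inputs shared by the test cases below.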
- def __init__(
- self,
- parent,
- batch_size=13,
- seq_length=7,
- is_training=True,
- use_input_mask=True,
- use_token_type_ids=True,
- use_labels=True,
- vocab_size=99,
- hidden_size=32,
- num_hidden_layers=5,
- num_attention_heads=4,
- intermediate_size=37,
- hidden_act="gelu",
- hidden_dropout_prob=0.1,
- attention_probs_dropout_prob=0.1,
- max_position_embeddings=512,
- type_vocab_size=16,
- type_sequence_label_size=2,
- initializer_range=0.02,
- num_labels=3,
- num_choices=4,
- scope=None,
- ):
- self.parent = parent
- self.batch_size = batch_size
- self.seq_length = seq_length
- self.is_training = is_training
- self.use_input_mask = use_input_mask
- self.use_token_type_ids = use_token_type_ids
- self.use_labels = use_labels
- self.vocab_size = vocab_size
- self.hidden_size = hidden_size
- self.num_hidden_layers = num_hidden_layers
- self.num_attention_heads = num_attention_heads
- self.intermediate_size = intermediate_size
- self.hidden_act = hidden_act
- self.hidden_dropout_prob = hidden_dropout_prob
- self.attention_probs_dropout_prob = attention_probs_dropout_prob
- self.max_position_embeddings = max_position_embeddings
- self.type_vocab_size = type_vocab_size
- self.type_sequence_label_size = type_sequence_label_size
- self.initializer_range = initializer_range
- self.num_labels = num_labels
- self.num_choices = num_choices
- self.scope = scope
-
- def prepare_config_and_inputs(self):
- input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
-
- input_mask = None
- if self.use_input_mask:
- input_mask = random_attention_mask([self.batch_size, self.seq_length])
-
- token_type_ids = None
- if self.use_token_type_ids:
- token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
-
- sequence_labels = None
- token_labels = None
- choice_labels = None
- if self.use_labels:
- sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
- token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
- choice_labels = ids_tensor([self.batch_size], self.num_choices)
-
- config = RoFormerConfig(
- vocab_size=self.vocab_size,
- hidden_size=self.hidden_size,
- num_hidden_layers=self.num_hidden_layers,
- num_attention_heads=self.num_attention_heads,
- intermediate_size=self.intermediate_size,
- hidden_act=self.hidden_act,
- hidden_dropout_prob=self.hidden_dropout_prob,
- attention_probs_dropout_prob=self.attention_probs_dropout_prob,
- max_position_embeddings=self.max_position_embeddings,
- type_vocab_size=self.type_vocab_size,
- is_decoder=False,
- initializer_range=self.initializer_range,
- )
-
- return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
-
- def prepare_config_and_inputs_for_decoder(self):
- (
- config,
- input_ids,
- token_type_ids,
- input_mask,
- sequence_labels,
- token_labels,
- choice_labels,
- ) = self.prepare_config_and_inputs()
-
- config.is_decoder = True
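- # dummy encoder hidden states and encoder attention mask so that cross-attention can be exercised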
- encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
- encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
-
- return (
- config,
- input_ids,
- token_type_ids,
- input_mask,
- sequence_labels,
- token_labels,
- choice_labels,
- encoder_hidden_states,
- encoder_attention_mask,
- )
-
- def create_and_check_model(
- self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
- ):
- model = RoFormerModel(config=config)
- model.to(torch_device)
- model.eval()
- result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
- result = model(input_ids, token_type_ids=token_type_ids)
- result = model(input_ids)
- self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
-
- def create_and_check_model_as_decoder(
- self,
- config,
- input_ids,
- token_type_ids,
- input_mask,
- sequence_labels,
- token_labels,
- choice_labels,
- encoder_hidden_states,
- encoder_attention_mask,
- ):
- config.add_cross_attention = True
- model = RoFormerModel(config)
- model.to(torch_device)
- model.eval()
- result = model(
- input_ids,
- attention_mask=input_mask,
- token_type_ids=token_type_ids,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- )
- result = model(
- input_ids,
- attention_mask=input_mask,
- token_type_ids=token_type_ids,
- encoder_hidden_states=encoder_hidden_states,
- )
- result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
- self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
-
- def create_and_check_for_causal_lm(
- self,
- config,
- input_ids,
- token_type_ids,
- input_mask,
- sequence_labels,
- token_labels,
- choice_labels,
- encoder_hidden_states,
- encoder_attention_mask,
- ):
- model = RoFormerForCausalLM(config=config)
- model.to(torch_device)
- model.eval()
- result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
- self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
-
- def create_and_check_for_masked_lm(
- self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
- ):
- model = RoFormerForMaskedLM(config=config)
- model.to(torch_device)
- model.eval()
- result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
- self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
-
- def create_and_check_decoder_model_past_large_inputs(
- self,
- config,
- input_ids,
- token_type_ids,
- input_mask,
- sequence_labels,
- token_labels,
- choice_labels,
- encoder_hidden_states,
- encoder_attention_mask,
- ):
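- # Feeding only the new tokens plus past_key_values should reproduce the hidden states
- # obtained from a full forward pass over the concatenated sequence.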
- config.is_decoder = True
- config.add_cross_attention = True
- model = RoFormerForCausalLM(config=config)
- model.to(torch_device)
- model.eval()
-
- # first forward pass
- outputs = model(
- input_ids,
- attention_mask=input_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- use_cache=True,
- )
- past_key_values = outputs.past_key_values
-
- # create hypothetical next tokens and the corresponding attention mask
- next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
- next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
-
- # append the new tokens to input_ids and the attention mask
- next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
- next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
-
- output_from_no_past = model(
- next_input_ids,
- attention_mask=next_attention_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- output_hidden_states=True,
- )["hidden_states"][0]
- output_from_past = model(
- next_tokens,
- attention_mask=next_attention_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- past_key_values=past_key_values,
- output_hidden_states=True,
- )["hidden_states"][0]
-
- # select random slice
- random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
- output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
- output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
-
- self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
-
- # test that outputs are equal for slice
- self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
-
- def create_and_check_for_question_answering(
- self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
- ):
- model = RoFormerForQuestionAnswering(config=config)
- model.to(torch_device)
- model.eval()
- result = model(
- input_ids,
- attention_mask=input_mask,
- token_type_ids=token_type_ids,
- start_positions=sequence_labels,
- end_positions=sequence_labels,
- )
- self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
- self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
-
- def create_and_check_for_sequence_classification(
- self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
- ):
- config.num_labels = self.num_labels
- model = RoFormerForSequenceClassification(config)
- model.to(torch_device)
- model.eval()
- result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
- self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
-
- def create_and_check_for_token_classification(
- self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
- ):
- config.num_labels = self.num_labels
- model = RoFormerForTokenClassification(config=config)
- model.to(torch_device)
- model.eval()
- result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
- self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
-
- def create_and_check_for_multiple_choice(
- self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
- ):
- config.num_choices = self.num_choices
- model = RoFormerForMultipleChoice(config=config)
- model.to(torch_device)
- model.eval()
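- # tile every input along a new choices dimension: (batch_size, seq_len) -> (batch_size, num_choices, seq_len)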
- multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
- multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
- multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
- result = model(
- multiple_choice_inputs_ids,
- attention_mask=multiple_choice_input_mask,
- token_type_ids=multiple_choice_token_type_ids,
- labels=choice_labels,
- )
- self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
-
- def prepare_config_and_inputs_for_common(self):
- config_and_inputs = self.prepare_config_and_inputs()
- (
- config,
- input_ids,
- token_type_ids,
- input_mask,
- sequence_labels,
- token_labels,
- choice_labels,
- ) = config_and_inputs
- inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
- return config, inputs_dict
-
-
- @require_torch
- class RoFormerModelTest(ModelTesterMixin, unittest.TestCase):
-
- all_model_classes = (
- (
- RoFormerModel,
- RoFormerForMaskedLM,
- RoFormerForCausalLM,
- RoFormerForMultipleChoice,
- RoFormerForQuestionAnswering,
- RoFormerForSequenceClassification,
- RoFormerForTokenClassification,
- )
- if is_torch_available()
- else ()
- )
- all_generative_model_classes = (RoFormerForCausalLM,) if is_torch_available() else ()
-
- def setUp(self):
- self.model_tester = RoFormerModelTester(self)
- self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
-
- def test_config(self):
- self.config_tester.run_common_tests()
-
- def test_model(self):
- config_and_inputs = self.model_tester.prepare_config_and_inputs()
- self.model_tester.create_and_check_model(*config_and_inputs)
-
- def test_for_masked_lm(self):
- config_and_inputs = self.model_tester.prepare_config_and_inputs()
- self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
-
- def test_for_multiple_choice(self):
- config_and_inputs = self.model_tester.prepare_config_and_inputs()
- self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
-
- def test_decoder_model_past_with_large_inputs(self):
- config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
- self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
-
- def test_for_question_answering(self):
- config_and_inputs = self.model_tester.prepare_config_and_inputs()
- self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
-
- def test_for_sequence_classification(self):
- config_and_inputs = self.model_tester.prepare_config_and_inputs()
- self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
-
- def test_for_token_classification(self):
- config_and_inputs = self.model_tester.prepare_config_and_inputs()
- self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
-
- def test_model_as_decoder(self):
- config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
- self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
-
- def test_model_as_decoder_with_default_input_mask(self):
- # This regression test was failing with PyTorch < 1.3
- (
- config,
- input_ids,
- token_type_ids,
- input_mask,
- sequence_labels,
- token_labels,
- choice_labels,
- encoder_hidden_states,
- encoder_attention_mask,
- ) = self.model_tester.prepare_config_and_inputs_for_decoder()
-
- input_mask = None
-
- self.model_tester.create_and_check_model_as_decoder(
- config,
- input_ids,
- token_type_ids,
- input_mask,
- sequence_labels,
- token_labels,
- choice_labels,
- encoder_hidden_states,
- encoder_attention_mask,
- )
-
- @slow
- def test_model_from_pretrained(self):
- for model_name in ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
- model = RoFormerModel.from_pretrained(model_name)
- self.assertIsNotNone(model)
-
-
- @require_torch
- class RoFormerModelIntegrationTest(unittest.TestCase):
- @slow
- def test_inference_masked_lm(self):
- model = RoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
- input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
- output = model(input_ids)[0]
-
- # vocab size of the junnyu/roformer_chinese_base checkpoint
- vocab_size = 50000
-
- expected_shape = torch.Size((1, 6, vocab_size))
- self.assertEqual(output.shape, expected_shape)
-
- # expected logits for the first 3 sequence positions and the first 3 vocab entries
- expected_slice = torch.tensor(
- [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
- )
-
- self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
-
-
- @require_torch
- class RoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
- tolerance = 1e-4
-
- def test_basic(self):
- input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device)
- emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6).to(torch_device)
- emb = emb1(input_ids.shape)
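- # each row of the sinusoidal table stores sin values in its first half of dimensions and cos values in its second half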
- desired_weights = torch.tensor(
- [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
- ).to(torch_device)
- self.assertTrue(
- torch.allclose(emb, desired_weights, atol=self.tolerance),
- msg=f"\nexp:\n{desired_weights}\ngot:\n{emb[0]}\n",
- )
-
- def test_positional_emb_weights_against_roformer(self):
- desired_weights = torch.tensor(
- [
- [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
- [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
- [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
- ]
- ).to(torch_device)
- emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512).to(torch_device)
- weights = emb1.weight.data[:3, :5].to(torch_device)
-
- self.assertTrue(
- torch.allclose(weights, desired_weights, atol=self.tolerance),
- msg=f"\nexp:\n{desired_weights}\ngot:\n{weights}\n",
- )
-
-
- @require_torch
- class RoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
- tolerance = 1e-4
-
- def test_apply_rotary_position_embeddings(self):
- # build query/key layers of shape (batch_size=2, num_heads=12, seq_len=16, head_dim=64)
- query_layer = (
- torch.arange(2 * 12 * 16 * 64, dtype=torch.float, device=torch_device).reshape(2, 12, 16, 64) / 100
- ).to(torch_device)
- key_layer = (
- -torch.arange(2 * 12 * 16 * 64, dtype=torch.float, device=torch_device).reshape(2, 12, 16, 64) / 100
- ).to(torch_device)
- embed_positions = RoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64).to(torch_device)
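- # the embedding's forward() only reads (batch_size, seq_len) from the shape argument, so the trailing 768 below
- # is ignored; the (seq_len, head_dim) output is broadcast to (1, 1, seq_len, head_dim) to match query/key layout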
- sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
-
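- # apply_rotary_position_embeddings rotates each consecutive (even, odd) feature pair by the position angle:
- # q'_{2i} = q_{2i} * cos - q_{2i+1} * sin,  q'_{2i+1} = q_{2i+1} * cos + q_{2i} * sin  (and likewise for the keys)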
- query_layer, key_layer = RoFormerSelfAttention.apply_rotary_position_embeddings(
- sinusoidal_pos, query_layer, key_layer
- )
-
- desired_query_layer = torch.tensor(
- [
- [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
- [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
- [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
- [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
- [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
- [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
- ]
- ).to(torch_device)
- desired_key_layer = torch.tensor(
- [
- [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
- [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
- [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
- [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
- [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
- [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
- ]
- ).to(torch_device)
-
- self.assertTrue(
- torch.allclose(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance),
- msg=f"\nexp:\n{desired_query_layer}\ngot:\n{query_layer}\n",
- )
- self.assertTrue(
- torch.allclose(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance),
- msg=f"\nexp:\n{desired_key_layer}\ngot:\n{key_layer}\n",
- )