# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""RoBERTa model, based on BERT."""
import mindspore
from mindspore import nn, ops
from mindspore import Parameter
from mindspore.common.initializer import initializer

from mindnlp._legacy.nn import Dropout
from .configuration_roberta import RobertaConfig
from ..bert.modeling_bert import BertModel, BertPreTrainedModel


class MSRobertaEmbeddings(nn.Cell):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
    """

    # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm([config.hidden_size], epsilon=config.layer_norm_eps)
        self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.position_ids = ops.arange(config.max_position_embeddings).view((1, -1))
        self.token_type_ids = ops.zeros(self.position_ids.shape, dtype=mindspore.int64)

        # End copy
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )

    def construct(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)

        if input_ids is not None:
            input_shape = input_ids.shape
        else:
            input_shape = inputs_embeds.shape[:-1]

        seq_length = input_shape[1]

        # When token_type_ids is not passed, fall back to the all-zero buffer created in the
        # constructor. This mirrors the registered-buffer trick from transformers (issue #5664)
        # and keeps tracing working when token_type_ids is auto-generated.
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.broadcast_to((input_shape[0], seq_length))
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = ops.zeros(input_shape, dtype=mindspore.int64)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """
        We are provided embeddings directly. We cannot infer which are padded, so just generate sequential position ids.

        Args:
            inputs_embeds: mindspore.Tensor

        Returns: mindspore.Tensor
        """
        input_shape = inputs_embeds.shape[:-1]
        sequence_length = input_shape[1]

        position_ids = ops.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=mindspore.int64
        )
        return position_ids.unsqueeze(0).broadcast_to(input_shape)


class MSRobertaPreTrainedModel(BertPreTrainedModel):
    """Roberta Pretrained Model."""

    config_class = RobertaConfig
    base_model_prefix = "roberta"


class MSRobertaModel(BertModel):
    """Roberta Model"""

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config, add_pooling_layer=add_pooling_layer)
        self.embeddings = MSRobertaEmbeddings(config)


class MSRobertaLMHead(nn.Cell):
    """Roberta head for masked language modeling."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Dense(config.hidden_size, config.hidden_size)
        self.layer_norm = nn.LayerNorm((config.hidden_size,), epsilon=config.layer_norm_eps)

        self.decoder = nn.Dense(config.hidden_size, config.vocab_size, has_bias=False)
        self.bias = Parameter(initializer('zeros', config.vocab_size), 'bias')
        self.gelu = nn.GELU()

    def construct(self, features):
        x = self.dense(features)
        x = self.gelu(x)
        x = self.layer_norm(x)

        # project back to the vocabulary with a separate bias
        x = self.decoder(x) + self.bias
        return x


class MSRobertaClassificationHead(nn.Cell):
    """Roberta head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Dense(config.hidden_size, config.hidden_size, activation='tanh')
        self.dropout = Dropout(p=config.hidden_dropout_prob)
        self.out_proj = nn.Dense(config.hidden_size, config.num_labels)

    def construct(self, features):
        x = features[:, 0, :]  # take the <s> token (equivalent to [CLS])
        x = self.dropout(x)
        x = self.dense(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x


class MSRobertaForMaskedLM(MSRobertaPreTrainedModel):
    """RobertaForMaskedLM"""

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.roberta = MSRobertaModel(config)
        self.lm_head = MSRobertaLMHead(config)
        # tie the decoder weights to the input word embeddings
        self.lm_head.decoder.weight = self.roberta.embeddings.word_embeddings.weight
        self.vocab_size = self.config.vocab_size

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    def construct(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                  masked_lm_labels=None):
        outputs = self.roberta(input_ids,
                               attention_mask=attention_mask,
                               token_type_ids=token_type_ids,
                               position_ids=position_ids,
                               head_mask=head_mask)
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)

        outputs = (prediction_scores,) + outputs[2:]  # add hidden states and attentions if they are here

        if masked_lm_labels is not None:
            masked_lm_loss = ops.cross_entropy(prediction_scores.view(-1, self.vocab_size),
                                               masked_lm_labels.view(-1), ignore_index=-1)
            outputs = (masked_lm_loss,) + outputs

        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)


class MSRobertaForSequenceClassification(MSRobertaPreTrainedModel):
    """MSRobertaForSequenceClassification"""

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.num_labels = config.num_labels
        self.roberta = MSRobertaModel(config, add_pooling_layer=False)
        self.classifier = MSRobertaClassificationHead(config)

    def construct(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                  labels=None):
        outputs = self.roberta(input_ids,
                               attention_mask=attention_mask,
                               token_type_ids=token_type_ids,
                               position_ids=position_ids,
                               head_mask=head_mask)
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        outputs = (logits,) + outputs[2:]
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss = ops.mse_loss(logits.view(-1), labels.view(-1))
            else:
                loss = ops.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), logits, (hidden_states), (attentions)


class MSRobertaForMultipleChoice(MSRobertaPreTrainedModel):
    """RobertaForMultipleChoice"""

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.roberta = MSRobertaModel(config)
        self.dropout = Dropout(p=config.hidden_dropout_prob)
        self.classifier = nn.Dense(config.hidden_size, 1)

    def construct(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
                  position_ids=None, head_mask=None):
        # input_ids is expected to have shape (batch_size, num_choices, seq_len)
        num_choices = input_ids.shape[1]

        flat_input_ids = input_ids.view(-1, input_ids.shape[-1])
        flat_position_ids = position_ids.view(-1, position_ids.shape[-1]) if position_ids is not None else None
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
        flat_attention_mask = attention_mask.view(-1, attention_mask.shape[-1]) if attention_mask is not None else None
        outputs = self.roberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
                               attention_mask=flat_attention_mask, head_mask=head_mask)
        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        outputs = (reshaped_logits,) + outputs[2:]  # add hidden states and attentions if they are here

        if labels is not None:
            loss = ops.cross_entropy(reshaped_logits, labels)
            outputs = (loss,) + outputs

        return outputs  # (loss), reshaped_logits, (hidden_states), (attentions)


def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: mindspore.Tensor
        padding_idx: int
        past_key_values_length: int

    Returns: mindspore.Tensor
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (ops.cumsum(mask, axis=1).astype(mask.dtype) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx


__all__ = ['MSRobertaModel', 'MSRobertaPreTrainedModel',
           'MSRobertaForMaskedLM', 'MSRobertaForMultipleChoice',
           'MSRobertaForSequenceClassification']
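
# Minimal usage sketch (not part of the original module; assumes RobertaConfig
# default values and randomly initialized weights -- load a pretrained checkpoint
# for meaningful predictions):
#
#   import mindspore
#   from mindspore import ops
#
#   config = RobertaConfig()
#   model = MSRobertaForMaskedLM(config)
#   input_ids = ops.ones((1, 8), dtype=mindspore.int64)
#   prediction_scores = model(input_ids)[0]
#   print(prediction_scores.shape)  # (1, 8, config.vocab_size)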