- # coding=utf-8
- # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
- # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """MindSpore GPT-2 model."""
-
- import math
- import copy
- from dataclasses import dataclass
- from typing import Optional, Tuple, Union
- import numpy as np
- import mindspore
- from mindspore import ops, nn
- from mindspore.common.initializer import initializer, Normal
-
- from mindnlp.utils import (
- ModelOutput,
- logging,
- )
- from ...activations import ACT2FN
- from ...modeling_outputs import (
- BaseModelOutputWithPastAndCrossAttentions,
- CausalLMOutputWithCrossAttentions,
- QuestionAnsweringModelOutput,
- SequenceClassifierOutputWithPast,
- TokenClassifierOutput,
- )
- from ...modeling_utils import PreTrainedModel, SequenceSummary
- from ...ms_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
- from .configuration_gpt2 import GPT2Config
-
-
- logger = logging.get_logger(__name__)
-
-
- GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
- "gpt2",
- "gpt2-medium",
- "gpt2-large",
- "gpt2-xl",
- "distilgpt2",
- # See all GPT-2 models at https://hf-mirror.com/models?filter=gpt2
- ]
-
-
- class GPT2Attention(nn.Cell):
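-     """Multi-head attention used by every GPT-2 block.
-
-     Implements causal self-attention and, when created with `is_cross_attention=True`,
-     cross-attention over `encoder_hidden_states`.
-     """
-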
- def __init__(self, config, is_cross_attention=False, layer_idx=None):
- super().__init__()
-
- max_positions = config.max_position_embeddings
- self.bias = ops.tril(ops.ones((max_positions, max_positions), dtype=mindspore.bool_)).view(
- 1, 1, max_positions, max_positions
- )
- self.masked_bias = mindspore.Tensor(-1e4)
-
- self.embed_dim = config.hidden_size
- self.num_heads = config.num_attention_heads
- self.head_dim = self.embed_dim // self.num_heads
- self.split_size = self.embed_dim
- if self.head_dim * self.num_heads != self.embed_dim:
- raise ValueError(
- f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
- f" {self.num_heads})."
- )
-
- self.scale_attn_weights = config.scale_attn_weights
- self.is_cross_attention = is_cross_attention
-
- # Layer-wise attention scaling, reordering, and upcasting
- self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
- self.layer_idx = layer_idx
- self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
-
- if self.is_cross_attention:
- self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
- self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
- else:
- self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
- self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
-
- self.attn_dropout = nn.Dropout(p=config.attn_pdrop)
- self.resid_dropout = nn.Dropout(p=config.resid_pdrop)
-
- self.pruned_heads = set()
-
- def prune_heads(self, heads):
- if len(heads) == 0:
- return
- heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
- index_attn = ops.cat([index, index + self.split_size, index + (2 * self.split_size)])
-
- # Prune conv1d layers
- self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, axis=1)
- self.c_proj = prune_conv1d_layer(self.c_proj, index, axis=0)
-
- # Update hyper params
- self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
- self.num_heads = self.num_heads - len(heads)
- self.pruned_heads = self.pruned_heads.union(heads)
-
- def _attn(self, query, key, value, attention_mask=None, head_mask=None):
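-         """Scaled dot-product attention; the causal mask is applied unless this is a cross-attention layer."""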
- attn_weights = ops.matmul(query, key.swapaxes(-1, -2))
-
- if self.scale_attn_weights:
- attn_weights = attn_weights / ops.full(
- [], value.shape[-1] ** 0.5, dtype=attn_weights.dtype
- )
-
- # Layer-wise attention scaling
- if self.scale_attn_by_inverse_layer_idx:
- attn_weights = attn_weights / float(self.layer_idx + 1)
-
- if not self.is_cross_attention:
-             # only the "normal" self-attention layer implements the causal mask
- query_length, key_length = query.shape[-2], key.shape[-2]
- causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
- mask_value = float(np.finfo(mindspore.dtype_to_nptype(attn_weights.dtype)).min)
- # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
- mask_value = ops.full([], mask_value, dtype=attn_weights.dtype)
- attn_weights = ops.where(causal_mask, attn_weights.astype(attn_weights.dtype), mask_value)
-
- if attention_mask is not None:
- # Apply the attention mask
- attn_weights = attn_weights + attention_mask
-
- attn_weights = ops.softmax(attn_weights, axis=-1)
-
- # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
- attn_weights = attn_weights.astype(value.dtype)
- attn_weights = self.attn_dropout(attn_weights)
-
- # Mask heads if we want to
- if head_mask is not None:
- attn_weights = attn_weights * head_mask
-
- attn_output = ops.matmul(attn_weights, value)
-
- return attn_output, attn_weights
-
- def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
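-         """Like `_attn`, but folds all scaling into `ops.baddbmm` and computes the attention scores in float32."""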
- bsz, num_heads, q_seq_len, dk = query.shape
- _, _, k_seq_len, _ = key.shape
-
- # Preallocate attn_weights for `baddbmm`
- attn_weights = ops.zeros((bsz * num_heads, q_seq_len, k_seq_len), dtype=mindspore.float32)
-
- # Compute Scale Factor
- scale_factor = 1.0
- if self.scale_attn_weights:
- scale_factor /= float(value.shape[-1]) ** 0.5
-
- if self.scale_attn_by_inverse_layer_idx:
- scale_factor /= float(self.layer_idx + 1)
-
- # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
- q, k = query.reshape(-1, q_seq_len, dk), key.swapaxes(-1, -2).reshape(-1, dk, k_seq_len)
- attn_weights = ops.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
- attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
-
- if not self.is_cross_attention:
-             # only the "normal" self-attention layer implements the causal mask
- query_length, key_length = query.shape[-2], key.shape[-2]
- causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
- mask_value = float(np.finfo(mindspore.dtype_to_nptype(attn_weights.dtype)).min)
- # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
- mask_value = mindspore.Tensor(mask_value, dtype=attn_weights.dtype)
- attn_weights = ops.where(causal_mask, attn_weights, mask_value)
-
- if attention_mask is not None:
- # Apply the attention mask
- attn_weights = attn_weights + attention_mask
-
- attn_weights = ops.softmax(attn_weights, axis=-1)
-
-         # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
- if attn_weights.dtype != mindspore.float32:
- raise RuntimeError("Error with upcasting, attn_weights does not have dtype mindspore.float32")
- attn_weights = attn_weights.astype(value.dtype)
- attn_weights = self.attn_dropout(attn_weights)
-
- # Mask heads if we want to
- if head_mask is not None:
- attn_weights = attn_weights * head_mask
-
- attn_output = ops.matmul(attn_weights, value)
-
- return attn_output, attn_weights
-
- def _split_heads(self, tensor, num_heads, attn_head_size):
- """
- Splits hidden_size dim into attn_head_size and num_heads
- """
- new_shape = tensor.shape[:-1] + (num_heads, attn_head_size)
- tensor = tensor.view(new_shape)
- return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
-
- def _merge_heads(self, tensor, num_heads, attn_head_size):
- """
- Merges attn_head_size dim and num_attn_heads dim into hidden_size
- """
- tensor = tensor.permute(0, 2, 1, 3)
- new_shape = tensor.shape[:-2] + (num_heads * attn_head_size,)
- return tensor.view(new_shape)
-
- def construct(
- self,
- hidden_states: Optional[Tuple[mindspore.Tensor]],
- layer_past: Optional[Tuple[mindspore.Tensor]] = None,
- attention_mask: Optional[mindspore.Tensor] = None,
- head_mask: Optional[mindspore.Tensor] = None,
- encoder_hidden_states: Optional[mindspore.Tensor] = None,
- encoder_attention_mask: Optional[mindspore.Tensor] = None,
- use_cache: Optional[bool] = False,
- output_attentions: Optional[bool] = False,
- ) -> Tuple[Union[mindspore.Tensor, Tuple[mindspore.Tensor]], ...]:
- if encoder_hidden_states is not None:
- if not hasattr(self, "q_attn"):
- raise ValueError(
- "If class is used as cross attention, the weights `q_attn` have to be defined. "
- "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
- )
-
- query = self.q_attn(hidden_states)
- key, value = self.c_attn(encoder_hidden_states).split(self.split_size, axis=2)
- attention_mask = encoder_attention_mask
- else:
- query, key, value = self.c_attn(hidden_states).split(self.split_size, axis=2)
-
- query = self._split_heads(query, self.num_heads, self.head_dim)
- key = self._split_heads(key, self.num_heads, self.head_dim)
- value = self._split_heads(value, self.num_heads, self.head_dim)
-
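-         # Append cached keys/values from previous decoding steps along the sequence axis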
- if layer_past is not None:
- past_key, past_value = layer_past
- key = ops.cat((past_key, key), axis=-2)
- value = ops.cat((past_value, value), axis=-2)
-
- if use_cache is True:
- present = (key, value)
- else:
- present = None
-
- if self.reorder_and_upcast_attn:
- attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
- else:
- attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
-
- attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
- attn_output = self.c_proj(attn_output)
- attn_output = self.resid_dropout(attn_output)
-
- outputs = (attn_output, present)
- if output_attentions:
- outputs += (attn_weights,)
-
- return outputs # a, present, (attentions)
-
-
- class GPT2MLP(nn.Cell):
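-     """Feed-forward sub-layer of a GPT-2 block: Conv1D expansion, activation, Conv1D projection, dropout."""
-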
- def __init__(self, intermediate_size, config):
- super().__init__()
- embed_dim = config.hidden_size
- self.c_fc = Conv1D(intermediate_size, embed_dim)
- self.c_proj = Conv1D(embed_dim, intermediate_size)
- self.act = ACT2FN[config.activation_function]
- self.dropout = nn.Dropout(p=config.resid_pdrop)
-
- def construct(self, hidden_states: Optional[Tuple[mindspore.Tensor]]) -> mindspore.Tensor:
- hidden_states = self.c_fc(hidden_states)
- hidden_states = self.act(hidden_states)
- hidden_states = self.c_proj(hidden_states)
- hidden_states = self.dropout(hidden_states)
- return hidden_states
-
-
- class GPT2Block(nn.Cell):
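-     """A single GPT-2 decoder layer: pre-LayerNorm attention and MLP sub-layers, each with a residual connection,
-     plus an optional cross-attention sub-layer when `config.add_cross_attention` is set.
-     """
-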
- def __init__(self, config, layer_idx=None):
- super().__init__()
- hidden_size = config.hidden_size
- inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
-
- self.ln_1 = nn.LayerNorm([hidden_size], epsilon=config.layer_norm_epsilon)
- self.attn = GPT2Attention(config, layer_idx=layer_idx)
- self.ln_2 = nn.LayerNorm([hidden_size], epsilon=config.layer_norm_epsilon)
-
- if config.add_cross_attention:
- self.crossattention = GPT2Attention(config, is_cross_attention=True, layer_idx=layer_idx)
- self.ln_cross_attn = nn.LayerNorm([hidden_size], epsilon=config.layer_norm_epsilon)
-
- self.mlp = GPT2MLP(inner_dim, config)
-
- def construct(
- self,
- hidden_states: Optional[Tuple[mindspore.Tensor]],
- layer_past: Optional[Tuple[mindspore.Tensor]] = None,
- attention_mask: Optional[mindspore.Tensor] = None,
- head_mask: Optional[mindspore.Tensor] = None,
- encoder_hidden_states: Optional[mindspore.Tensor] = None,
- encoder_attention_mask: Optional[mindspore.Tensor] = None,
- use_cache: Optional[bool] = False,
- output_attentions: Optional[bool] = False,
- ) -> Union[Tuple[mindspore.Tensor], Optional[Tuple[mindspore.Tensor, Tuple[mindspore.Tensor, ...]]]]:
- residual = hidden_states
- hidden_states = self.ln_1(hidden_states)
- attn_outputs = self.attn(
- hidden_states,
- layer_past=layer_past,
- attention_mask=attention_mask,
- head_mask=head_mask,
- use_cache=use_cache,
- output_attentions=output_attentions,
- )
- attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
- outputs = attn_outputs[1:]
- # residual connection
- hidden_states = attn_output + residual
-
- if encoder_hidden_states is not None:
-             # add a cross-attention block between the self-attention and the MLP
- if not hasattr(self, "crossattention"):
- raise ValueError(
- f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
- "cross-attention layers by setting `config.add_cross_attention=True`"
- )
- residual = hidden_states
- hidden_states = self.ln_cross_attn(hidden_states)
- cross_attn_outputs = self.crossattention(
- hidden_states,
- attention_mask=attention_mask,
- head_mask=head_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- output_attentions=output_attentions,
- )
- attn_output = cross_attn_outputs[0]
- # residual connection
- hidden_states = residual + attn_output
- outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
-
- residual = hidden_states
- hidden_states = self.ln_2(hidden_states)
- feed_forward_hidden_states = self.mlp(hidden_states)
- # residual connection
- hidden_states = residual + feed_forward_hidden_states
-
- if use_cache:
- outputs = (hidden_states,) + outputs
- else:
- outputs = (hidden_states,) + outputs[1:]
-
- return outputs # hidden_states, present, (attentions, cross_attentions)
-
-
- class GPT2PreTrainedModel(PreTrainedModel):
- """
- An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
- models.
- """
-
- config_class = GPT2Config
- base_model_prefix = "transformer"
- is_parallelizable = True
- _no_split_modules = ["GPT2Block"]
- _keys_to_ignore_on_load_unexpected = [r'^(?:transformer\.)?h\.\d+\.attn\.bias$']
-
- def _init_weights(self, cell):
- """Initialize the weights"""
- if isinstance(cell, (nn.Dense, Conv1D)):
- # Slightly different from the TF version which uses truncated_normal for initialization
- # cf https://github.com/pytorch/pytorch/pull/5617
- cell.weight.set_data(initializer(Normal(self.config.initializer_range),
- cell.weight.shape, cell.weight.dtype))
- if cell.bias is not None:
- cell.bias.set_data(initializer('zeros', cell.bias.shape, cell.bias.dtype))
- elif isinstance(cell, nn.Embedding):
- weight = initializer(Normal(self.config.initializer_range),
- cell.weight.shape,
- cell.weight.dtype)
- if cell.padding_idx is not None:
- weight[cell.padding_idx] = 0
- cell.weight.set_data(weight)
- elif isinstance(cell, nn.LayerNorm):
- cell.weight.set_data(initializer('ones', cell.weight.shape, cell.weight.dtype))
- cell.bias.set_data(initializer('zeros', cell.bias.shape, cell.bias.dtype))
-
- # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
- # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
- # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
- # > -- GPT-2 :: https://openai.com/blog/better-language-models/
- #
- # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
- for name, p in cell.parameters_and_names():
- if name == "c_proj.weight":
- # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
- p.set_data(initializer(Normal((self.config.initializer_range / math.sqrt(2 * self.config.n_layer))),
- p.shape, p.dtype))
-
- @dataclass
- class GPT2DoubleHeadsModelOutput(ModelOutput):
- """
-     Base class for outputs of the GPT-2 double-heads model (a language modeling head and a multiple-choice
-     classification head).
-
- Args:
- loss (`mindspore.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
- Language modeling loss.
- mc_loss (`mindspore.Tensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided):
- Multiple choice classification loss.
- logits (`mindspore.Tensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):
- Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
- mc_logits (`mindspore.Tensor` of shape `(batch_size, num_choices)`):
- Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
- past_key_values (`Tuple[Tuple[mindspore.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
- Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads,
- sequence_length, embed_size_per_head)`).
-
- Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
- `past_key_values` input) to speed up sequential decoding.
- hidden_states (`tuple(mindspore.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
- Tuple of `mindspore.Tensor` (one for the output of the embeddings + one for the output of each layer) of
- shape `(batch_size, sequence_length, hidden_size)`.
-
- Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- attentions (`tuple(mindspore.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
- Tuple of `mindspore.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
- sequence_length)`.
-
-             Attention weights after the attention softmax, used to compute the weighted average in the
- self-attention heads.
- """
-
- loss: Optional[mindspore.Tensor] = None
- mc_loss: Optional[mindspore.Tensor] = None
- logits: mindspore.Tensor = None
- mc_logits: mindspore.Tensor = None
- past_key_values: Optional[Tuple[Tuple[mindspore.Tensor]]] = None
- hidden_states: Optional[Tuple[mindspore.Tensor]] = None
- attentions: Optional[Tuple[mindspore.Tensor]] = None
-
-
- class GPT2Model(GPT2PreTrainedModel):
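-     """The bare GPT-2 transformer: token and position embeddings, a stack of `GPT2Block`s and a final LayerNorm,
-     returning raw hidden states without any task-specific head.
-     """
-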
- def __init__(self, config):
- super().__init__(config)
-
- self.embed_dim = config.hidden_size
-
- self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
- self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
-
- self.drop = nn.Dropout(p=config.embd_pdrop)
- self.h = nn.CellList([GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)])
- self.ln_f = nn.LayerNorm([self.embed_dim], epsilon=config.layer_norm_epsilon)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self):
- return self.wte
-
- def set_input_embeddings(self, new_embeddings):
- self.wte = new_embeddings
-
- def _prune_heads(self, heads_to_prune):
- """
- Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
- """
- for layer, heads in heads_to_prune.items():
- self.h[layer].attn.prune_heads(heads)
-
- def construct(
- self,
- input_ids: Optional[mindspore.Tensor] = None,
- past_key_values: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
- attention_mask: Optional[mindspore.Tensor] = None,
- token_type_ids: Optional[mindspore.Tensor] = None,
- position_ids: Optional[mindspore.Tensor] = None,
- head_mask: Optional[mindspore.Tensor] = None,
- inputs_embeds: Optional[mindspore.Tensor] = None,
- encoder_hidden_states: Optional[mindspore.Tensor] = None,
- encoder_attention_mask: Optional[mindspore.Tensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- if input_ids is not None:
- self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
- input_shape = input_ids.shape
- input_ids = input_ids.view(-1, input_shape[-1])
- batch_size = input_ids.shape[0]
- elif inputs_embeds is not None:
- input_shape = inputs_embeds.shape[:-1]
- batch_size = inputs_embeds.shape[0]
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
-
- if token_type_ids is not None:
- token_type_ids = token_type_ids.view(-1, input_shape[-1])
-
- if past_key_values is None:
- past_length = 0
- past_key_values = tuple([None] * len(self.h))
- else:
- past_length = past_key_values[0][0].shape[-2]
- if position_ids is None:
- position_ids = ops.arange(past_length, input_shape[-1] + past_length, dtype=mindspore.int64)
- position_ids = position_ids.unsqueeze(0)
-
- # GPT2Attention mask.
- if attention_mask is not None:
- if batch_size <= 0:
- raise ValueError("batch_size has to be defined and > 0")
- attention_mask = attention_mask.view(batch_size, -1)
- # We create a 3D attention mask from a 2D tensor mask.
- # Sizes are [batch_size, 1, 1, to_seq_length]
- # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
-             # this attention mask is simpler than the triangular causal mask used in
-             # OpenAI GPT; we just need to prepare the broadcast dimension here.
- attention_mask = attention_mask[:, None, None, :]
-
- # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
- # masked positions, this operation will create a tensor which is 0.0 for
- # positions we want to attend and the dtype's smallest value for masked positions.
- # Since we are adding it to the raw scores before the softmax, this is
- # effectively the same as removing these entirely.
- attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
- attention_mask = (1.0 - attention_mask) * float(np.finfo(mindspore.dtype_to_nptype(self.dtype)).min)
-
- # If a 2D or 3D attention mask is provided for the cross-attention
-         # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
- if self.config.add_cross_attention and encoder_hidden_states is not None:
- encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.shape
- encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
- if encoder_attention_mask is None:
- encoder_attention_mask = ops.ones(encoder_hidden_shape)
- encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
- else:
- encoder_attention_mask = None
-
- # Prepare head mask if needed
-         # 1.0 in head_mask indicates we keep the head
- # attention_probs has shape bsz x n_heads x N x N
- # head_mask has shape n_layer x batch x n_heads x N x N
- head_mask = self.get_head_mask(head_mask, self.config.n_layer)
-
- if inputs_embeds is None:
- inputs_embeds = self.wte(input_ids)
- position_embeds = self.wpe(position_ids)
- hidden_states = inputs_embeds + position_embeds
-
- if token_type_ids is not None:
- token_type_embeds = self.wte(token_type_ids)
- hidden_states = hidden_states + token_type_embeds
-
- hidden_states = self.drop(hidden_states)
-
- output_shape = (-1,) + input_shape[1:] + (hidden_states.shape[-1],)
-
- presents = () if use_cache else None
- all_self_attentions = () if output_attentions else None
- all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
- all_hidden_states = () if output_hidden_states else None
- for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- outputs = block(
- hidden_states,
- layer_past=layer_past,
- attention_mask=attention_mask,
- head_mask=head_mask[i],
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- use_cache=use_cache,
- output_attentions=output_attentions,
- )
-
- hidden_states = outputs[0]
- if use_cache is True:
- presents = presents + (outputs[1],)
-
- if output_attentions:
- all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
- if self.config.add_cross_attention:
- all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
-
- hidden_states = self.ln_f(hidden_states)
-
- hidden_states = hidden_states.view(output_shape)
- # Add last hidden state
- if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
-
- if not return_dict:
- return tuple(
- v
- for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
- if v is not None
- )
-
- return BaseModelOutputWithPastAndCrossAttentions(
- last_hidden_state=hidden_states,
- past_key_values=presents,
- hidden_states=all_hidden_states,
- attentions=all_self_attentions,
- cross_attentions=all_cross_attentions,
- )
-
-
- class GPT2LMHeadModel(GPT2PreTrainedModel):
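-     """GPT-2 with a language-modeling head (a linear layer whose weight is tied to the input embeddings)."""
-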
- _tied_weights_keys = ["lm_head.weight"]
-
- def __init__(self, config):
- super().__init__(config)
- self.transformer = GPT2Model(config)
- self.lm_head = nn.Dense(config.n_embd, config.vocab_size, has_bias=False)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_output_embeddings(self):
- return self.lm_head
-
- def set_output_embeddings(self, new_embeddings):
- self.lm_head = new_embeddings
-
- def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
- token_type_ids = kwargs.get("token_type_ids", None)
- # Omit tokens covered by past_key_values
- if past_key_values:
- past_length = past_key_values[0][0].shape[2]
-
- # Some generation methods already pass only the last input ID
- if input_ids.shape[1] > past_length:
- remove_prefix_length = past_length
- else:
- # Default to old behavior: keep only final ID
- remove_prefix_length = input_ids.shape[1] - 1
-
- input_ids = input_ids[:, remove_prefix_length:]
- if token_type_ids is not None:
- token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
-
- attention_mask = kwargs.get("attention_mask", None)
- position_ids = kwargs.get("position_ids", None)
-
- if attention_mask is not None and position_ids is None:
- # create position_ids on the fly for batch generation
- position_ids = attention_mask.long().cumsum(-1) - 1
- position_ids = position_ids.masked_fill(attention_mask == 0, 1)
- if past_key_values:
- position_ids = position_ids[:, -input_ids.shape[1] :]
- else:
- position_ids = None
-
- # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
- if inputs_embeds is not None and past_key_values is None:
- model_inputs = {"inputs_embeds": inputs_embeds}
- else:
- model_inputs = {"input_ids": input_ids}
-
- model_inputs.update(
- {
- "past_key_values": past_key_values,
- "use_cache": kwargs.get("use_cache"),
- "position_ids": position_ids,
- "attention_mask": attention_mask,
- "token_type_ids": token_type_ids,
- }
- )
-
- return model_inputs
-
- def construct(
- self,
- input_ids: Optional[mindspore.Tensor] = None,
- past_key_values: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
- attention_mask: Optional[mindspore.Tensor] = None,
- token_type_ids: Optional[mindspore.Tensor] = None,
- position_ids: Optional[mindspore.Tensor] = None,
- head_mask: Optional[mindspore.Tensor] = None,
- inputs_embeds: Optional[mindspore.Tensor] = None,
- encoder_hidden_states: Optional[mindspore.Tensor] = None,
- encoder_attention_mask: Optional[mindspore.Tensor] = None,
- labels: Optional[mindspore.Tensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
- r"""
- labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
- Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
-             `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to
-             `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`.
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- transformer_outputs = self.transformer(
- input_ids,
- past_key_values=past_key_values,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = transformer_outputs[0]
-
- lm_logits = self.lm_head(hidden_states)
-
- loss = None
- if labels is not None:
- # Shift so that tokens < n predict n
- shift_logits = lm_logits[..., :-1, :]
- shift_labels = labels[..., 1:]
- # Flatten the tokens
- loss = ops.cross_entropy(shift_logits.view(-1, shift_logits.shape[-1]), shift_labels.view(-1))
-
- if not return_dict:
- output = (lm_logits,) + transformer_outputs[1:]
- return ((loss,) + output) if loss is not None else output
-
- return CausalLMOutputWithCrossAttentions(
- loss=loss,
- logits=lm_logits,
- past_key_values=transformer_outputs.past_key_values,
- hidden_states=transformer_outputs.hidden_states,
- attentions=transformer_outputs.attentions,
- cross_attentions=transformer_outputs.cross_attentions,
- )
-
- @staticmethod
- def _reorder_cache(
- past_key_values: Tuple[Tuple[mindspore.Tensor]], beam_idx: mindspore.Tensor
- ) -> Tuple[Tuple[mindspore.Tensor]]:
- """
- This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
- [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
- beam_idx at every generation step.
- """
- return tuple(
- tuple(past_state.index_select(0, beam_idx) for past_state in layer_past)
- for layer_past in past_key_values
- )
-
-
- class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
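-     """GPT-2 with a language-modeling head and a multiple-choice classification head (`SequenceSummary`) on top."""
-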
- _tied_weights_keys = ["lm_head.weight"]
-
- def __init__(self, config):
- super().__init__(config)
- config = copy.deepcopy(config)
- config.num_labels = 1
- self.transformer = GPT2Model(config)
- self.lm_head = nn.Dense(config.n_embd, config.vocab_size, has_bias=False)
- self.multiple_choice_head = SequenceSummary(config)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_output_embeddings(self):
- return self.lm_head
-
- def set_output_embeddings(self, new_embeddings):
- self.lm_head = new_embeddings
-
- def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
- token_type_ids = kwargs.get("token_type_ids", None)
- # Omit tokens covered by past_key_values
- if past_key_values:
- past_length = past_key_values[0][0].shape[2]
-
- # Some generation methods already pass only the last input ID
- if input_ids.shape[1] > past_length:
- remove_prefix_length = past_length
- else:
- # Default to old behavior: keep only final ID
- remove_prefix_length = input_ids.shape[1] - 1
-
- input_ids = input_ids[:, remove_prefix_length:]
- if token_type_ids is not None:
- token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
-
- attention_mask = kwargs.get("attention_mask", None)
- position_ids = kwargs.get("position_ids", None)
-
- if attention_mask is not None and position_ids is None:
- # create position_ids on the fly for batch generation
- position_ids = attention_mask.long().cumsum(-1) - 1
- position_ids = position_ids.masked_fill(attention_mask == 0, 1)
- if past_key_values:
- position_ids = position_ids[:, -input_ids.shape[1] :]
- else:
- position_ids = None
-
- return {
- "input_ids": input_ids,
- "past_key_values": past_key_values,
- "use_cache": kwargs.get("use_cache"),
- "position_ids": position_ids,
- "attention_mask": attention_mask,
- "token_type_ids": token_type_ids,
- }
-
- def construct(
- self,
- input_ids: Optional[mindspore.Tensor] = None,
- past_key_values: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
- attention_mask: Optional[mindspore.Tensor] = None,
- token_type_ids: Optional[mindspore.Tensor] = None,
- position_ids: Optional[mindspore.Tensor] = None,
- head_mask: Optional[mindspore.Tensor] = None,
- inputs_embeds: Optional[mindspore.Tensor] = None,
- mc_token_ids: Optional[mindspore.Tensor] = None,
- labels: Optional[mindspore.Tensor] = None,
- mc_labels: Optional[mindspore.Tensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- **kwargs,
- ) -> Union[Tuple, GPT2DoubleHeadsModelOutput]:
- r"""
-         mc_token_ids (`mindspore.Tensor` of shape `(batch_size, num_choices)`, *optional*, defaults to the index of the last token of the input):
- Index of the classification token in each input sequence. Selected in the range `[0, input_ids.shape[-1] -
- 1]`.
- labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
- Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
- `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to
- `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`
- mc_labels (`mindspore.Tensor` of shape `(batch_size)`, *optional*):
-             Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`
- where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)
-
- Return:
-
- Example:
-
-         ```python
-         >>> import mindspore
-         >>> from mindnlp.transformers import AutoTokenizer, GPT2DoubleHeadsModel
-
- >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
- >>> model = GPT2DoubleHeadsModel.from_pretrained("gpt2")
-
- >>> # Add a [CLS] to the vocabulary (we should train it also!)
- >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"})
- >>> # Update the model embeddings with the new vocabulary size
- >>> embedding_layer = model.resize_token_embeddings(len(tokenizer))
-
- >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
- >>> encoded_choices = [tokenizer.encode(s) for s in choices]
- >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
-
- >>> input_ids = mindspore.Tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2
- >>> mc_token_ids = mindspore.Tensor([cls_token_location]) # Batch size: 1
-
- >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
- >>> lm_logits = outputs.logits
- >>> mc_logits = outputs.mc_logits
- ```"""
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- transformer_outputs = self.transformer(
- input_ids,
- past_key_values=past_key_values,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- hidden_states = transformer_outputs[0]
-
- lm_logits = self.lm_head(hidden_states)
- mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
-
- mc_loss = None
- if mc_labels is not None:
- mc_loss = ops.cross_entropy(mc_logits.view(-1, mc_logits.shape[-1]), mc_labels.view(-1))
- lm_loss = None
- if labels is not None:
- shift_logits = lm_logits[..., :-1, :]
- shift_labels = labels[..., 1:]
- lm_loss = ops.cross_entropy(shift_logits.view(-1, shift_logits.shape[-1]), shift_labels.view(-1))
-
- if not return_dict:
- output = (lm_logits, mc_logits) + transformer_outputs[1:]
- if mc_loss is not None:
- output = (mc_loss,) + output
- return ((lm_loss,) + output) if lm_loss is not None else output
-
- return GPT2DoubleHeadsModelOutput(
- loss=lm_loss,
- mc_loss=mc_loss,
- logits=lm_logits,
- mc_logits=mc_logits,
- past_key_values=transformer_outputs.past_key_values,
- hidden_states=transformer_outputs.hidden_states,
- attentions=transformer_outputs.attentions,
- )
-
- @staticmethod
- def _reorder_cache(
- past_key_values: Tuple[Tuple[mindspore.Tensor]], beam_idx: mindspore.Tensor
- ) -> Tuple[Tuple[mindspore.Tensor]]:
- """
- This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
- [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
- beam_idx at every generation step.
- """
- return tuple(
- tuple(past_state.index_select(0, beam_idx) for past_state in layer_past)
- for layer_past in past_key_values
- )
-
-
- class GPT2ForSequenceClassification(GPT2PreTrainedModel):
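-     """GPT-2 with a sequence-classification head: a linear layer applied to the hidden state of the last
-     non-padding token of each sequence.
-     """
-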
- def __init__(self, config):
- super().__init__(config)
- self.num_labels = config.num_labels
- self.transformer = GPT2Model(config)
- self.score = nn.Dense(config.n_embd, self.num_labels, has_bias=False)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def construct(
- self,
- input_ids: Optional[mindspore.Tensor] = None,
- past_key_values: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
- attention_mask: Optional[mindspore.Tensor] = None,
- token_type_ids: Optional[mindspore.Tensor] = None,
- position_ids: Optional[mindspore.Tensor] = None,
- head_mask: Optional[mindspore.Tensor] = None,
- inputs_embeds: Optional[mindspore.Tensor] = None,
- labels: Optional[mindspore.Tensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
- r"""
- labels (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
- Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
- config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
- `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- transformer_outputs = self.transformer(
- input_ids,
- past_key_values=past_key_values,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = transformer_outputs[0]
- logits = self.score(hidden_states)
-
- if input_ids is not None:
- batch_size, _ = input_ids.shape[:2]
- else:
- batch_size, _ = inputs_embeds.shape[:2]
-
- assert (
- self.config.pad_token_id is not None or batch_size == 1
- ), "Cannot handle batch sizes > 1 if no padding token is defined."
- if self.config.pad_token_id is None:
- sequence_lengths = -1
- else:
- if input_ids is not None:
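-                 # Last non-padding token = (index of the first pad token) - 1; the modulo below maps the
-                 # no-padding case (argmax returns 0, hence -1) back to the final position.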
- sequence_lengths = ops.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
- sequence_lengths = sequence_lengths % input_ids.shape[-1]
-
- else:
- sequence_lengths = -1
- logger.warning(
- f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
- "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
- )
-
- pooled_logits = logits[ops.arange(batch_size), sequence_lengths]
-
- loss = None
- if labels is not None:
- if self.config.problem_type is None:
- if self.num_labels == 1:
- self.config.problem_type = "regression"
- elif self.num_labels > 1 and labels.dtype in (mindspore.int64, mindspore.int32):
- self.config.problem_type = "single_label_classification"
- else:
- self.config.problem_type = "multi_label_classification"
-
- if self.config.problem_type == "regression":
- if self.num_labels == 1:
- loss = ops.mse_loss(pooled_logits.squeeze(), labels.squeeze())
- else:
- loss = ops.mse_loss(pooled_logits, labels)
- elif self.config.problem_type == "single_label_classification":
- loss = ops.cross_entropy(pooled_logits.view(-1, self.num_labels), labels.view(-1))
- elif self.config.problem_type == "multi_label_classification":
- loss = ops.binary_cross_entropy_with_logits(pooled_logits, labels)
- if not return_dict:
- output = (pooled_logits,) + transformer_outputs[1:]
- return ((loss,) + output) if loss is not None else output
-
- return SequenceClassifierOutputWithPast(
- loss=loss,
- logits=pooled_logits,
- past_key_values=transformer_outputs.past_key_values,
- hidden_states=transformer_outputs.hidden_states,
- attentions=transformer_outputs.attentions,
- )
-
-
- class GPT2ForTokenClassification(GPT2PreTrainedModel):
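-     """GPT-2 with a token-classification head: dropout followed by a linear layer applied to every hidden state."""
-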
- def __init__(self, config):
- super().__init__(config)
- self.num_labels = config.num_labels
-
- self.transformer = GPT2Model(config)
- if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
- classifier_dropout = config.classifier_dropout
- elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
- classifier_dropout = config.hidden_dropout
- else:
- classifier_dropout = 0.1
- self.dropout = nn.Dropout(p=classifier_dropout)
- self.classifier = nn.Dense(config.hidden_size, config.num_labels)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def construct(
- self,
- input_ids: Optional[mindspore.Tensor] = None,
- past_key_values: Optional[Tuple[Tuple[mindspore.Tensor]]] = None,
- attention_mask: Optional[mindspore.Tensor] = None,
- token_type_ids: Optional[mindspore.Tensor] = None,
- position_ids: Optional[mindspore.Tensor] = None,
- head_mask: Optional[mindspore.Tensor] = None,
- inputs_embeds: Optional[mindspore.Tensor] = None,
- labels: Optional[mindspore.Tensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, TokenClassifierOutput]:
- r"""
- labels (`mindspore.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- transformer_outputs = self.transformer(
- input_ids,
- past_key_values=past_key_values,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- hidden_states = transformer_outputs[0]
- hidden_states = self.dropout(hidden_states)
- logits = self.classifier(hidden_states)
-
- loss = None
- if labels is not None:
- loss = ops.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))
-
- if not return_dict:
- output = (logits,) + transformer_outputs[2:]
- return ((loss,) + output) if loss is not None else output
-
- return TokenClassifierOutput(
- loss=loss,
- logits=logits,
- hidden_states=transformer_outputs.hidden_states,
- attentions=transformer_outputs.attentions,
- )
-
- class GPT2ForQuestionAnswering(GPT2PreTrainedModel):
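-     """GPT-2 with a span-classification head for extractive question answering: a linear layer producing
-     start and end logits over the hidden states.
-     """
-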
- def __init__(self, config):
- super().__init__(config)
- self.num_labels = config.num_labels
- self.transformer = GPT2Model(config)
- self.qa_outputs = nn.Dense(config.hidden_size, 2)
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def construct(
- self,
- input_ids: Optional[mindspore.Tensor] = None,
- attention_mask: Optional[mindspore.Tensor] = None,
- token_type_ids: Optional[mindspore.Tensor] = None,
- position_ids: Optional[mindspore.Tensor] = None,
- head_mask: Optional[mindspore.Tensor] = None,
- inputs_embeds: Optional[mindspore.Tensor] = None,
- start_positions: Optional[mindspore.Tensor] = None,
- end_positions: Optional[mindspore.Tensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, QuestionAnsweringModelOutput]:
- r"""
- start_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
- Labels for position (index) of the start of the labelled span for computing the token classification loss.
- Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
- are not taken into account for computing the loss.
- end_positions (`mindspore.Tensor` of shape `(batch_size,)`, *optional*):
- Labels for position (index) of the end of the labelled span for computing the token classification loss.
- Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
- are not taken into account for computing the loss.
- """
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- outputs = self.transformer(
- input_ids,
- attention_mask=attention_mask,
- token_type_ids=token_type_ids,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- sequence_output = outputs[0]
-
- logits = self.qa_outputs(sequence_output)
- start_logits, end_logits = logits.split(1, axis=-1)
- start_logits = start_logits.squeeze(-1)
- end_logits = end_logits.squeeze(-1)
-
- total_loss = None
- if start_positions is not None and end_positions is not None:
-             # If we are on multi-GPU, the start/end positions may carry an extra dimension; squeeze it away
- if len(start_positions.shape) > 1:
- start_positions = start_positions.squeeze(-1)
- if len(end_positions.shape) > 1:
- end_positions = end_positions.squeeze(-1)
- # sometimes the start/end positions are outside our model inputs, we ignore these terms
- ignored_index = start_logits.shape[1]
- start_positions = start_positions.clamp(0, ignored_index)
- end_positions = end_positions.clamp(0, ignored_index)
-
- start_loss = ops.cross_entropy(start_logits, start_positions, ignore_index=ignored_index)
- end_loss = ops.cross_entropy(end_logits, end_positions, ignore_index=ignored_index)
- total_loss = (start_loss + end_loss) / 2
-
- if not return_dict:
- output = (start_logits, end_logits) + outputs[2:]
- return ((total_loss,) + output) if total_loss is not None else output
-
- return QuestionAnsweringModelOutput(
- loss=total_loss,
- start_logits=start_logits,
- end_logits=end_logits,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
-
- __all__ = [
- "GPT2_PRETRAINED_MODEL_ARCHIVE_LIST",
- "GPT2DoubleHeadsModel",
- "GPT2ForQuestionAnswering",
- "GPT2ForSequenceClassification",
- "GPT2ForTokenClassification",
- "GPT2LMHeadModel",
- "GPT2Model",
- "GPT2PreTrainedModel",
- ]