Transformers 源码解析(九十七)

Transformers 源码解析(九十七)

.\models\roberta_prelayernorm\__init__.py

# TYPE_CHECKING is True only during static type checking; it lets us expose the
# real imports to type checkers while keeping runtime imports lazy (see bottom of file).
from typing import TYPE_CHECKING

# Utilities for lazy module loading and optional-backend detection.
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Maps each submodule name to the public symbols it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

# Register the PyTorch modeling symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the PyTorch model classes through the lazy module.
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

# Register the TensorFlow modeling symbols only when TensorFlow is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow is available: expose the TF model classes through the lazy module.
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

# Register the Flax modeling symbols only when Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax is available: expose the Flax model classes through the lazy module.
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

# Under static type checking, perform the real imports so type checkers can resolve names.
if TYPE_CHECKING:
    # Configuration archive map and configuration classes.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    # Import PyTorch classes only if torch is available.
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # PyTorch model classes (mirrors _import_structure above).
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    # Import TensorFlow classes only if TensorFlow is available.
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # TensorFlow model classes (mirrors _import_structure above).
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    # Import Flax classes only if Flax is available.
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Flax model classes (mirrors _import_structure above).
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports submodules on first access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

.\models\roc_bert\configuration_roc_bert.py

# coding=utf-8
# Copyright 2022 WeChatAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RoCBert model configuration"""

# 从 transformers 库中导入预训练配置的基类 PretrainedConfig
from ...configuration_utils import PretrainedConfig
# 从 transformers 库中导入日志记录工具 logging
from ...utils import logging

# Module-level logger for this file.
logger = logging.get_logger(__name__)

# Maps pretrained RoCBert model identifiers to the URLs of their configuration files.
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}

# Configuration container for RoCBert; every hyper-parameter of the model lives here.
class RoCBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`RoCBertModel`]. It is used to instantiate a
    RoCBert model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the RoCBert
    [weiweishi/roc-bert-base-zh](https://huggingface.co/weiweishi/roc-bert-base-zh) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Example:

    ```
    >>> from transformers import RoCBertModel, RoCBertConfig

    >>> # Initializing a RoCBert weiweishi/roc-bert-base-zh style configuration
    >>> configuration = RoCBertConfig()

    >>> # Initializing a model from the weiweishi/roc-bert-base-zh style configuration
    >>> model = RoCBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    # Identifier used by the auto classes to map config -> model.
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        # --- Standard Transformer backbone hyper-parameters ---
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout

        # --- RoCBert-specific options: phonetic (pronunciation) and glyph (shape) channels ---
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        # Whether the word/shape/pronunciation embeddings are concatenated before projection.
        self.concat_input = concat_input

        # Delegate the remaining generic options (pad token, etc.) to the base class.
        super().__init__(pad_token_id=pad_token_id, **kwargs)

.\models\roc_bert\modeling_roc_bert.py

# 设置文件编码为 UTF-8
# 版权声明,版权归 WeChatAI The HuggingFace Inc. team 所有
# 根据 Apache License, Version 2.0 许可协议,除非符合许可协议的要求,否则不得使用此文件
# 可以在以下网址获取许可协议的副本:http://www.apache.org/licenses/LICENSE-2.0
# 除非适用法律要求或书面同意,否则按“原样”分发本软件
""" PyTorch RoCBert 模型."""

import math  # 导入 math 库
import os  # 导入 os 库
from typing import List, Optional, Tuple, Union  # 导入类型提示相关的类和函数

import torch  # 导入 PyTorch 库
import torch.utils.checkpoint  # 导入 PyTorch 的 checkpoint 模块
from torch import nn  # 从 PyTorch 中导入 nn 模块
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss  # 从 PyTorch 的 nn 模块导入三种损失函数

from ...activations import ACT2FN  # 导入激活函数相关
from ...modeling_outputs import (  # 导入模型输出相关类
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel  # 导入预训练模型基类
from ...pytorch_utils import (  # 导入 PyTorch 工具函数
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from ...utils import (  # 导入通用工具函数
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_roc_bert import RoCBertConfig  # 导入 RoCBert 的配置文件

logger = logging.get_logger(__name__)  # Module-level logger

_CHECKPOINT_FOR_DOC = "weiweishi/roc-bert-base-zh"  # Checkpoint referenced in docstrings
_CONFIG_FOR_DOC = "RoCBertConfig"  # Config class referenced in docstrings

# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 768]  # Output shape shown in the base-model doc example

# Token Classification docstring
_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "ArthurZ/dummy-rocbert-ner"  # Checkpoint for the token-classification example
_TOKEN_CLASS_EXPECTED_OUTPUT = [
    "S-EVENT", "S-FAC", "I-ORDINAL", "I-ORDINAL", "E-ORG", "E-LANGUAGE", "E-ORG", "E-ORG", "E-ORG", "E-ORG",
    "I-EVENT", "S-TIME", "S-TIME", "E-LANGUAGE", "S-TIME", "E-DATE", "I-ORDINAL", "E-QUANTITY", "E-LANGUAGE",
    "S-TIME", "B-ORDINAL", "S-PRODUCT", "E-LANGUAGE", "E-LANGUAGE", "E-ORG", "E-LOC", "S-TIME", "I-ORDINAL",
    "S-FAC", "O", "S-GPE", "I-EVENT", "S-GPE", "E-LANGUAGE", "E-ORG", "S-EVENT", "S-FAC", "S-FAC", "S-FAC",
    "E-ORG", "S-FAC", "E-ORG", "S-GPE"
]  # Expected labels in the token-classification doc example
_TOKEN_CLASS_EXPECTED_LOSS = 3.62  # Expected loss in the token-classification doc example

# SequenceClassification docstring
_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ArthurZ/dummy-rocbert-seq"  # Checkpoint for the sequence-classification example
_SEQ_CLASS_EXPECTED_OUTPUT = "'financial news'"  # Expected label in the sequence-classification doc example
_SEQ_CLASS_EXPECTED_LOSS = 2.31  # Expected loss in the sequence-classification doc example

# QuestionAnswering docstring
_CHECKPOINT_FOR_QA = "ArthurZ/dummy-rocbert-qa"  # Checkpoint for the question-answering example
_QA_EXPECTED_OUTPUT = "''"  # Expected answer string in the QA doc example
_QA_EXPECTED_LOSS = 3.75  # Expected loss in the QA doc example
_QA_TARGET_START_INDEX = 14  # Target answer start index in the QA doc example
_QA_TARGET_END_INDEX = 15  # Target answer end index in the QA doc example

# Masked language modeling
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "weiweishi/roc-bert-base-zh",  # Official pretrained RoCBert checkpoint
    # See all RoCBert models at https://huggingface.co/models?filter=roc_bert
]
# Copied from transformers.models.bert.modeling_bert.load_tf_weights_in_bert with bert->roc_bert
def load_tf_weights_in_roc_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
        import re  # used to split scope names like "layer_0" into name and index
        import numpy as np  # used to transpose kernel matrices into PyTorch layout
        import tensorflow as tf  # used to read the TF checkpoint variables
    except ImportError:
        # TensorFlow is an optional dependency; fail loudly with install instructions.
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    tf_path = os.path.abspath(tf_checkpoint_path)  # Resolve the checkpoint path once
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")

    # Enumerate all (name, shape) pairs stored in the TF checkpoint.
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []

    # Load every variable's data into memory, keeping names and arrays aligned.
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)  # numpy array for this variable
        names.append(name)
        arrays.append(array)
    # Walk each TF variable path and copy its data into the matching PyTorch parameter.
    for name, array in zip(names, arrays):
        # TF scopes are slash-separated, e.g. "bert/encoder/layer_0/attention/...".
        name = name.split("/")
        
        # Optimizer slots and step counters have no PyTorch counterpart — skip them.
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            # Not a model weight; move on to the next variable.
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        
        # Start at the model root and descend attribute by attribute.
        pointer = model
        
        # Resolve each path component against the PyTorch module tree.
        for m_name in name:
            # Components like "layer_3" are split into ["layer", "3", ""] for indexed access.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            
            # Translate TF naming conventions to the PyTorch attribute names.
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                # Fall back to a direct attribute lookup; unknown names are skipped.
                # NOTE(review): this `continue` only skips the current path component,
                # not the whole variable — same quirk as the upstream BERT loader.
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            
            # "layer_3"-style components carry a numeric index into a ModuleList.
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        
        # After the walk, m_name holds the last path component: embedding tables
        # point at their `.weight`, and dense kernels are transposed to PyTorch layout.
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels as (in, out); PyTorch expects (out, in).
            array = np.transpose(array)
        
        # Sanity-check that the target parameter and the source array agree in shape.
        try:
            if pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise
        
        # Copy the TF weight into the PyTorch parameter.
        logger.info(f"Initialize PyTorch weight {name}")
        
        pointer.data = torch.from_numpy(array)
    
    # Return the model with weights loaded in place.
    return model
class RoCBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position, shape, pronunciation and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        # Three token-id lookup tables: semantic (word), phonetic (pronunciation)
        # and glyph (shape) channels, all sharing the same padding index.
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.pronunciation_embed = nn.Embedding(
            config.pronunciation_vocab_size, config.pronunciation_embed_dim, padding_idx=config.pad_token_id
        )
        self.shape_embed = nn.Embedding(
            config.shape_vocab_size, config.shape_embed_dim, padding_idx=config.pad_token_id
        )
        # Standard BERT-style position and segment embeddings.
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # Per-channel on/off switches taken from the config.
        self.enable_pronunciation = config.enable_pronunciation
        self.enable_shape = config.enable_shape

        if config.concat_input:
            # Concatenating the enabled channels widens the embedding; project back to hidden_size.
            fused_dim = config.hidden_size
            if self.enable_pronunciation:
                fused_dim += config.pronunciation_embed_dim
            if self.enable_shape:
                fused_dim += config.shape_embed_dim
            self.map_inputs_layer = torch.nn.Linear(fused_dim, config.hidden_size)
        else:
            self.map_inputs_layer = None

        # `LayerNorm` keeps its TensorFlow-style capitalization so checkpoint names match.
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # Precomputed (1, max_position_embeddings) position ids; non-persistent buffer.
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        # Default all-zeros token type ids, matching the position_ids buffer's shape.
        self.register_buffer(
            "token_type_ids",
            torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
            persistent=False,
        )

    def forward(
        self,
        input_ids=None,
        input_shape_ids=None,
        input_pronunciation_ids=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        past_key_values_length=0,
    ):
        # Forward method implementation will be specific to the usage of these embeddings
        pass

# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->RoCBert
class RoCBertSelfAttention(nn.Module):
    # Only the constructor is shown in this excerpt.
    def __init__(self, config, position_embedding_type=None):
        """Set up the multi-head self-attention projections."""
        super().__init__()
        # hidden_size must split evenly across the heads unless an explicit embedding_size exists.
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        # Head bookkeeping: per-head width and total projected width.
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # Q/K/V projections, each mapping hidden_size -> all_head_size.
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        # Dropout applied to the attention probabilities.
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # Explicit argument wins over the config; falls back to absolute embeddings.
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        # Relative-position variants need a learned embedding over pairwise distances.
        if self.position_embedding_type in ("relative_key", "relative_key_query"):
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        # Decoder layers enable caching / causal behavior downstream.
        self.is_decoder = config.is_decoder
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->RoCBert
class RoCBertSelfOutput(nn.Module):
    """Projection + dropout + residual add + LayerNorm applied after self-attention."""

    def __init__(self, config):
        super().__init__()
        # Square projection keeping the hidden width unchanged.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Project the attention output, regularize, then add the residual and normalize.
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        return self.LayerNorm(projected + input_tensor)


# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->RoCBert
class RoCBertAttention(nn.Module):
    """Bundles self-attention with its output projection and supports head pruning."""

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        # Attention sub-layer and the projection/residual/LayerNorm that follows it.
        self.self = RoCBertSelfAttention(config, position_embedding_type=position_embedding_type)
        self.output = RoCBertSelfOutput(config)
        # Heads removed so far, kept so the same head is never pruned twice.
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from the Q/K/V and output projections."""
        if not heads:
            return
        # Translate head indices into the row/column indices that survive pruning.
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Shrink each linear projection, dropping the pruned heads' slices.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Keep the head bookkeeping in sync with the reduced projections.
        self.self.num_attention_heads -= len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # Run self-attention over the inputs.
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # Fuse the attention result with the residual stream.
        attention_output = self.output(self_outputs[0], hidden_states)
        # Forward any extra outputs (attention probs, cache) from the self layer.
        outputs = (attention_output,) + self_outputs[1:]
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->RoCBert
class RoCBertIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size followed by the activation."""

    def __init__(self, config):
        super().__init__()
        # Expansion projection of the Transformer feed-forward block.
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # A string selects a canonical activation from ACT2FN; a callable is used as-is.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Expand, then apply the non-linearity.
        return self.intermediate_act_fn(self.dense(hidden_states))
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->RoCBert
class RoCBertOutput(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size with residual + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        # Contraction projection closing the Transformer feed-forward block.
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        contracted = self.dense(hidden_states)
        contracted = self.dropout(contracted)
        # Residual connection followed by normalization.
        return self.LayerNorm(contracted + input_tensor)


# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->RoCBert
class RoCBertLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        # Chunk size for the feed-forward computation (0 disables chunking).
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Sequence-length dimension along which feed-forward chunking splits the input.
        self.seq_len_dim = 1
        # Self-attention sub-layer.
        self.attention = RoCBertAttention(config)
        # Whether this layer runs inside a decoder (enables caching / cross-attention).
        self.is_decoder = config.is_decoder
        # Whether a cross-attention sub-layer is added.
        self.add_cross_attention = config.add_cross_attention
        # Cross-attention is only valid on decoder layers.
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            # Cross-attention always uses absolute position embeddings.
            self.crossattention = RoCBertAttention(config, position_embedding_type="absolute")
        # Feed-forward expansion and contraction sub-layers.
        self.intermediate = RoCBertIntermediate(config)
        self.output = RoCBertOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # Perform self-attention operation using the provided inputs
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            # Extract all outputs except the first (hidden states) and last (cache entry)
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            # For encoder layers, keep all extra self-attention outputs (attention weights)
            # NOTE: present_key_value is only bound on the decoder path; the cross-attention
            # branch below requires is_decoder, so the encoder path never reads it.
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            # Cross-attention requires the layer to have been built with add_cross_attention=True.
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            # Perform cross-attention over the encoder's hidden states
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            # Append cross-attention weights (excluding the cache entry) to the outputs
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # Run the feed-forward block, optionally chunked along the sequence dimension
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # One feed-forward pass over a chunk: expand, activate, contract, add residual + LayerNorm.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->RoCBert
class RoCBertEncoder(nn.Module):
    """Stack of `config.num_hidden_layers` RoCBertLayer modules forming the Transformer encoder."""

    def __init__(self, config):
        super().__init__()
        # Keep the config so forward() can check flags such as `add_cross_attention`.
        self.config = config
        # One transformer layer per hidden layer requested by the config.
        self.layer = nn.ModuleList([RoCBertLayer(config) for _ in range(config.num_hidden_layers)])
        # Gradient checkpointing is off by default; it is toggled externally.
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        # Accumulators for optional outputs; left as None when the matching flag is off.
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        # Gradient checkpointing recomputes activations, which is incompatible with caching.
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # Collects each layer's present key/value pair when caching is enabled.
        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            # Record the hidden states *entering* this layer.
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # Per-layer head mask and cached key/values, when provided.
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                # Run the layer under activation checkpointing to save memory.
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            else:
                # Regular layer call.
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            # First element of the layer output is the updated hidden states.
            hidden_states = layer_outputs[0]
            # Last element is the layer's present key/value cache entry.
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                # layer_outputs[1] carries the self-attention weights.
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    # layer_outputs[2] carries the cross-attention weights.
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        # Record the hidden states produced by the final layer.
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        # Tuple output: drop the entries whose flag was off (they are None).
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        # Structured output with last hidden state, cache, and optional collections.
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->RoCBert
class RoCBertPooler(nn.Module):
    """Pools a sequence by passing its first token's hidden state through a tanh-activated dense layer."""

    def __init__(self, config):
        super().__init__()
        # Square projection keeping the hidden size, followed by a tanh non-linearity.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Select the hidden state of the first token, then project and activate it.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))


# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->RoCBert
class RoCBertPredictionHeadTransform(nn.Module):
    """Dense projection -> activation -> LayerNorm, applied before the LM prediction head."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # `config.hidden_act` is either the string name of an activation or a callable.
        self.transform_act_fn = ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Project, activate, then normalize in one pass.
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))


# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->RoCBert
class RoCBertLMPredictionHead(nn.Module):
    """Maps hidden states to vocabulary logits via a transform and a bias-carrying decoder."""

    def __init__(self, config):
        super().__init__()
        self.transform = RoCBertPredictionHeadTransform(config)

        # The decoder weight is tied to the input embeddings elsewhere; each token
        # additionally gets an output-only bias.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Link the bias to the decoder so `resize_token_embeddings` resizes both together.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        # Transform the hidden states, then project them onto the vocabulary.
        return self.decoder(self.transform(hidden_states))


# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->RoCBert
class RoCBertOnlyMLMHead(nn.Module):
    """Thin wrapper exposing the masked-LM prediction head as `predictions`."""

    def __init__(self, config):
        super().__init__()
        self.predictions = RoCBertLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        # Delegate to the LM prediction head to obtain vocabulary scores.
        return self.predictions(sequence_output)


# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel with Bert->RoCBert,bert->roc_bert
class RoCBertPreTrainedModel(PreTrainedModel):
    """
    Abstract base class handling weight initialization and the pretrained-model
    loading interface for all RoCBert models.
    """

    # Configuration class and TF-weight loader wired to the RoCBert implementations.
    config_class = RoCBertConfig
    load_tf_weights = load_tf_weights_in_roc_bert
    # Attribute name under which task heads store the backbone (e.g. `self.roc_bert`).
    base_model_prefix = "roc_bert"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Plain normal init (TF uses truncated normal; cf. https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # The padding embedding starts at zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
# Shared documentation strings injected into the RoCBert model classes below via
# the `add_start_docstrings*` decorators.
ROC_BERT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RoCBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Per-call input documentation, formatted with the expected input shape; empty here.
ROC_BERT_INPUTS_DOCSTRING = r"""
"""


@add_start_docstrings(
    "The bare RoCBert Model transformer outputting raw hidden-states without any specific head on top.",
    ROC_BERT_START_DOCSTRING,
)
class RoCBertModel(RoCBertPreTrainedModel):
    """
    The model can behave as an encoder (self-attention only) as well as a decoder, in which case cross-attention
    layers are added between the self-attention layers, following the architecture described in
    [Attention is all you need](https://arxiv.org/abs/1706.03762).
    """

    # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->RoCBert
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        # Embedding layer and Transformer encoder stack.
        self.embeddings = RoCBertEmbeddings(config)
        self.encoder = RoCBertEncoder(config)

        # Optional pooling head over the first token; disabled when not needed.
        self.pooler = RoCBertPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing.
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertModel.get_input_embeddings
    def get_input_embeddings(self):
        # Word-embedding table used to embed `input_ids`.
        return self.embeddings.word_embeddings

    # Copied from transformers.models.bert.modeling_bert.BertModel.set_input_embeddings
    def set_input_embeddings(self, value):
        # Swap in a new word-embedding table.
        self.embeddings.word_embeddings = value

    def get_pronunciation_embeddings(self):
        # Embedding table used to embed `input_pronunciation_ids`.
        return self.embeddings.pronunciation_embed

    def set_pronunciation_embeddings(self, value):
        # Swap in a new pronunciation embedding table.
        self.embeddings.pronunciation_embed = value

    def get_shape_embeddings(self):
        # Embedding table used to embed `input_shape_ids`.
        return self.embeddings.shape_embed

    def set_shape_embeddings(self, value):
        # Swap in a new shape embedding table.
        self.embeddings.shape_embed = value

    # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        # Delegate pruning to the attention module of each addressed encoder layer.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    # 重写 forward 方法,增加了详细的文档字符串说明和代码示例说明
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_shape_ids: Optional[torch.Tensor] = None,
        input_pronunciation_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
@add_start_docstrings(
    """
    RoCBert Model with contrastive loss and masked_lm_loss during the pretraining.
    """,
    ROC_BERT_START_DOCSTRING,
)
class RoCBertForPreTraining(RoCBertPreTrainedModel):
    # These decoder weights are tied to the input word embeddings.
    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        # Backbone producing contextual representations.
        self.roc_bert = RoCBertModel(config)
        # MLM head used for the masked-language-modeling objective.
        self.cls = RoCBertOnlyMLMHead(config)

        # Initialize weights and apply final processing.
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.get_output_embeddings
    def get_output_embeddings(self):
        # The MLM head's decoder acts as the output embedding layer.
        return self.cls.predictions.decoder

    # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        # Replace the MLM head's output embedding layer.
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_shape_ids: Optional[torch.Tensor] = None,
        input_pronunciation_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        attack_input_ids: Optional[torch.Tensor] = None,
        attack_input_shape_ids: Optional[torch.Tensor] = None,
        attack_input_pronunciation_ids: Optional[torch.Tensor] = None,
        attack_attention_mask: Optional[torch.Tensor] = None,
        attack_token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels_input_ids: Optional[torch.Tensor] = None,
        labels_input_shape_ids: Optional[torch.Tensor] = None,
        labels_input_pronunciation_ids: Optional[torch.Tensor] = None,
        labels_attention_mask: Optional[torch.Tensor] = None,
        labels_token_type_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
):
    """
    RoCBert Model with a `language modeling` head on top.
    """
    # Implementation details for the forward method are omitted as per the task instructions.
    def __init__(self, config):
        super().__init__(config)

        # Masked LM relies on bidirectional self-attention, so warn when
        # the config requests decoder (causal) behavior.
        if config.is_decoder:
            logger.warning(
                "If you want to use `RoCBertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        # Backbone without the pooling layer (not needed for token-level prediction).
        self.roc_bert = RoCBertModel(config, add_pooling_layer=False)

        # MLM prediction head.
        self.cls = RoCBertOnlyMLMHead(config)

        # Initialize weights and apply final processing.
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.get_output_embeddings
    # Returns the MLM head's output embedding layer.
    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.set_output_embeddings
    # Replaces the MLM head's output embedding layer with `new_embeddings`.
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    # Forward pass computing masked-language-modeling logits (and loss when labels are given).
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_shape_ids: Optional[torch.Tensor] = None,
        input_pronunciation_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:
        ```
        >>> from transformers import AutoTokenizer, RoCBertForMaskedLM
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
        >>> model = RoCBertForMaskedLM.from_pretrained("weiweishi/roc-bert-base-zh")

        >>> inputs = tokenizer("法国是首都[MASK].", return_tensors="pt")

        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits

        >>> # retrieve index of {mask}
        >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]

        >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
        >>> tokenizer.decode(predicted_token_id)
        '.'
        ```
        """
        # Honor an explicit `return_dict` argument; otherwise fall back to the config default.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Run the RoCBert backbone to obtain contextual token representations.
        outputs = self.roc_bert(
            input_ids,
            input_shape_ids=input_shape_ids,
            input_pronunciation_ids=input_pronunciation_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # First element of the backbone output is the sequence of hidden states.
        sequence_output = outputs[0]

        # Project the hidden states onto the vocabulary with the MLM head.
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            # Flatten batch and sequence dimensions before computing the cross-entropy loss.
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            # Tuple output: logits followed by any extra backbone outputs,
            # with the loss prepended when labels were provided.
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        # Structured output carrying loss, logits, hidden states and attention weights.
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
        ):
            # 获取输入张量的形状
            input_shape = input_ids.shape
            # 获取有效的批处理大小
            effective_batch_size = input_shape[0]

            # 添加一个虚拟标记
            # 如果配置中未定义PAD标记,则抛出数值错误
            if self.config.pad_token_id is None:
                raise ValueError("The PAD token should be defined for generation")

            # 将注意力遮罩张量与一个新的全零张量连接起来,扩展其长度
            attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
            # 创建一个填充了PAD标记的虚拟标记张量
            dummy_token = torch.full(
                (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
            )
            # 将虚拟标记张量连接到输入张量的末尾
            input_ids = torch.cat([input_ids, dummy_token], dim=1)
            # 如果存在输入形状ID,则将虚拟标记张量连接到其末尾
            if input_shape_ids is not None:
                input_shape_ids = torch.cat([input_shape_ids, dummy_token], dim=1)
            # 如果存在输入发音ID,则将虚拟标记张量连接到其末尾
            if input_pronunciation_ids is not None:
                input_pronunciation_ids = torch.cat([input_pronunciation_ids, dummy_token], dim=1)

            # 返回包含更新后张量的字典
            return {
                "input_ids": input_ids,
                "input_shape_ids": input_shape_ids,
                "input_pronunciation_ids": input_pronunciation_ids,
                "attention_mask": attention_mask,
            }
# RoCBert model with a language-modeling head on top, used for causal-LM (CLM) fine-tuning.
# (In the full source this class is decorated with @add_start_docstrings using ROC_BERT_START_DOCSTRING.)
class RoCBertForCausalLM(RoCBertPreTrainedModel):
    # Keys whose weights are tied to the input word embeddings.
    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.__init__ with Bert->RoCBert
    def __init__(self, config):
        super().__init__(config)

        # A causal LM needs `is_decoder=True` so that the attention mask is causal.
        # Fix: the warning previously referred to a non-existent `RoCRoCBertForCausalLM`
        # class (a Bert->RoCBert replacement artifact); use the real class name.
        if not config.is_decoder:
            logger.warning("If you want to use `RoCBertForCausalLM` as a standalone, add `is_decoder=True.`")

        # Backbone without the pooling layer, plus the MLM-style prediction head.
        self.roc_bert = RoCBertModel(config, add_pooling_layer=False)
        self.cls = RoCBertOnlyMLMHead(config)

        # Initialize weights and apply final processing.
        self.post_init()

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.get_output_embeddings
    def get_output_embeddings(self):
        # The MLM head's decoder acts as the output embedding layer.
        return self.cls.predictions.decoder

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        # Replace the MLM head's output embedding layer.
        self.cls.predictions.decoder = new_embeddings

    # NOTE(review): in the full source this method carries @add_start_docstrings_to_model_forward
    # (ROC_BERT_INPUTS_DOCSTRING) and @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions).
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_shape_ids: Optional[torch.Tensor] = None,
        input_pronunciation_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.Tensor]] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        pass  # Signature only in this annotated excerpt; the implementation body is elided here.

    # 定义了prepare_inputs_for_generation方法,准备生成过程中的输入。
    def prepare_inputs_for_generation(
        self,
        input_ids,
        input_shape_ids=None,
        input_pronunciation_ids=None,
        past_key_values=None,
        attention_mask=None,
        **model_kwargs,
    ):
        pass  # 这里只是函数定义,实际功能由后续代码实现。
    ):
        # 获取输入张量的形状
        input_shape = input_ids.shape

        # 如果注意力遮罩为空,则创建全为1的遮罩张量,保证所有位置都被关注
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # 如果存在过去的键值对,则裁剪输入的decoder_input_ids
        if past_key_values is not None:
            # 获取过去键值对中第一个元素的长度
            past_length = past_key_values[0][0].shape[2]

            # 如果输入的decoder_input_ids长度大于过去长度,则移除前缀部分
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # 否则保留最后一个输入ID,即旧的行为
                remove_prefix_length = input_ids.shape[1] - 1

            # 对输入的decoder_input_ids进行裁剪
            input_ids = input_ids[:, remove_prefix_length:]
            # 如果存在input_shape_ids,则也进行相应裁剪
            if input_shape_ids is not None:
                input_shape_ids = input_shape_ids[:, -1:]
            # 如果存在input_pronunciation_ids,则也进行相应裁剪
            if input_pronunciation_ids is not None:
                input_pronunciation_ids = input_pronunciation_ids[:, -1:]

        # 返回重排后的张量和相关信息的字典
        return {
            "input_ids": input_ids,
            "input_shape_ids": input_shape_ids,
            "input_pronunciation_ids": input_pronunciation_ids,
            "attention_mask": attention_mask,
            "past_key_values": past_key_values,
        }

    # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel._reorder_cache
    def _reorder_cache(self, past_key_values, beam_idx):
        """Reorder every cached key/value tensor along the batch dimension to follow `beam_idx` during beam search."""
        reordered = []
        for layer_past in past_key_values:
            # Select the cache rows for the surviving beams, keeping each tensor on its own device.
            reordered.append(
                tuple(state.index_select(0, beam_idx.to(state.device)) for state in layer_past)
            )
        return tuple(reordered)
# RoCBert with a sequence-level classification/regression head over the pooled output.
@add_start_docstrings(
    """RoCBert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks.""",
    ROC_BERT_START_DOCSTRING,
)
class RoCBertForSequenceClassification(RoCBertPreTrainedModel):
    # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->RoCBert,bert->roc_bert
    def __init__(self, config):
        super().__init__(config)
        # Number of target labels; 1 triggers the regression path in forward().
        self.num_labels = config.num_labels
        self.config = config

        # Backbone including the pooler (the classifier consumes the pooled output).
        self.roc_bert = RoCBertModel(config)
        # Use the dedicated classifier dropout if configured, otherwise the generic hidden dropout.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        # Linear classification head over the pooled representation.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing.
        self.post_init()

    # Forward pass producing sequence-level logits (and loss when labels are given).
    @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
        expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_shape_ids: Optional[torch.Tensor] = None,
        input_pronunciation_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        # Honor an explicit `return_dict` argument; otherwise fall back to the config default.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Run the RoCBert backbone.
        outputs = self.roc_bert(
            input_ids,
            input_shape_ids=input_shape_ids,
            input_pronunciation_ids=input_pronunciation_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Pooled (first-token) representation used for classification.
        pooled_output = outputs[1]

        # Apply dropout, then the linear classification head.
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once, from num_labels and the label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            # Pick the loss function matching the problem type.
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    # Single-output regression: squeeze away the label dimension.
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    # Multi-output regression.
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                # One label per example: cross-entropy over flattened logits.
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                # Several labels per example: binary cross-entropy with logits.
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            # Tuple output: logits plus any extra backbone outputs, loss prepended when present.
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        # Structured output carrying loss, logits, hidden states and attention weights.
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# RoCBert with a multiple-choice classification head on top of the pooled output.
@add_start_docstrings(
    """RoCBert Model with a multiple choice classification head on top (a linear layer on top of
    the pooled output and a softmax) e.g. for RocStories/SWAG tasks.""",
    ROC_BERT_START_DOCSTRING,
)
class RoCBertForMultipleChoice(RoCBertPreTrainedModel):

    # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->RoCBert,bert->roc_bert
    def __init__(self, config):
        super().__init__(config)

        # Backbone including the pooler.
        self.roc_bert = RoCBertModel(config)

        # Use the dedicated classifier dropout if configured, otherwise the generic hidden dropout.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)

        # A single logit per choice.
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing.
        self.post_init()

    # 使用装饰器为forward方法添加文档字符串,描述其输入参数和输出结果
    @add_start_docstrings_to_model_forward(
        ROC_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    # 添加示例代码的文档字符串,显示checkpoint、输出类型和配置类的信息
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    # 定义模型的前向传播方法
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_shape_ids: Optional[torch.Tensor] = None,
        input_pronunciation_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        # 结束函数签名
    ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        # 根据 `return_dict` 参数确定是否返回字典形式的输出
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # 获取选择题个数,即第二维度的大小
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # 重新调整输入数据的形状,将其视作二维的,保留最后一维的大小
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        input_shape_ids = input_shape_ids.view(-1, input_shape_ids.size(-1)) if input_shape_ids is not None else None
        input_pronunciation_ids = (
            input_pronunciation_ids.view(-1, input_pronunciation_ids.size(-1))
            if input_pronunciation_ids is not None
            else None
        )
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        # 调用 RoCBERT 模型进行推理
        outputs = self.roc_bert(
            input_ids,
            input_shape_ids=input_shape_ids,
            input_pronunciation_ids=input_pronunciation_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # 获取汇聚后的输出
        pooled_output = outputs[1]

        # 应用 dropout 操作
        pooled_output = self.dropout(pooled_output)
        # 通过分类器获取 logits
        logits = self.classifier(pooled_output)
        # 重新调整 logits 的形状,以适应多选题的需求
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            # 计算交叉熵损失
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            # 如果不使用字典形式输出,则返回元组
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        # 返回多选题模型的输出对象
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# 使用自定义的文档字符串初始化 RoCBertForTokenClassification 类,用于在隐藏状态输出之上添加一个令牌分类头部,
# 例如用于命名实体识别(NER)任务的线性层。
@add_start_docstrings(
    """RoCBert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.""",
    ROC_BERT_START_DOCSTRING,
)
# 继承 RoCBertPreTrainedModel 类并重写 __init__ 方法
class RoCBertForTokenClassification(RoCBertPreTrainedModel):
    """RoCBert encoder with a per-token classification head (e.g. for NER)."""

    # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->RoCBert,bert->roc_bert
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # No pooling layer: token classification uses the per-token hidden states.
        self.roc_bert = RoCBertModel(config, add_pooling_layer=False)

        # Prefer the dedicated classifier dropout; fall back to the hidden dropout.
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        # Projects each token's hidden state to per-label logits.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing.
        self.post_init()

    @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
        expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
    )
    # NOTE: the extracted source contained a duplicated, self-less `def forward(`
    # and a truncated body; this is the single, complete forward method.
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_shape_ids: Optional[torch.Tensor] = None,
        input_pronunciation_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Encode the inputs with RoCBert.
        outputs = self.roc_bert(
            input_ids,
            input_shape_ids=input_shape_ids,
            input_pronunciation_ids=input_pronunciation_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token hidden states -> dropout -> per-token label logits.
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # Flatten (batch, seq, labels) / (batch, seq) for cross-entropy.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            # Tuple output: (loss?, logits, hidden_states?, attentions?).
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# 使用装饰器为类添加文档字符串,描述了 RoCBert 模型及其在抽取式问答任务(如 SQuAD)中的作用
@add_start_docstrings(
    """RoCBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).""",
    ROC_BERT_START_DOCSTRING,
)
# 定义 RoCBertForQuestionAnswering 类,继承自 RoCBertPreTrainedModel 类
class RoCBertForQuestionAnswering(RoCBertPreTrainedModel):
    
    # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->RoCBert,bert->roc_bert
    def __init__(self, config):
        """Build the extractive-QA model: RoCBert encoder + linear span head."""
        super().__init__(config)
        # Number of output labels; for span QA this drives the qa_outputs width
        # (presumably 2: start/end logits — TODO confirm against the config).
        self.num_labels = config.num_labels

        # Encoder without pooling layer: QA works on per-token hidden states.
        self.roc_bert = RoCBertModel(config, add_pooling_layer=False)
        # Projects each token's hidden state to (start, end) span logits.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing.
        self.post_init()

    # 使用装饰器为 forward 方法添加文档字符串,描述 RoCBertForQuestionAnswering 模型的输入和输出
    @add_start_docstrings_to_model_forward(ROC_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    # 使用装饰器为 forward 方法添加代码示例的文档字符串,展示其用法和预期输出
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_QA,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        qa_target_start_index=_QA_TARGET_START_INDEX,
        qa_target_end_index=_QA_TARGET_END_INDEX,
        expected_output=_QA_EXPECTED_OUTPUT,
        expected_loss=_QA_EXPECTED_LOSS,
    )
    # 前向传播函数定义,接收多个输入参数,包括输入的 token IDs、注意力掩码等
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        input_shape_ids: Optional[torch.Tensor] = None,
        input_pronunciation_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        # 输入参数说明结束

.\models\roc_bert\tokenization_roc_bert.py

# coding=utf-8
# Copyright 2022 WeChatAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for RoCBert."""

import collections  # 导入 collections 模块,用于处理数据集合
import itertools  # 导入 itertools 模块,用于高效循环操作
import json  # 导入 json 模块,用于处理 JSON 数据
import os  # 导入 os 模块,用于操作系统相关的功能
import unicodedata  # 导入 unicodedata 模块,用于 Unicode 字符处理
from typing import Dict, List, Optional, Tuple, Union  # 导入类型提示相关的类和函数

from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace  # 导入 tokenization_utils 中的函数和类
from ...tokenization_utils_base import (  # 导入 tokenization_utils_base 中的各种类和函数
    ENCODE_KWARGS_DOCSTRING,
    ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
    BatchEncoding,
    EncodedInput,
    EncodedInputPair,
    PaddingStrategy,
    PreTokenizedInput,
    PreTokenizedInputPair,
    TensorType,
    TextInput,
    TextInputPair,
    TruncationStrategy,
)
from ...utils import add_end_docstrings, logging  # 导入 utils 中的函数和类

logger = logging.get_logger(__name__)  # 获取当前模块的日志记录器

VOCAB_FILES_NAMES = {  # canonical file names for the tokenizer's vocabulary resources
    "vocab_file": "vocab.txt",
    "word_shape_file": "word_shape.json",
    "word_pronunciation_file": "word_pronunciation.json",
}

PRETRAINED_VOCAB_FILES_MAP = {  # download URLs for each pretrained checkpoint's vocab files
    "vocab_file": {
        "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/vocab.txt"
    },
    "word_shape_file": {
        "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/word_shape.json"
    },
    "word_pronunciation_file": {
        "weiweishi/roc-bert-base-zh": (
            "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/word_pronunciation.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {  # maximum model input length per checkpoint
    "weiweishi/roc-bert-base-zh": 512,
}

PRETRAINED_INIT_CONFIGURATION = {  # default tokenizer init kwargs per checkpoint
    "weiweishi/roc-bert-base-zh": {"do_lower_case": True},
}


# Copied from transformers.models.bert.tokenization_bert.load_vocab
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    # Line number == token id; iterate the file object directly.
    with open(vocab_file, "r", encoding="utf-8") as reader:
        for index, line in enumerate(reader):
            vocab[line.rstrip("\n")] = index
    return vocab


# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    # str.split() with no argument collapses runs of any whitespace.
    return stripped.split() if stripped else []


class RoCBertTokenizer(PreTrainedTokenizer):
    r"""
    Construct a RoCBert tokenizer, based on WordPiece. Besides regular token ids, RoCBert also produces shape ids
    and pronunciation ids looked up from two extra JSON vocabularies.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`): Path to the token vocabulary file.
        word_shape_file (`str`): Path to the JSON file mapping tokens to shape ids.
        word_pronunciation_file (`str`): Path to the JSON file mapping tokens to pronunciation ids.
    """

    # Class-level metadata consumed by `from_pretrained`. The extracted source
    # contained a garbled duplicate `__init__` that assigned these per-instance
    # and referenced the undefined name `Iterable`; they are plain class
    # attributes in the upstream implementation.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        word_shape_file,
        word_pronunciation_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """Load the three vocabulary resources and set up basic/WordPiece tokenizers."""
        # Validate every required resource file up front so a missing shape or
        # pronunciation file fails fast with a clear message.
        for cur_file in [vocab_file, word_shape_file, word_pronunciation_file]:
            if cur_file is None or not os.path.isfile(cur_file):
                # Bug fix: report the file that is actually missing (previously
                # the message always interpolated `vocab_file`, even when the
                # shape or pronunciation file was the absent one).
                raise ValueError(
                    f"Can't find a vocabulary file at path '{cur_file}'. To load the vocabulary from a Google "
                    "pretrained model use `tokenizer = RoCBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                )

        # token -> id mapping.
        self.vocab = load_vocab(vocab_file)

        # token -> shape-feature id mapping (JSON).
        with open(word_shape_file, "r", encoding="utf8") as in_file:
            self.word_shape = json.load(in_file)

        # token -> pronunciation-feature id mapping (JSON).
        with open(word_pronunciation_file, "r", encoding="utf8") as in_file:
            self.word_pronunciation = json.load(in_file)

        # Reverse mapping id -> token, used for decoding.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])

        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            # Pre-splitting tokenizer (lower-casing, CJK handling, accent stripping).
            self.basic_tokenizer = RoCBertBasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )

        # Sub-word tokenizer over the loaded vocab.
        self.wordpiece_tokenizer = RoCBertWordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))

        # Register common tokenizer configuration with the base class.
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

    @property
    def do_lower_case(self):
        """Whether basic tokenization lower-cases input (delegates to the basic tokenizer)."""
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        """Number of entries in the base token vocabulary (excludes added tokens)."""
        return len(self.vocab)

    # Copied behavior from transformers BertTokenizer.get_vocab.
    def get_vocab(self):
        """Return the full vocabulary: base vocab overlaid with any added tokens."""
        full_vocab = dict(self.vocab)
        full_vocab.update(self.added_tokens_encoder)
        return full_vocab

    # 从 transformers 库中复制的方法,用于实现 tokenization_bert.BertTokenizer._tokenize 的功能
    # 使用self对象的basic_tokenizer对文本进行基本的tokenization处理
    def _tokenize(self, text, split_special_tokens=False):
        """Split `text` into sub-tokens: basic tokenization first (unless disabled), then WordPiece."""
        if not self.do_basic_tokenize:
            # Basic tokenization disabled: run WordPiece on the raw text directly.
            return self.wordpiece_tokenizer.tokenize(text)

        # Special tokens are protected from splitting unless explicitly requested.
        protected = self.all_special_tokens if not split_special_tokens else None
        sub_tokens = []
        for word in self.basic_tokenizer.tokenize(text, never_split=protected):
            if word in self.basic_tokenizer.never_split:
                # Never-split tokens are kept whole.
                sub_tokens.append(word)
            else:
                sub_tokens.extend(self.wordpiece_tokenizer.tokenize(word))
        return sub_tokens

    # Encodes one (optionally paired) example — token ids plus RoCBert's shape and
    # pronunciation ids — into model-ready inputs.
    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def prepare_for_model(
        self,
        ids: List[int],
        shape_ids: List[int],
        pronunciation_ids: List[int],
        pair_ids: Optional[List[int]] = None,
        pair_shape_ids: Optional[List[int]] = None,
        pair_pronunciation_ids: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        prepend_batch_axis: bool = False,
        **kwargs,
    ):
        # NOTE(review): only this `pass` placeholder survives here — the real
        # implementation appears to have been lost when this file was extracted.
        # As written, calling `prepare_for_model` silently returns None; restore
        # the upstream body before using this tokenizer.
        pass  # placeholder body (extraction artifact)
    # Pads a single encoded example (all of its parallel id sequences) to `max_length`.
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        # Default: return an attention mask only if the model declares one as an input.
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        # The primary input (first model input name) determines the current length.
        required_input = encoded_inputs[self.model_input_names[0]]

        # LONGEST strategy: pad to this example's own length (no-op padding).
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        # Round max_length up to the next multiple of pad_to_multiple_of if needed.
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Build an all-ones attention mask for the real tokens before padding it below.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            # Right padding: append pad values to every parallel sequence.
            if self.padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                # RoCBert-specific parallel sequences get the same pad id.
                for key in ["input_shape_ids", "input_pronunciation_ids"]:
                    if key in encoded_inputs:
                        encoded_inputs[key] = encoded_inputs[key] + [self.pad_token_id] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            # Left padding: prepend pad values instead.
            elif self.padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                for key in ["input_shape_ids", "input_pronunciation_ids"]:
                    if key in encoded_inputs:
                        encoded_inputs[key] = [self.pad_token_id] * difference + encoded_inputs[key]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                # Any value other than "left"/"right" is a configuration error.
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
    # Batch counterpart of encode_plus for text / text-pair / pre-tokenized inputs.
    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
            List[PreTokenizedInputPair],
            List[EncodedInput],
            List[EncodedInputPair],
        ],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ):
        # NOTE(review): the body here is only `pass` — the real implementation
        # appears to have been lost when this file was extracted; restore the
        # upstream body before use.
        pass

    # 定义一个方法用于为模型准备批量数据,支持多种输入类型
    def _batch_prepare_for_model(
        self,
        batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
        batch_shape_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
        batch_pronunciation_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepares a batch of input id sequences, or pairs of sequences, so they can be used by the model. Adds
        special tokens, truncates while accounting for them, and manages an overflow window with a user-defined
        stride. (The extracted source spliced a dead duplicate `): / pass` between the signature and this body;
        they are merged back into one valid method here.)

        Args:
            batch_ids_pairs: list of tokenized input ids or input ids pairs
            batch_shape_ids_pairs: list of tokenized input shape ids or input shape ids pairs
            batch_pronunciation_ids_pairs: list of tokenized input pronunciation ids or input pronunciation ids pairs
        """
        batch_outputs = {}
        for i, (first_ids, second_ids) in enumerate(batch_ids_pairs):
            first_shape_ids, second_shape_ids = batch_shape_ids_pairs[i]
            first_pronunciation_ids, second_pronunciation_ids = batch_pronunciation_ids_pairs[i]
            outputs = self.prepare_for_model(
                first_ids,
                first_shape_ids,
                first_pronunciation_ids,
                pair_ids=second_ids,
                pair_shape_ids=second_shape_ids,
                pair_pronunciation_ids=second_pronunciation_ids,
                add_special_tokens=add_special_tokens,
                padding=PaddingStrategy.DO_NOT_PAD.value,  # padding is done once over the whole batch below
                truncation=truncation_strategy.value,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=None,  # padded in batch afterward
                return_attention_mask=False,  # attention mask is built during batch padding
                return_token_type_ids=return_token_type_ids,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_length=return_length,
                return_tensors=None,  # the whole batch is converted to tensors at the end
                prepend_batch_axis=False,
                verbose=verbose,
            )

            # Accumulate each per-example output into a per-key list.
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                batch_outputs[key].append(value)

        # Pad the whole batch in one pass (also builds attention masks if requested).
        batch_outputs = self.pad(
            batch_outputs,
            padding=padding_strategy.value,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        return BatchEncoding(batch_outputs, tensor_type=return_tensors)

    # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id
    def _convert_token_to_id(self, token):
        """Convert a token (str) into its vocabulary id, falling back to the unk token's id."""
        fallback_id = self.vocab.get(self.unk_token)
        return self.vocab.get(token, fallback_id)
    # Look up a token's shape id in the word-shape vocabulary.
    def _convert_token_to_shape_id(self, token):
        """Convert a token (str) into a shape id via the shape vocab, defaulting to the unk token's entry."""
        default_id = self.word_shape.get(self.unk_token)
        return self.word_shape.get(token, default_id)

    # Vectorized wrapper around `_convert_token_to_shape_id`; `None` passes through.
    def convert_tokens_to_shape_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
        """Map each token in `tokens` to its shape id; returns `None` when `tokens` is `None`."""
        if tokens is None:
            return None
        return [self._convert_token_to_shape_id(token) for token in tokens]

    # Look up a token's pronunciation id in the word-pronunciation vocabulary,
    # falling back to the unk token's entry for out-of-vocabulary tokens.
    def _convert_token_to_pronunciation_id(self, token):
        """Converts a token (str) into a pronunciation_id using the pronunciation vocab."""
        return self.word_pronunciation.get(token, self.word_pronunciation.get(self.unk_token))

    # Vectorized wrapper around `_convert_token_to_pronunciation_id`; `None` passes through.
    def convert_tokens_to_pronunciation_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
        """Map each token in `tokens` to its pronunciation id; returns `None` when `tokens` is `None`."""
        if tokens is None:
            return None
        return [self._convert_token_to_pronunciation_id(token) for token in tokens]

    # Reverse lookup via the `ids_to_tokens` mapping; unknown indices fall back
    # to the unk token rather than raising.
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    # Detokenize wordpieces back into plain text.
    def convert_tokens_to_string(self, tokens):
        """Join wordpiece tokens into a single string, merging "##" continuation pieces."""
        joined = " ".join(tokens)
        return joined.replace(" ##", "").strip()

    # Assemble `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`, optionally using
    # caller-supplied cls/sep ids instead of the tokenizer defaults.
    def build_inputs_with_special_tokens(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        cls_token_id: int = None,
        sep_token_id: int = None,
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        cls = [self.cls_token_id if cls_token_id is None else cls_token_id]
        sep = [self.sep_token_id if sep_token_id is None else sep_token_id]
        sequence = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            sequence = sequence + token_ids_1 + sep
        return sequence

    # NOTE(review): the `def get_special_tokens_mask(...)` signature line is missing
    # from this excerpt (the original comments say it was omitted), and the body
    # below is dedented relative to the surrounding methods — restore the signature
    # from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask.
    # If the ids already carry special tokens, defer to the base-class implementation.
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )

    # Pair of sequences: 1 marks the [CLS]/[SEP] positions, 0 marks sequence tokens.
    if token_ids_1 is not None:
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    # Single sequence: [CLS] tokens... [SEP] -> 1, 0..., 1.
    return [1] + ([0] * len(token_ids_0)) + [1]



    # Build BERT-style segment ids (token type ids) for one or two sequences.
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        # First segment covers [CLS] + token_ids_0 + [SEP]; second covers token_ids_1 + [SEP].
        first_segment_len = 1 + len(token_ids_0) + 1
        if token_ids_1 is None:
            return [0] * first_segment_len
        return [0] * first_segment_len + [1] * (len(token_ids_1) + 1)
    # Persist the vocab, word-shape and word-pronunciation files into `save_directory`
    # and return their paths as a 3-tuple.
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str, str, str]:
        """Write the token vocab (one token per line) plus the shape and pronunciation
        JSON maps into `save_directory`, optionally prefixing each filename."""
        # Guard clause: the target must be an existing directory.
        if not os.path.isdir(save_directory):
            raise ValueError(
                f"Can't find a directory at path '{save_directory}'. To load the vocabulary from a Google "
                "pretrained model use `tokenizer = RoCBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )

        # Build the three output paths with a shared optional prefix.
        prefix = filename_prefix + "-" if filename_prefix else ""
        vocab_file, word_shape_file, word_pronunciation_file = (
            os.path.join(save_directory, prefix + self.vocab_files_names[key])
            for key in ("vocab_file", "word_shape_file", "word_pronunciation_file")
        )

        # Write the vocab sorted by index; warn (once per gap) if indices are not consecutive.
        index = 0
        with open(vocab_file, "w", encoding="utf-8") as fp:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                fp.write(token + "\n")
                index += 1

        # Shape and pronunciation maps are serialized as pretty-printed JSON.
        with open(word_shape_file, "w", encoding="utf8") as fp:
            json.dump(self.word_shape, fp, ensure_ascii=False, indent=4, separators=(", ", ": "))

        with open(word_pronunciation_file, "w", encoding="utf8") as fp:
            json.dump(self.word_pronunciation, fp, ensure_ascii=False, indent=4, separators=(", ", ": "))

        return (
            vocab_file,
            word_shape_file,
            word_pronunciation_file,
        )
# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer with BasicTokenizer->RoCBertBasicTokenizer
class RoCBertBasicTokenizer(object):
    """
    Constructs a RoCBertBasicTokenizer that runs basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.

            This should be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
            the full context of the words, such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        # Tokens listed in `never_split` are protected from lowercasing,
        # accent stripping and punctuation splitting.
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split) if never_split is not None else set()
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.

        Args:
            never_split (`List[str]`, *optional*)
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                [`PreTrainedTokenizer.tokenize`]) List of token not to split.
        """
        # Merge the per-call protected tokens with the instance-level ones.
        protected = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # Optionally isolate CJK characters with surrounding whitespace.
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)

        # NFC normalization so visually identical characters share one representation.
        normalized_text = unicodedata.normalize("NFC", text)
        pieces = []
        for raw_token in whitespace_tokenize(normalized_text):
            if raw_token not in protected:
                if self.do_lower_case:
                    raw_token = raw_token.lower()
                    # With lowercasing on, accents are stripped unless explicitly disabled.
                    if self.strip_accents is not False:
                        raw_token = self._run_strip_accents(raw_token)
                elif self.strip_accents:
                    raw_token = self._run_strip_accents(raw_token)
            pieces.extend(self._run_split_on_punc(raw_token, protected))

        return whitespace_tokenize(" ".join(pieces))

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks ("Mn"),
        # which are then dropped.
        decomposed = unicodedata.normalize("NFD", text)
        return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        # Protected tokens (and disabled splitting) pass through unchanged.
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        groups = []
        start_new_word = True
        for ch in text:
            if _is_punctuation(ch):
                # Each punctuation character becomes its own group.
                groups.append([ch])
                start_new_word = True
            else:
                if start_new_word:
                    groups.append([])
                start_new_word = False
                groups[-1].append(ch)

        return ["".join(group) for group in groups]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        pieces = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                pieces.append(f" {ch} ")
            else:
                pieces.append(ch)
        return "".join(pieces)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # CJK Unified Ideographs blocks (plus compatibility/extension blocks).
        cjk_ranges = (
            (0x4E00, 0x9FFF),
            (0x3400, 0x4DBF),
            (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F),
            (0x2B740, 0x2B81F),
            (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF),
            (0x2F800, 0x2FA1F),
        )
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        cleaned = []
        for ch in text:
            cp = ord(ch)
            # Drop NUL, the replacement character, and control characters;
            # collapse any whitespace character to a plain space.
            if cp == 0 or cp == 0xFFFD or _is_control(ch):
                continue
            cleaned.append(" " if _is_whitespace(ch) else ch)
        return "".join(cleaned)
# Copied from  transformers.models.bert.tokenization_bert.WordpieceTokenizer with WordpieceTokenizer->RoCBertWordpieceTokenizer
class RoCBertWordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        # `vocab` maps wordpiece string -> id; words longer than
        # `max_input_chars_per_word` are mapped straight to `unk_token`.
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        for word in whitespace_tokenize(text):
            if len(word) > self.max_input_chars_per_word:
                # Overlong words are not decomposed; emit a single unknown token.
                output_tokens.append(self.unk_token)
                continue

            pieces = []
            failed = False
            start = 0
            while start < len(word):
                # Greedily shrink the candidate substring from the right until
                # it is found in the vocab (non-initial pieces get a "##" prefix).
                end = len(word)
                match = None
                while start < end:
                    candidate = word[start:end]
                    if start > 0:
                        candidate = "##" + candidate
                    if candidate in self.vocab:
                        match = candidate
                        break
                    end -= 1
                if match is None:
                    failed = True
                    break
                pieces.append(match)
                start = end

            if failed:
                # No decomposition covers the whole word: fall back to unk.
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(pieces)
        return output_tokens

.\models\roc_bert\__init__.py

# 版权声明和许可信息
#
# 根据 Apache 许可证版本 2.0(“许可证”)授权;
# 除非符合许可证的规定,否则不得使用此文件。
# 您可以在以下网址获取许可证的副本:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# 除非适用法律要求或书面同意,否则按“原样”分发软件,
# 没有任何明示或暗示的保证或条件。
# 有关许可证详细信息,请参阅许可证。
from typing import TYPE_CHECKING

# Lazy-import helpers and optional-dependency probes shared across the library.
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Base import structure: the config and (slow) tokenizer are always importable.
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

# Probe the optional `tokenizers` dependency. NOTE(review): nothing is registered
# in either branch, so this whole probe appears to be a vestigial no-op — confirm
# against the upstream roc_bert __init__.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass  # nothing extra to register when tokenizers is available

# Register the PyTorch modeling symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

# At type-checking time, import the symbols eagerly so static analyzers see them;
# at runtime, replace the module with a lazy loader instead.
if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    # Probe the optional `tokenizers` dependency. Bug fix: the original raised
    # OptionalDependencyNotAvailable in the *else* branch — i.e. exactly when the
    # dependency IS available. There is nothing extra to import either way, so
    # both branches are no-ops (mirroring the runtime section above).
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass

    # Import the torch-backed modeling symbols only when torch is available.
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

# Not type-checking: install a _LazyModule so heavy dependencies load on demand.
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

.\models\roformer\configuration_roformer.py

# 定义了 RoFormer 模型的配置类,继承自 PretrainedConfig,用于存储 RoFormer 模型的配置信息
class RoFormerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`RoFormerModel`]. It is used to instantiate an
    RoFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the RoFormer
    [junnyu/roformer_chinese_base](https://huggingface.co/junnyu/roformer_chinese_base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    # 定义 RoFormer 模型的配置类,用于配置模型的各种参数
    Args:
        vocab_size (`int`, *optional*, defaults to 50000):
            RoFormer 模型的词汇表大小,定义了可以由输入 `inputs_ids` 表示的不同 token 数量。
        embedding_size (`int`, *optional*, defaults to None):
            编码器层和池化层的维度。如果未提供,则默认为 `hidden_size`。
        hidden_size (`int`, *optional*, defaults to 768):
            编码器层和池化层的维度。
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Transformer 编码器中隐藏层的数量。
        num_attention_heads (`int`, *optional*, defaults to 12):
            Transformer 编码器中每个注意力层的注意力头数。
        intermediate_size (`int`, *optional*, defaults to 3072):
            Transformer 编码器中 "intermediate"(即前馈)层的维度。
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            编码器和池化器中的非线性激活函数(函数或字符串)。支持 `"gelu"`, `"relu"`, `"selu"` 和 `"gelu_new"`。
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            嵌入层、编码器和池化器中所有全连接层的 dropout 概率。
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            注意力概率的 dropout 比例。
        max_position_embeddings (`int`, *optional*, defaults to 1536):
            模型可能使用的最大序列长度。通常设置为一个较大的值(例如 512、1024 或 1536)。
        type_vocab_size (`int`, *optional*, defaults to 2):
            调用 [`RoFormerModel`] 或 [`TFRoFormerModel`] 时传递的 `token_type_ids` 的词汇表大小。
        initializer_range (`float`, *optional*, defaults to 0.02):
            初始化所有权重矩阵的截断正态初始化器的标准差。
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            层归一化层使用的 epsilon。
        is_decoder (`bool`, *optional*, defaults to `False`):
            模型是否用作解码器。如果为 `False`,则模型用作编码器。
        use_cache (`bool`, *optional*, defaults to `True`):
            模型是否应返回最后的键/值注意力(不是所有模型都使用)。仅在 `config.is_decoder=True` 时相关。
        rotary_value (`bool`, *optional*, defaults to `False`):
            是否在值层应用旋转位置嵌入。
    # 初始化一个 RoFormer 风格的配置对象
    configuration = RoFormerConfig()
    
    # 使用 RoFormer 配置对象初始化一个模型,模型的权重是随机初始化的
    model = RoFormerModel(configuration)
    
    # 获取模型的配置信息
    configuration = model.config
# ONNX export configuration for RoFormer.
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axis specification for each model input.

        Bug fix: the original unconditionally reassigned `dynamic_axis` to the
        2-axis form after the branch, which made the multiple-choice branch dead
        code and exported multiple-choice models without the `choice` axis.
        """
        if self.task == "multiple-choice":
            # Multiple-choice inputs carry an extra `choice` dimension.
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}

        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )

.\models\roformer\convert_roformer_original_tf_checkpoint_to_pytorch.py

# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert RoFormer checkpoint."""


import argparse  # 导入解析命令行参数的模块

import torch  # 导入 PyTorch 深度学习框架

from transformers import RoFormerConfig, RoFormerForMaskedLM, load_tf_weights_in_roformer  # 导入 RoFormer 模型相关的类和函数
from transformers.utils import logging  # 导入日志记录模块


logging.set_verbosity_info()  # 设置日志输出级别为 info


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RoFormer checkpoint into a PyTorch state-dict file.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint to read.
        bert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to write the converted PyTorch weights.
    """
    # Instantiate the PyTorch model skeleton from the JSON config.
    config = RoFormerConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = RoFormerForMaskedLM(config)  # masked-LM head variant of RoFormer

    # Copy the TensorFlow checkpoint weights into the model in place.
    load_tf_weights_in_roformer(model, config, tf_checkpoint_path)

    # Save with the legacy (non-zipfile) serialization format.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path, _use_new_zipfile_serialization=False)


if __name__ == "__main__":
    # Command-line interface for the conversion script.
    parser = argparse.ArgumentParser()

    # Required arguments.
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )

    args = parser.parse_args()
    # Run the TF -> PyTorch conversion with the parsed paths.
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)

.\models\roformer\modeling_flax_roformer.py

# 导入必要的库和模块
from typing import Callable, Optional, Tuple  # 导入类型提示相关的模块

import flax.linen as nn  # 导入Flax的linen模块,用于定义模型结构
import jax  # 导入JAX,用于自动求导和并行计算
import jax.numpy as jnp  # 导入JAX的NumPy接口,用于多维数组操作
import numpy as np  # 导入NumPy,用于基本的数值计算
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze  # 导入Flax的FrozenDict等相关模块,用于管理不可变字典
from flax.linen.attention import dot_product_attention_weights  # 导入注意力机制相关模块
from flax.traverse_util import flatten_dict, unflatten_dict  # 导入工具函数,用于扁平化和反扁平化字典结构
from jax import lax  # 导入JAX的lax模块,用于定义低级的线性代数和信号处理原语

# 导入相关模型输出和工具函数
from ...modeling_flax_outputs import (
    FlaxBaseModelOutput,
    FlaxMaskedLMOutput,
    FlaxMultipleChoiceModelOutput,
    FlaxQuestionAnsweringModelOutput,
    FlaxSequenceClassifierOutput,
    FlaxTokenClassifierOutput,
)
from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging

# 导入RoFormer配置文件
from .configuration_roformer import RoFormerConfig

# Module-level logger for this file.
logger = logging.get_logger(__name__)

# Checkpoint and config names referenced by auto-generated docstrings.
_CHECKPOINT_FOR_DOC = "junnyu/roformer_chinese_base"
_CONFIG_FOR_DOC = "RoFormerConfig"

# Known pretrained Flax RoFormer checkpoints.
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "junnyu/roformer_chinese_small",
    "junnyu/roformer_chinese_base",
    "junnyu/roformer_chinese_char_small",
    "junnyu/roformer_chinese_char_base",
    "junnyu/roformer_small_discriminator",
    "junnyu/roformer_small_generator",
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
]

# RoFormer模型的起始文档字符串,包含模型介绍和Flax特性说明
ROFORMER_START_DOCSTRING = r"""

    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading, saving and converting weights from PyTorch models)

    This model is also a
    [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
    a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
    behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
    # Parameters:
    #     config ([`RoFormerConfig`]): 模型配置类,包含模型的所有参数。
    #         初始化时使用配置文件不会加载与模型关联的权重,只加载配置信息。
    #         可查看 [`~FlaxPreTrainedModel.from_pretrained`] 方法来加载模型权重。
    #     dtype (`jax.numpy.dtype`, *optional*, 默认为 `jax.numpy.float32`):
    #         计算的数据类型。可以是 `jax.numpy.float32`、`jax.numpy.float16`(在GPU上)、`jax.numpy.bfloat16`(在TPU上)之一。
    #         
    #         这可以用于在GPU或TPU上启用混合精度训练或半精度推断。如果指定了dtype,则所有计算将使用指定的数据类型。
    #         
    #         **注意,这只指定了计算的数据类型,并不影响模型参数的数据类型。**
    #         
    #         如果想要更改模型参数的数据类型,请参见 [`~FlaxPreTrainedModel.to_fp16`] 和 [`~FlaxPreTrainedModel.to_bf16`]。
"""

ROFORMER_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`numpy.ndarray` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        head_mask (`numpy.ndarray` of shape `({0})`, `optional):
            Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


# Copied from transformers.models.marian.modeling_flax_marian.create_sinusoidal_positions
def create_sinusoidal_positions(n_pos, dim):
    """Build an (n_pos, dim) sinusoidal position-encoding table as a jnp array.

    The first ceil(dim/2) columns hold sines of the even-indexed frequency
    channels; the remaining columns hold cosines of the odd-indexed ones.
    """
    # Angle for position `pos`, channel `j`: pos / 10000^(2*(j//2)/dim).
    angles = np.array(
        [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
    )
    # Column index splitting the sine half from the cosine half; an odd `dim`
    # gives the sine half one extra column.
    half = dim // 2 + dim % 2
    table = np.zeros_like(angles)
    table[:, 0:half] = np.sin(angles[:, 0::2])
    table[:, half:] = np.cos(angles[:, 1::2])

    return jnp.array(table)


class FlaxRoFormerEmbeddings(nn.Module):
    """Construct the embeddings from word and token_type embeddings."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        # Both embedding tables are initialized from a normal distribution whose
        # standard deviation comes from the model configuration.
        init = jax.nn.initializers.normal(stddev=self.config.initializer_range)
        self.word_embeddings = nn.Embed(
            self.config.vocab_size,
            self.config.hidden_size,
            embedding_init=init,
        )
        self.token_type_embeddings = nn.Embed(
            self.config.type_vocab_size,
            self.config.hidden_size,
            embedding_init=init,
        )
        # Post-embedding normalization and dropout, configured from the model config.
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, input_ids, token_type_ids, attention_mask, deterministic: bool = True):
        # Look up word and segment embeddings (ids cast to int32 first) and sum them.
        # Note there is no position embedding here: RoFormer injects positions via
        # rotary embeddings inside the attention layers.
        embeddings = self.word_embeddings(input_ids.astype("i4"))
        embeddings = embeddings + self.token_type_embeddings(token_type_ids.astype("i4"))

        # Normalize, then apply (possibly deterministic) dropout.
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings, deterministic=deterministic)
        return embeddings
class FlaxRoFormerSelfAttention(nn.Module):
    """Multi-head self-attention that applies rotary position embeddings to the
    query/key states (and, when ``config.rotary_value`` is set, the value states)
    before computing the attention weights.
    """

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self) -> None:
        if self.config.hidden_size % self.config.num_attention_heads != 0:
            # FIX: was a plain string, so the `{...}` placeholders were printed
            # verbatim instead of the configured sizes; now an f-string.
            raise ValueError(
                f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
                f"                   : {self.config.num_attention_heads}"
            )

        # Query projection.
        self.query = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        # Key projection.
        self.key = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        # Value projection.
        self.value = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )

        # Whether rotary embeddings are also applied to the value states.
        self.rotary_value = self.config.rotary_value

    def __call__(
        self,
        hidden_states,
        attention_mask,
        sinusoidal_pos,
        layer_head_mask,
        deterministic=True,
        output_attentions: bool = False,
    ):
        """Run rotary self-attention.

        Returns ``(attn_output,)`` or ``(attn_output, attn_weights)`` when
        ``output_attentions`` is True.
        """
        head_dim = self.config.hidden_size // self.config.num_attention_heads

        # Project and reshape each of Q/K/V to (batch, seq_len, num_heads, head_dim).
        query_states = self.query(hidden_states).reshape(
            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
        )
        value_states = self.value(hidden_states).reshape(
            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
        )
        key_states = self.key(hidden_states).reshape(
            hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
        )

        # Apply rotary position embeddings (also to values when configured).
        if sinusoidal_pos is not None:
            if self.rotary_value:
                query_states, key_states, value_states = self.apply_rotary_position_embeddings(
                    sinusoidal_pos, query_states, key_states, value_states
                )
            else:
                query_states, key_states = self.apply_rotary_position_embeddings(
                    sinusoidal_pos, query_states, key_states
                )

        # Convert the boolean attention mask into an additive attention bias:
        # 0 where attention is allowed, a large negative number where it is masked.
        if attention_mask is not None:
            # Broadcastable shape (batch, 1, 1, seq_len) against the attention weights.
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
            attention_bias = lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
            )
        else:
            attention_bias = None

        # A dropout RNG is only needed when training with attention dropout enabled.
        dropout_rng = None
        if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
            dropout_rng = self.make_rng("dropout")

        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.config.attention_probs_dropout_prob,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )

        # Optionally mask out whole attention heads.
        if layer_head_mask is not None:
            attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)

        # Weighted sum over values, then merge the head dimension back.
        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
        attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))

        outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
        return outputs

    @staticmethod
    def apply_rotary_position_embeddings(sinusoidal_pos, query_layer, key_layer, value_layer=None):
        """Rotate query/key (and optionally value) states by the sinusoidal positions.

        FIX: the ``def`` line was missing after ``@staticmethod`` in the previous
        revision (a syntax error); it is restored here with the signature implied
        by the body and the call sites in ``__call__``.
        """
        # Split the table into its sine and cosine halves, then interleave each
        # half back up to the full head dimension.
        sin, cos = sinusoidal_pos.split(2, axis=-1)
        sin_pos = jnp.stack([sin, sin], axis=-1).reshape(sinusoidal_pos.shape)
        cos_pos = jnp.stack([cos, cos], axis=-1).reshape(sinusoidal_pos.shape)

        def rotate_layer(layer, sin_pos, cos_pos):
            # Pair-wise rotation: (-x2, x1) interleaved back into the original layout.
            rotate_half_layer = jnp.stack([-layer[..., 1::2], layer[..., ::2]], axis=-1).reshape(layer.shape)
            rotary_matrix_cos = jnp.einsum("bslh,...sh->bslh", layer, cos_pos)
            rotary_matrix_sin = jnp.einsum("bslh,...sh->bslh", rotate_half_layer, sin_pos)
            return rotary_matrix_cos + rotary_matrix_sin

        query_layer = rotate_layer(query_layer, sin_pos, cos_pos)
        key_layer = rotate_layer(key_layer, sin_pos, cos_pos)
        if value_layer is not None:
            value_layer = rotate_layer(value_layer, sin_pos, cos_pos)
            return query_layer, key_layer, value_layer
        return query_layer, key_layer
# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->RoFormer
class FlaxRoFormerSelfOutput(nn.Module):
    """Projection + dropout + residual LayerNorm applied after self-attention."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        # Output projection back to the hidden size, normally initialized.
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
        # Project, drop, then add the residual and normalize.
        projected = self.dense(hidden_states)
        projected = self.dropout(projected, deterministic=deterministic)
        return self.LayerNorm(projected + input_tensor)


class FlaxRoFormerAttention(nn.Module):
    """Self-attention sub-block: rotary attention followed by the residual output layer."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.self = FlaxRoFormerSelfAttention(self.config, dtype=self.dtype)
        self.output = FlaxRoFormerSelfOutput(self.config, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask,
        sinusoidal_pos,
        layer_head_mask,
        deterministic=True,
        output_attentions: bool = False,
    ):
        # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
        # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
        # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            sinusoidal_pos,
            layer_head_mask=layer_head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
        )
        # Feed the raw attention output through the residual projection layer.
        hidden_states = self.output(self_outputs[0], hidden_states, deterministic=deterministic)

        if output_attentions:
            # Also surface the attention weights computed by the inner layer.
            return (hidden_states, self_outputs[1])
        return (hidden_states,)


# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->RoFormer
class FlaxRoFormerIntermediate(nn.Module):
    """Feed-forward expansion: dense projection to the intermediate size plus activation."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.intermediate_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        # Activation function selected by name from the configuration.
        self.activation = ACT2FN[self.config.hidden_act]

    def __call__(self, hidden_states):
        # Expand to the intermediate size, then apply the nonlinearity.
        return self.activation(self.dense(hidden_states))


# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->RoFormer
class FlaxRoFormerOutput(nn.Module):
    """Feed-forward contraction: project back to hidden size, dropout, residual LayerNorm."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(self, hidden_states, attention_output, deterministic: bool = True):
        # Project down, apply dropout, then add the attention residual and normalize.
        projected = self.dense(hidden_states)
        projected = self.dropout(projected, deterministic=deterministic)
        return self.LayerNorm(projected + attention_output)


class FlaxRoFormerLayer(nn.Module):
    """One transformer layer: rotary self-attention followed by the feed-forward block."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.attention = FlaxRoFormerAttention(self.config, dtype=self.dtype)
        self.intermediate = FlaxRoFormerIntermediate(self.config, dtype=self.dtype)
        self.output = FlaxRoFormerOutput(self.config, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask,
        sinusiodal_pos,  # NOTE(review): misspelling of "sinusoidal_pos"; kept to avoid breaking keyword callers
        layer_head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
    ):
        attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            sinusiodal_pos,
            layer_head_mask=layer_head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
        )
        attention_output = attention_outputs[0]

        # Feed-forward: expand to the intermediate size, then project back with a residual.
        hidden_states = self.intermediate(attention_output)
        hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)

        if output_attentions:
            # Pass the attention weights through alongside the layer output.
            return (hidden_states, attention_outputs[1])
        return (hidden_states,)


class FlaxRoFormerLayerCollection(nn.Module):
    """The stack of ``num_hidden_layers`` RoFormer layers."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        # One named sub-layer per hidden layer; names are the layer indices.
        self.layers = [
            FlaxRoFormerLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
        ]

    def __call__(
        self,
        hidden_states,
        attention_mask,
        sinusoidal_pos,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Accumulators are only allocated when the corresponding output is requested.
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        # A per-layer head mask must provide exactly one entry per layer.
        if head_mask is not None and head_mask.shape[0] != (len(self.layers)):
            raise ValueError(
                f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.shape[0]}."
            )

        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                # Record the hidden states *entering* this layer.
                all_hidden_states += (hidden_states,)

            layer_outputs = layer(
                hidden_states,
                attention_mask,
                sinusoidal_pos,
                layer_head_mask=head_mask[i] if head_mask is not None else None,
                deterministic=deterministic,
                output_attentions=output_attentions,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions += (layer_outputs[1],)

        if output_hidden_states:
            # Also record the final hidden states.
            all_hidden_states += (hidden_states,)

        outputs = (hidden_states,)

        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
class FlaxRoFormerEncoder(nn.Module):
    """RoFormer encoder: a precomputed sinusoidal position table plus the layer stack."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        # One sinusoidal row per position; the feature size is the per-head dimension.
        self.embed_positions = create_sinusoidal_positions(
            self.config.max_position_embeddings, self.config.hidden_size // self.config.num_attention_heads
        )
        self.layer = FlaxRoFormerLayerCollection(self.config, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Slice the position table down to the actual sequence length.
        sinusoidal_pos = self.embed_positions[: hidden_states.shape[1], :]

        # Delegate the full forward pass to the layer collection.
        return self.layer(
            hidden_states,
            attention_mask,
            sinusoidal_pos,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPredictionHeadTransform with Bert->RoFormer
class FlaxRoFormerPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied before the LM decoder."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
        # Activation selected by name from the configuration.
        self.activation = ACT2FN[self.config.hidden_act]
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(self, hidden_states):
        transformed = self.dense(hidden_states)
        transformed = self.activation(transformed)
        return self.LayerNorm(transformed)


# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLMPredictionHead with Bert->RoFormer
class FlaxRoFormerLMPredictionHead(nn.Module):
    """Masked-LM head: transform, decode to vocabulary logits, and add a bias."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32
    bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros  # initializer for the vocab bias

    def setup(self):
        self.transform = FlaxRoFormerPredictionHeadTransform(self.config, dtype=self.dtype)
        # The decoder has no bias of its own; a standalone bias parameter is added below.
        self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False)
        self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))

    def __call__(self, hidden_states, shared_embedding=None):
        hidden_states = self.transform(hidden_states)

        if shared_embedding is not None:
            # Tie the decoder weights to the (transposed) input word embedding matrix.
            hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
        else:
            hidden_states = self.decoder(hidden_states)

        # Add the standalone vocabulary bias, cast to the computation dtype.
        return hidden_states + jnp.asarray(self.bias, self.dtype)


# Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOnlyMLMHead with Bert->RoFormer
class FlaxRoFormerOnlyMLMHead(nn.Module):
    """Thin wrapper exposing only the MLM prediction head."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.predictions = FlaxRoFormerLMPredictionHead(self.config, dtype=self.dtype)

    def __call__(self, hidden_states, shared_embedding=None):
        # Delegate directly to the prediction head, forwarding any tied embeddings.
        return self.predictions(hidden_states, shared_embedding=shared_embedding)
class FlaxRoFormerClassificationHead(nn.Module):
    """Sentence-level classification head operating on the first ([CLS]) token."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.out_proj = nn.Dense(
            self.config.num_labels,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        # Uses the model's configured hidden activation for the head as well.
        self.activation = ACT2FN[self.config.hidden_act]

    def __call__(self, hidden_states, deterministic=True):
        # Pool by taking the first token (equivalent to [CLS]).
        pooled = hidden_states[:, 0, :]
        pooled = self.dropout(pooled, deterministic=deterministic)
        pooled = self.dense(pooled)
        pooled = self.activation(pooled)
        pooled = self.dropout(pooled, deterministic=deterministic)
        return self.out_proj(pooled)


class FlaxRoFormerPreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RoFormerConfig
    base_model_prefix = "roformer"
    module_class: nn.Module = None  # set by each concrete subclass

    def __init__(
        self,
        config: RoFormerConfig,
        input_shape: Tuple = (1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        # Instantiate the concrete Flax module and hand everything to the base class,
        # which manages parameter storage and (optionally) initialization.
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        """Initialize (or complete) the model parameters.

        Runs the module's ``init`` on dummy inputs of ``input_shape``. When
        ``params`` is given, only the keys recorded in ``self._missing_keys``
        are filled in from the fresh random initialization.
        """
        # Dummy input tensors used purely to trace shapes during init.
        input_ids = jnp.zeros(input_shape, dtype="i4")
        token_type_ids = jnp.zeros_like(input_ids)
        attention_mask = jnp.ones_like(input_ids)
        head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))

        # Separate RNG streams for parameter init and dropout.
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(
            rngs, input_ids, attention_mask, token_type_ids, head_mask, return_dict=False
        )["params"]

        if params is not None:
            # Splice randomly-initialized values into the provided params for any
            # keys missing from the loaded checkpoint, then re-freeze the tree.
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    # Forward docstring is generated from the shared inputs template.
    @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def __call__(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        head_mask=None,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        # Fall back to the config defaults for any unspecified output options.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # Default token_type_ids to all zeros (single segment).
        if token_type_ids is None:
            token_type_ids = jnp.zeros_like(input_ids)

        # Default attention_mask to all ones (attend everywhere).
        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)

        # Default head_mask to all ones (keep every head in every layer).
        if head_mask is None:
            head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))

        # Collect any PRNG streams the module needs (currently only dropout).
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        # NOTE: the trailing positional args map to the module's
        # (deterministic, output_attentions, output_hidden_states, return_dict).
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(input_ids, dtype="i4"),
            jnp.array(attention_mask, dtype="i4"),
            jnp.array(token_type_ids, dtype="i4"),
            jnp.array(head_mask, dtype="i4"),
            not train,  # deterministic: dropout disabled unless training
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
        )
# The bare RoFormer encoder module: embeddings followed by the transformer encoder.
class FlaxRoFormerModule(nn.Module):
    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.embeddings = FlaxRoFormerEmbeddings(self.config, dtype=self.dtype)
        self.encoder = FlaxRoFormerEncoder(self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Embed the inputs, then run the encoder stack.
        embedded = self.embeddings(input_ids, token_type_ids, attention_mask, deterministic=deterministic)
        outputs = self.encoder(
            embedded,
            attention_mask,
            head_mask=head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = outputs[0]

        if not return_dict:
            # Tuple output: last hidden state followed by any extra encoder outputs.
            return (last_hidden_state,) + outputs[1:]

        return FlaxBaseModelOutput(
            last_hidden_state=last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


# The bare RoFormer transformer model (no task-specific head on top).
@add_start_docstrings(
    "The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top.",
    ROFORMER_START_DOCSTRING,
)
class FlaxRoFormerModel(FlaxRoFormerPreTrainedModel):
    # The concrete Flax module this pretrained wrapper instantiates.
    module_class = FlaxRoFormerModule


# Attach a usage example to the model's __call__ docstring.
append_call_sample_docstring(FlaxRoFormerModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC)


# Masked-LM training module: the RoFormer encoder topped with the MLM head.
class FlaxRoFormerForMaskedLMModule(nn.Module):
    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype)
        self.cls = FlaxRoFormerOnlyMLMHead(config=self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):  # FIX: the parameter list was never closed with "):" in the previous revision
        # Run the base model.
        outputs = self.roformer(
            input_ids,
            attention_mask,
            token_type_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]

        # When word embeddings are tied, reuse them as the MLM decoder weights.
        if self.config.tie_word_embeddings:
            shared_embedding = self.roformer.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
        else:
            shared_embedding = None

        # Compute the vocabulary logits.
        logits = self.cls(hidden_states, shared_embedding=shared_embedding)

        if not return_dict:
            return (logits,) + outputs[1:]

        return FlaxMaskedLMOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings("""RoFormer Model with a `language modeling` head on top.""", ROFORMER_START_DOCSTRING)
class FlaxRoFormerForMaskedLM(FlaxRoFormerPreTrainedModel):
    # Thin public wrapper; the actual computation lives in the module class.
    module_class = FlaxRoFormerForMaskedLMModule

# 添加用于RoFormer Masked Language Modeling模型的起始文档字符串,继承自FlaxRoFormerPreTrainedModel


# Attach a usage example to FlaxRoFormerForMaskedLM's docstring (checkpoint,
# expected output class, config name, and the mask token used in the sample).
append_call_sample_docstring(
    FlaxRoFormerForMaskedLM,
    _CHECKPOINT_FOR_DOC,
    FlaxMaskedLMOutput,
    _CONFIG_FOR_DOC,
    mask="<mask>",
)

# 添加调用示例的文档字符串,用于FlaxRoFormerForMaskedLM模型,包括检查点、输出、配置和掩码信息


class FlaxRoFormerForSequenceClassificationModule(nn.Module):
    """RoFormer backbone plus a sequence-level classification head."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # Encoder and task head share the same config and dtype.
        self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype)
        self.classifier = FlaxRoFormerClassificationHead(config=self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Run the encoder, then classify from its last hidden states.
        encoder_outputs = self.roformer(
            input_ids,
            attention_mask,
            token_type_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        logits = self.classifier(encoder_outputs[0], deterministic=deterministic)

        if return_dict:
            return FlaxSequenceClassifierOutput(
                logits=logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        # Tuple form: logits followed by the remaining encoder outputs.
        return (logits,) + encoder_outputs[1:]

# RoFormer用于序列分类任务的模块定义,设置了RoFormer模型和分类器头部,支持返回字典或元组格式的输出


@add_start_docstrings(
    """
    RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    ROFORMER_START_DOCSTRING,
)
class FlaxRoFormerForSequenceClassification(FlaxRoFormerPreTrainedModel):
    # Thin public wrapper; the actual computation lives in the module class.
    module_class = FlaxRoFormerForSequenceClassificationModule

# 添加RoFormer序列分类/回归模型的起始文档字符串,继承自FlaxRoFormerPreTrainedModel,支持GLUE任务等应用


# Attach a usage example (checkpoint, output class, config) to the class docstring.
append_call_sample_docstring(
    FlaxRoFormerForSequenceClassification,
    _CHECKPOINT_FOR_DOC,
    FlaxSequenceClassifierOutput,
    _CONFIG_FOR_DOC,
)

# 添加调用示例的文档字符串,用于FlaxRoFormerForSequenceClassification模型,包括检查点、输出和配置信息


class FlaxRoFormerForMultipleChoiceModule(nn.Module):
    """RoFormer with a multiple-choice head (Dense(1) over a pooled token).

    Inputs are shaped (batch, num_choices, seq_len); the choices are flattened
    into the batch dimension before the encoder, and the per-choice scores are
    reshaped back to (batch, num_choices) at the end.
    """

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.classifier = nn.Dense(1, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Fix: the original had a duplicated, mis-indented `):` line after the
        # signature, which was a syntax error.
        num_choices = input_ids.shape[1]
        # Flatten (batch, num_choices, seq_len) -> (batch * num_choices, seq_len).
        input_ids = input_ids.reshape(-1, input_ids.shape[-1])
        attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1])
        token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1])

        # Model
        outputs = self.roformer(
            input_ids,
            attention_mask,
            token_type_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Equivalent of sequence_summary in the PyTorch implementation:
        # pool by taking the last token's hidden state.
        hidden_states = outputs[0]
        pooled_output = hidden_states[:, -1]
        pooled_output = self.dropout(pooled_output, deterministic=deterministic)

        logits = self.classifier(pooled_output)

        # Back to one score per choice: (batch, num_choices).
        reshaped_logits = logits.reshape(-1, num_choices)

        if not return_dict:
            return (reshaped_logits,) + outputs[2:]

        return FlaxMultipleChoiceModelOutput(
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    ROFORMER_START_DOCSTRING,
)
class FlaxRoFormerForMultipleChoice(FlaxRoFormerPreTrainedModel):
    # Thin public wrapper; the actual computation lives in the module class.
    module_class = FlaxRoFormerForMultipleChoiceModule



# Multiple-choice inputs are 3-D, so override the inputs docstring shape.
overwrite_call_docstring(
    FlaxRoFormerForMultipleChoice, ROFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)



# Attach a usage example (checkpoint, output class, config) to the class docstring.
append_call_sample_docstring(
    FlaxRoFormerForMultipleChoice,
    _CHECKPOINT_FOR_DOC,
    FlaxMultipleChoiceModelOutput,
    _CONFIG_FOR_DOC,
)



class FlaxRoFormerForTokenClassificationModule(nn.Module):
    """RoFormer with a per-token classification head (dropout + Dense)."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Encode, then score every token position.
        encoder_outputs = self.roformer(
            input_ids,
            attention_mask,
            token_type_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = self.dropout(encoder_outputs[0], deterministic=deterministic)
        logits = self.classifier(sequence_output)

        if return_dict:
            return FlaxTokenClassifierOutput(
                logits=logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        # Tuple form: logits followed by the remaining encoder outputs.
        return (logits,) + encoder_outputs[1:]



@add_start_docstrings(
    """
    RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    ROFORMER_START_DOCSTRING,
)
class FlaxRoFormerForTokenClassification(FlaxRoFormerPreTrainedModel):
    # Thin public wrapper; the actual computation lives in the module class.
    module_class = FlaxRoFormerForTokenClassificationModule



# Attach a usage example (checkpoint, output class, config) to the class docstring.
append_call_sample_docstring(
    FlaxRoFormerForTokenClassification,
    _CHECKPOINT_FOR_DOC,
    FlaxTokenClassifierOutput,
    _CONFIG_FOR_DOC,
)



class FlaxRoFormerForQuestionAnsweringModule(nn.Module):
    """RoFormer with a span-extraction QA head producing start/end logits."""

    config: RoFormerConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype)
        self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Encode the inputs with the RoFormer backbone.
        encoder_outputs = self.roformer(
            input_ids,
            attention_mask,
            token_type_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Project each token to num_labels scores, split into start/end
        # logits, and drop the trailing singleton axis of each.
        span_logits = self.qa_outputs(encoder_outputs[0])
        start_logits, end_logits = span_logits.split(self.config.num_labels, axis=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        if return_dict:
            return FlaxQuestionAnsweringModelOutput(
                start_logits=start_logits,
                end_logits=end_logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        # Tuple form: both logits followed by the remaining encoder outputs.
        return (start_logits, end_logits) + encoder_outputs[1:]
@add_start_docstrings(
    """
    RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    ROFORMER_START_DOCSTRING,
)


# The decorator above attaches the task description plus the shared
# ROFORMER_START_DOCSTRING preamble to this wrapper class; the actual
# computation is implemented by `module_class`.
class FlaxRoFormerForQuestionAnswering(FlaxRoFormerPreTrainedModel):
    module_class = FlaxRoFormerForQuestionAnsweringModule



# Attach a usage example (checkpoint, output class, config) to the class docstring.
append_call_sample_docstring(
    FlaxRoFormerForQuestionAnswering,
    _CHECKPOINT_FOR_DOC,
    FlaxQuestionAnsweringModelOutput,
    _CONFIG_FOR_DOC,
)



# 调用 append_call_sample_docstring 函数,将样例调用的文档字符串添加到 FlaxRoFormerForQuestionAnswering 类中,
# 用于说明如何调用该类的样例,并引用了 _CHECKPOINT_FOR_DOC 和 _CONFIG_FOR_DOC 变量。

.\models\roformer\modeling_roformer.py

# coding=utf-8
# 设定文件编码为 UTF-8

# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
# 版权声明:2021 年由 HuggingFace Inc. 团队保留所有权利

# Licensed under the Apache License, Version 2.0 (the "License");
# 根据 Apache License, Version 2.0 许可证授权(“许可证”);

# you may not use this file except in compliance with the License.
# 除非遵守许可证,否则不得使用此文件

# You may obtain a copy of the License at
# 您可以在以下网址获取许可证副本:

#     http://www.apache.org/licenses/LICENSE-2.0
#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# 除非法律要求或书面同意,否则根据许可证分发的软件都是基于“原样”分发的,无论是明示还是隐含的任何形式的担保或条件

# See the License for the specific language governing permissions and
# limitations under the License.
# 请查阅许可证以了解特定语言下的权限和限制

""" PyTorch RoFormer model."""
# PyTorch RoFormer 模型

import math
import os
from typing import Optional, Tuple, Union

import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_roformer import RoFormerConfig

# Module-level logger for this file.
logger = logging.get_logger(__name__)

# Names referenced by the docstring-sample helpers in this module.
_CHECKPOINT_FOR_DOC = "junnyu/roformer_chinese_base"
_CONFIG_FOR_DOC = "RoFormerConfig"

ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "junnyu/roformer_chinese_small",
    "junnyu/roformer_chinese_base",
    "junnyu/roformer_chinese_char_small",
    "junnyu/roformer_chinese_char_base",
    "junnyu/roformer_small_discriminator",
    "junnyu/roformer_small_generator",
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
]

# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->RoFormer
class RoFormerSinusoidalPositionalEmbedding(nn.Embedding):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
        # Build a regular embedding table, then overwrite its weights with the
        # fixed (non-trainable) sinusoidal pattern.
        super().__init__(num_positions, embedding_dim)
        self.weight = self._init_weight(self.weight)

    @staticmethod
    def _init_weight(out: nn.Parameter) -> nn.Parameter:
        """
        Initialize the weight matrix like XLM's create_sinusoidal_embeddings, except that features are not
        interleaved: the cosine features occupy the second half of the vector ([dim // 2:]).
        """
        n_pos, dim = out.shape
        # Classic sinusoidal table: pos / 10000^(2i/dim) per feature pair.
        position_enc = np.array(
            [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
        )
        out.requires_grad = False  # set early to avoid an error in pytorch-1.8+
        sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
        # Sine features fill the first half of the feature dimension...
        out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
        # ...and cosine features fill the second half.
        out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
        out.detach_()  # detach so the table never tracks gradients
        return out

    @torch.no_grad()
    def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor:
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        bsz, seq_len = input_ids_shape[:2]
        # Positions are offset by the length of any cached past keys/values.
        positions = torch.arange(
            past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
        )
        return super().forward(positions)
def load_tf_weights_in_roformer(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
        import re  # regex matching of TF variable-name components

        import numpy as np  # numeric conversion of loaded arrays
        import tensorflow as tf  # reading the TF checkpoint
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        # Checkpoints were exported under the "bert" scope; map it to "roformer".
        names.append(name.replace("bert", "roformer"))
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split("/")

        # adam_v / adam_m are AdamWeightDecayOptimizer slot variables; they are
        # not needed when using the pretrained weights, so skip them.
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue

        # Walk the PyTorch module tree following the TF variable path.
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # e.g. "layer_3" -> ["layer", "3"]: attribute name plus index.
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]

            # Map TF scope names onto PyTorch attribute names.
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` advances the inner loop, so the
                    # remaining path components still run against the stale pointer;
                    # the upstream implementation has the same quirk.
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue

            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]

        # After the loop, m_name is the last path component of the variable.
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)  # TF kernels are transposed vs torch Linear weights

        try:
            if not pointer.shape == array.shape:
                raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except AssertionError as e:
            # NOTE(review): a ValueError is raised above but AssertionError is
            # caught here — upstream carries the same mismatch.
            e.args += (pointer.shape, array.shape)
            raise

        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)

    return model
# RoFormerSelfAttention: multi-head self-attention with rotary position embeddings.
class RoFormerSelfAttention(nn.Module):
    # NOTE(review): parts of this class body appear to have been spliced in from
    # other classes during extraction (see the notes on `forward` and
    # `apply_rotary_position_embeddings` below) — verify against the upstream file.
    def __init__(self, config):
        super().__init__()
        # The hidden size must divide evenly across attention heads unless an
        # explicit `embedding_size` is configured.
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        # Per-head geometry: total projection width = heads * head size.
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # Linear projections for queries, keys and values.
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        # Dropout applied to the attention probabilities.
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

        # Decoder flag and whether rotary embeddings are also applied to values.
        self.is_decoder = config.is_decoder
        self.rotary_value = config.rotary_value

    def transpose_for_scores(self, x):
        # Reshape (..., all_head_size) -> (batch, heads, seq, head_size).
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        sinusoidal_pos=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # NOTE(review): the body below references `input_ids`, `inputs_embeds`,
        # `token_type_ids`, `self.word_embeddings`, `self.token_type_embeddings`
        # and `self.LayerNorm`, none of which exist on this class or in this
        # signature — it looks like an embeddings-module forward was pasted here
        # and the real self-attention forward is missing. Restore from upstream.
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=inputs_embeds.device)

        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings

        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings

    # Apply rotary position embeddings to query/key (and optionally value) tensors.
    # NOTE(review): defined without `self` and without a @staticmethod decorator —
    # upstream declares this as a @staticmethod; calling it through an instance
    # here would mis-bind the first argument. Verify against upstream.
    def apply_rotary_position_embeddings(sinusoidal_pos, query_layer, key_layer, value_layer=None):
        # Split the cached sinusoids into their sin and cos halves.
        sin, cos = sinusoidal_pos.chunk(2, dim=-1)
        # Duplicate each sin/cos entry so it lines up with (even, odd) feature pairs.
        sin_pos = torch.stack([sin, sin], dim=-1).reshape_as(sinusoidal_pos)
        cos_pos = torch.stack([cos, cos], dim=-1).reshape_as(sinusoidal_pos)
        # Rotate query features: pairs become (-q_odd, q_even), then recombine.
        rotate_half_query_layer = torch.stack([-query_layer[..., 1::2], query_layer[..., ::2]], dim=-1).reshape_as(
            query_layer
        )
        query_layer = query_layer * cos_pos + rotate_half_query_layer * sin_pos
        # Same rotation applied to the keys.
        rotate_half_key_layer = torch.stack([-key_layer[..., 1::2], key_layer[..., ::2]], dim=-1).reshape_as(key_layer)
        key_layer = key_layer * cos_pos + rotate_half_key_layer * sin_pos
        if value_layer is not None:
            # Optionally rotate the values as well (config.rotary_value).
            rotate_half_value_layer = torch.stack([-value_layer[..., 1::2], value_layer[..., ::2]], dim=-1).reshape_as(
                value_layer
            )
            value_layer = value_layer * cos_pos + rotate_half_value_layer * sin_pos
            return query_layer, key_layer, value_layer
        # Without a value tensor, return only the rotated query and key.
        return query_layer, key_layer
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->RoFormer
class RoFormerSelfOutput(nn.Module):
    """Post-attention block: linear projection, dropout, then residual add + LayerNorm."""

    def __init__(self, config):
        super().__init__()
        # Projection back to the hidden size, followed by the standard
        # dropout / add-&-norm of the transformer sublayer.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection with the sublayer input, then layer norm.
        return self.LayerNorm(projected + input_tensor)


class RoFormerAttention(nn.Module):
    """Attention sublayer: RoFormerSelfAttention followed by the residual output block."""

    def __init__(self, config):
        super().__init__()
        self.self = RoFormerSelfAttention(config)
        self.output = RoFormerSelfOutput(config)
        # Heads removed so far; kept so repeated pruning stays consistent.
        self.pruned_heads = set()

    # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
    def prune_heads(self, heads):
        # Nothing to do for an empty request.
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune the q/k/v projections and the output projection's input dim.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper-parameters and remember which heads are gone.
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    # End Copy
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        sinusoidal_pos=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # Run self-attention, then the projection/residual block on its first
        # output; pass through any extra outputs (attention probs, cache).
        attn_results = self.self(
            hidden_states,
            attention_mask,
            sinusoidal_pos,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(attn_results[0], hidden_states)
        return (attention_output,) + attn_results[1:]


# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->RoFormer
class RoFormerIntermediate(nn.Module):
    """Feed-forward expansion: Linear(hidden -> intermediate) plus the activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # `hidden_act` may be a key into the ACT2FN registry or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Expand, then apply the non-linearity.
        return self.intermediate_act_fn(self.dense(hidden_states))
# Output half of the feed-forward block.
class RoFormerOutput(nn.Module):
    """Feed-forward contraction: Linear(intermediate -> hidden), dropout, then
    residual add + LayerNorm with the sublayer input."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        projected = self.dropout(self.dense(hidden_states))
        # Add & norm against the block's input.
        return self.LayerNorm(projected + input_tensor)


# One full transformer layer of the RoFormer encoder/decoder stack.
class RoFormerLayer(nn.Module):
    """Self-attention (plus optional cross-attention when configured as a
    decoder) followed by the feed-forward block, with chunked feed-forward
    support via `apply_chunking_to_forward`."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1  # sequence axis used by apply_chunking_to_forward
        self.attention = RoFormerAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        # Cross-attention only makes sense for decoder layers.
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = RoFormerAttention(config)
        self.intermediate = RoFormerIntermediate(config)
        self.output = RoFormerOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        sinusoidal_pos=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # Fix: the original was missing the `):` closing this signature,
        # making the class a syntax error.
        # Cached self-attention key/value pairs live at positions 0-1.
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None

        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            sinusoidal_pos,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # For decoders the last element is the updated key/value cache.
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # attention probs, if requested

        cross_attn_present_key_value = None

        # Decoder layers attend over the encoder states when provided.
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention "
                    "layers by setting `config.add_cross_attention=True`"
                )

            # Cached cross-attention key/value pairs live at positions 2-3.
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None

            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                sinusoidal_pos,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]

            # Append the cross-attention cache to the present key/value tuple.
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # Feed-forward, optionally chunked along the sequence dimension.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # Decoders additionally return the key/value cache as the last output.
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # Intermediate expansion followed by the output projection/residual.
        intermediate_output = self.intermediate(attention_output)
        return self.output(intermediate_output, attention_output)
class RoFormerEncoder(nn.Module):
    """Stack of RoFormerLayer blocks plus a shared sinusoidal position table."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # One sinusoidal table shared by all layers, sized per attention head.
        self.embed_positions = RoFormerSinusoidalPositionalEmbedding(
            config.max_position_embeddings, config.hidden_size // config.num_attention_heads
        )
        # The transformer layers.
        self.layer = nn.ModuleList([RoFormerLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally to trade compute for memory during training.
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # NOTE(review): the real forward body is missing in this chunk (only a
        # placeholder remains); the upstream implementation iterates over
        # `self.layer` and returns a BaseModelOutputWithPastAndCrossAttentions.
        # Restore from upstream before use.
        pass  # placeholder — actual forward implementation not present in this chunk


class RoFormerPredictionHeadTransform(nn.Module):
    """Transform hidden states ahead of the LM prediction head: dense -> activation -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        # Project from the hidden size down/up to the embedding size used by the decoder.
        self.dense = nn.Linear(config.hidden_size, config.embedding_size)
        # A string selects a registered activation from ACT2FN; a callable is used directly.
        self.transform_act_fn = ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        # Normalize over the embedding dimension.
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        """Apply the dense projection, the activation, and layer normalization."""
        transformed = self.dense(hidden_states)
        transformed = self.transform_act_fn(transformed)
        return self.LayerNorm(transformed)


class RoFormerLMPredictionHead(nn.Module):
    """Masked-LM prediction head: transform the hidden states, then decode to vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        self.transform = RoFormerPredictionHeadTransform(config)
        # The decoder weight is tied to the input embeddings elsewhere; it only carries
        # a separate per-token output bias, so `bias=False` on the Linear itself.
        self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the bias onto the decoder so `resize_token_embeddings` resizes both together.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        """Return vocabulary logits for each position of `hidden_states`."""
        return self.decoder(self.transform(hidden_states))


# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead and adapted for RoFormer
class RoFormerOnlyMLMHead(nn.Module):
    """Thin wrapper exposing only the MLM prediction head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = RoFormerLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        """Return per-token vocabulary logits for `sequence_output`."""
        return self.predictions(sequence_output)


class RoFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading
    and loading pretrained models.
    """

    config_class = RoFormerConfig
    load_tf_weights = load_tf_weights_in_roformer
    base_model_prefix = "roformer"
    # This model family supports gradient checkpointing.
    supports_gradient_checkpointing = True
    
    # Weight-initialization hook invoked by the PreTrainedModel machinery.
    def _init_weights(self, module):
        """Initialize the weights"""
        # Linear layers: normal(0, initializer_range) weights, zero bias.
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        # Sinusoidal positional embeddings: deliberately left untouched.
        # NOTE(review): this branch must precede the nn.Embedding branch — presumably the
        # positional embedding subclasses nn.Embedding and its table must not be re-randomized; confirm.
        elif isinstance(module, RoFormerSinusoidalPositionalEmbedding):
            pass
        # Regular embeddings: normal init; zero the padding row when one is defined.
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        # LayerNorm: zero bias, unit scale.
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Shared docstring prologue (model description + `config` parameter), injected into the
# RoFormer model class docstrings via `add_start_docstrings`.
ROFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RoFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Shared docstring for `forward` inputs; the `{0}` placeholder is filled per class with the
# expected input shape (e.g. "batch_size, sequence_length").
ROFORMER_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    # 定义 RoFormer 模型的基础类,输出未经特定头部处理的原始隐藏状态
    "The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top.",
    # 引入 RoFormer 的文档字符串开头
    ROFORMER_START_DOCSTRING,
)
# RoFormer 模型类,继承自 RoFormerPreTrainedModel 类
class RoFormerModel(RoFormerPreTrainedModel):
    """
    
    模型可以作为编码器(仅自注意力)或解码器运行,当作解码器时,在自注意力层之间添加了交叉注意力层,
    遵循 [Attention is all you need](https://arxiv.org/abs/1706.03762) 中描述的架构,作者为 Ashish Vaswani,
    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser 和 Illia Polosukhin.
    
    若要作为解码器运行,模型需要用配置中的 `is_decoder` 参数初始化为 `True`。
    若要在 Seq2Seq 模型中使用,模型需要用 `is_decoder` 参数和 `add_cross_attention` 参数同时初始化为 `True`;
    此时预期在前向传播中输入 `encoder_hidden_states`。
    """

    def __init__(self, config):
        """Build the embeddings, an optional embedding->hidden projection, and the encoder stack."""
        super().__init__(config)
        self.config = config
        self.embeddings = RoFormerEmbeddings(config)

        # Only needed when the embedding width differs from the hidden width.
        if config.embedding_size != config.hidden_size:
            self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)

        self.encoder = RoFormerEncoder(config)

        # Initialize weights and apply any final post-processing.
        self.post_init()

    def get_input_embeddings(self):
        # Return the word-embedding module used to look up input tokens.
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        # Replace the word-embedding module with `value`.
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        剪枝模型中的注意力头。heads_to_prune: {层编号: 要在该层中剪枝的头列表} 参见基类 PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            # 对每一层,调用 RoFormerEncoder 中相应的注意力层对象的 prune_heads 方法
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    # State-dict keys whose weights are tied/shared (the MLM head's decoder weight and bias;
    # see `get_output_embeddings`/`set_output_embeddings`).
    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    def __init__(self, config):
        """Create a RoFormer encoder with a masked-language-modeling head on top."""
        super().__init__(config)

        # Masked LM relies on bidirectional self-attention; warn when configured as a decoder.
        if config.is_decoder:
            logger.warning(
                "If you want to use `RoFormerForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        # Base encoder plus the MLM-only prediction head.
        self.roformer = RoFormerModel(config)
        self.cls = RoFormerOnlyMLMHead(config)

        # Initialize weights and apply any final post-processing.
        self.post_init()

    # Return the output embedding layer (the MLM head's decoder projection).
    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    # Replace the MLM head's decoder projection with `new_embeddings`.
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    # Forward pass: encode with RoFormer, score every token with the MLM head, optionally compute loss.
    @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MaskedLMOutput, Tuple[torch.Tensor]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        # Fall back to the config default when return_dict is not specified.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Run the base RoFormer encoder.
        outputs = self.roformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token hidden states from the encoder.
        sequence_output = outputs[0]

        # Vocabulary logits from the MLM head.
        prediction_scores = self.cls(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            # Cross-entropy over the vocabulary; label -100 marks ignored (padding) positions.
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            # Tuple output: (loss?, logits, *encoder extras).
            output = (prediction_scores,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        # Structured output with loss, logits, hidden states and attentions.
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        """
        Append one dummy PAD token (with a masked-out attention slot) to the inputs so the MLM
        head has a position to predict during generation.
        """
        batch_size = input_ids.shape[0]

        # A PAD token must exist to serve as the dummy position.
        assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
        # Grow the attention mask by one all-zero column so the dummy token is ignored.
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        # Append a column of PAD ids to the input ids.
        pad_column = torch.full(
            (batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, pad_column], dim=1)

        # Inputs consumed by the generation loop.
        return {"input_ids": input_ids, "attention_mask": attention_mask}
# RoFormer with a language-modeling head on top, used for causal-LM (CLM) fine-tuning.
@add_start_docstrings(
    """RoFormer Model with a `language modeling` head on top for CLM fine-tuning.""", ROFORMER_START_DOCSTRING
)
class RoFormerForCausalLM(RoFormerPreTrainedModel):
    # State-dict keys whose weights are tied/shared (the MLM head's decoder weight and bias).
    _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"]

    def __init__(self, config):
        # Initialize the parent class with the given configuration.
        super().__init__(config)

        # Causal LM expects decoder mode; warn when the config says otherwise.
        if not config.is_decoder:
            logger.warning("If you want to use `RoFormerForCausalLM` as a standalone, add `is_decoder=True.`")

        # Base RoFormer encoder plus the MLM-style prediction head.
        self.roformer = RoFormerModel(config)
        self.cls = RoFormerOnlyMLMHead(config)

        # Initialize weights and apply any final post-processing.
        self.post_init()

    # Return the output embedding layer (the MLM head's decoder projection).
    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    # Replace the MLM head's decoder projection with `new_embeddings`.
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        # NOTE(review): this body looks like `prepare_inputs_for_generation` logic (it trims
        # input_ids against the cache and returns a generation-input dict) rather than an
        # actual forward computation — the document this code was extracted from appears to
        # have dropped the real forward body; verify against the upstream implementation.
        input_shape = input_ids.shape

        # Default to an all-ones attention mask when none is provided.
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)

        # When a cache is present, drop the prefix already covered by past_key_values.
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID.
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default behavior: keep only the final ID.
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]

        # Return the processed inputs.
        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
    # Reorder the cached states to match a new beam-search index order.
    def _reorder_cache(self, past_key_values, beam_idx):
        # Accumulate the reordered per-layer caches here.
        reordered_past = ()
        for layer_past in past_key_values:
            # Reorder the first two entries of each layer's cache (the cached key/value
            # tensors) along the batch dimension according to beam_idx.
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
                # Any remaining entries are kept untouched.
                + layer_past[2:],
            )
        # Return the fully reordered cache tuple.
        return reordered_past
class RoFormerClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        # Intermediate projection kept at the hidden size.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Dropout is applied both before the projection and before the final classifier.
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Final projection from hidden size to the number of labels.
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
        self.config = config

    def forward(self, features, **kwargs):
        """Classify a sequence from its first token's representation (the [CLS] position)."""
        cls_state = features[:, 0, :]  # first token stands in for the whole sentence
        cls_state = self.dropout(cls_state)
        cls_state = self.dense(cls_state)
        # Activation chosen by the model config.
        cls_state = ACT2FN[self.config.hidden_act](cls_state)
        cls_state = self.dropout(cls_state)
        return self.out_proj(cls_state)


@add_start_docstrings(
    """
    RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    ROFORMER_START_DOCSTRING,
)
# RoFormer for sequence-level classification or regression (linear head on the pooled output).
class RoFormerForSequenceClassification(RoFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Base RoFormer encoder.
        self.roformer = RoFormerModel(config)
        # Classification head for sequence-level tasks.
        self.classifier = RoFormerClassificationHead(config)

        # Initialize weights and apply any final post-processing.
        self.post_init()

    @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    # Forward pass: encode the inputs, classify from the sequence output, optionally compute loss.
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[SequenceClassifierOutput, Tuple[torch.Tensor]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        # Fall back to the config default when return_dict is not specified.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Run the base RoFormer encoder.
        outputs = self.roformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token hidden states from the encoder.
        sequence_output = outputs[0]

        # Classification logits from the head.
        logits = self.classifier(sequence_output)

        # Loss is only computed when labels are provided.
        loss = None

        if labels is not None:
            # Infer the problem type once (from num_labels and the label dtype) and cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            # Pick the loss function matching the problem type.
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    # Single-target regression: squeeze to 1-D before MSE.
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    # Multi-target regression.
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                # Standard cross-entropy over num_labels classes.
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                # Multi-label case: binary cross-entropy with logits.
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        # Tuple output when return_dict is disabled: (loss?, logits, *encoder extras).
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        # Structured output with loss, logits, hidden states and attentions.
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# RoFormer with a multiple-choice classification head (a linear layer on the summarized
# pooled output followed by a softmax), e.g. for RocStories/SWAG tasks.
@add_start_docstrings(
    """
    RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    ROFORMER_START_DOCSTRING,
)
class RoFormerForMultipleChoice(RoFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        # Base RoFormer encoder.
        self.roformer = RoFormerModel(config)
        # Summarizes the token sequence into a single vector per choice.
        self.sequence_summary = SequenceSummary(config)
        # Scores each choice with one logit.
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply any final post-processing.
        self.post_init()

    @add_start_docstrings_to_model_forward(
        ROFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    # Fix: the extracted source carried a stray duplicated `):` line between the parameter
    # list and the return annotation, which made this signature a syntax error.
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[MultipleChoiceModelOutput, Tuple[torch.Tensor]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        # Fall back to the config default when return_dict is not specified.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Number of answer choices (the second dimension of the inputs).
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        # Flatten (batch, choices, seq) into (batch * choices, seq) for the encoder.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        # Flatten the attention mask the same way.
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        # Flatten the token-type ids the same way.
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None

        # Flatten embedded inputs to (batch * choices, seq, hidden).
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        # Run the base RoFormer encoder on the flattened inputs.
        outputs = self.roformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token hidden states.
        sequence_output = outputs[0]

        # Summarize each (choice) sequence into one vector.
        pooled_output = self.sequence_summary(sequence_output)
        # One logit per choice.
        logits = self.classifier(pooled_output)
        # Un-flatten back to (batch, num_choices).
        reshaped_logits = logits.view(-1, num_choices)

        # Cross-entropy loss over choices when labels are provided.
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        # Tuple output when return_dict is disabled.
        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        # Structured output with loss, logits, hidden states and attentions.
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    ROFORMER_START_DOCSTRING,
)



class RoFormerForTokenClassification(RoFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # Base RoFormer encoder.
        self.roformer = RoFormerModel(config)
        # Dropout before the classifier.
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Maps each token's hidden state to per-class logits.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply any final post-processing.
        self.post_init()

    @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[TokenClassifierOutput, Tuple[torch.Tensor]]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Run the base RoFormer encoder.
        outputs = self.roformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token hidden states.
        sequence_output = outputs[0]

        # Regularize, then score each token.
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        # Token-level cross-entropy loss when labels are provided.
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        # Tuple output when return_dict is disabled.
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        # Structured output with loss, logits, hidden states and attentions.
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )



@add_start_docstrings(
    """
    RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    ROFORMER_START_DOCSTRING,
)
class RoFormerForQuestionAnswering(RoFormerPreTrainedModel):
    """RoFormer encoder topped with a linear layer that predicts answer-span start/end logits."""

    def __init__(self, config):
        # Initialize the parent PreTrainedModel machinery (config handling, weight init hooks).
        super().__init__(config)

        # Span extraction is always a two-way prediction: one logit column for the
        # start position, one for the end position.
        config.num_labels = 2
        self.num_labels = config.num_labels

        # Backbone encoder producing per-token hidden states.
        self.roformer = RoFormerModel(config)
        # Projects each token's hidden state to its (start, end) logits.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply any final processing defined by the base class.
        self.post_init()

    @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        # NOTE(fix): the closing `):` of the parameter list was missing in the source,
        # which made this class a syntax error.
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        # Fall back to the model config's default when the caller does not specify.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Run the RoFormer encoder.
        outputs = self.roformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token hidden states: (batch_size, sequence_length, hidden_size).
        sequence_output = outputs[0]

        # Project to span logits and split the last dim into start/end halves.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # On multi-GPU setups an extra dimension may be added; squeeze it away.
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions outside the sequence are clamped to `ignored_index` so the
            # loss function can skip them via ignore_index.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            # Cross-entropy over sequence positions, ignoring out-of-range targets;
            # total loss is the mean of the start and end losses.
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        # Tuple output when the caller opted out of the structured return type.
        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        # Structured output carrying loss, logits, hidden states, and attentions.
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
posted @ 2024-06-29 16:57  绝不原创的飞龙  阅读(3)  评论(0编辑  收藏  举报