Transformers Source Code Analysis (110)

.\models\t5\modeling_tf_t5.py

# coding=utf-8
# Copyright 2020 T5 Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 T5 model."""

from __future__ import annotations  # Ensures compatibility with type annotations in older Python versions

import copy
import itertools
import math
import warnings
from typing import Optional, Tuple, Union

import numpy as np
import tensorflow as tf
from tensorflow.compiler.tf2xla.python.xla import dynamic_slice

# Imports specific to the T5 model architecture from Hugging Face libraries
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
    TFBaseModelOutput,
    TFBaseModelOutputWithPastAndCrossAttentions,
    TFSeq2SeqLMOutput,
    TFSeq2SeqModelOutput,
)
from ...modeling_tf_utils import (
    TFCausalLanguageModelingLoss,
    TFModelInputType,
    TFPreTrainedModel,
    get_initializer,
    keras,
    keras_serializable,
    unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_t5 import T5Config

# Initialize logger for logging messages from this module
logger = logging.get_logger(__name__)

# List of pre-trained model names available in TF T5 from Hugging Face model hub
_CONFIG_FOR_DOC = "T5Config"

TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google-t5/t5-small",
    "google-t5/t5-base",
    "google-t5/t5-large",
    "google-t5/t5-3b",
    "google-t5/t5-11b",
    # See all T5 models at https://huggingface.co/models?filter=t5
]

####################################################
# TF 2.0 Models are constructed using Keras imperative API by sub-classing
# - keras.layers.Layer for the layers and
# - TFPreTrainedModel for the models (it-self a sub-class of keras.Model)
####################################################


class TFT5LayerNorm(keras.layers.Layer):
    def __init__(self, hidden_size, epsilon=1e-6, **kwargs):
        """
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
        """
        super().__init__(**kwargs)
        self.variance_epsilon = epsilon
        self.hidden_size = hidden_size

    def build(self, input_shape):
        """Build the layer-norm scale weight."""
        # A single learned scale vector; T5's RMS-style norm has no bias term
        self.weight = self.add_weight("weight", shape=(self.hidden_size,), initializer="ones")
        super().build(input_shape)

    def call(self, hidden_states):
        # Variance is the mean squared activation over the last axis (no mean subtraction)
        variance = tf.math.reduce_mean(tf.math.square(hidden_states), axis=-1, keepdims=True)
        # Normalize by the reciprocal root of the variance plus a small epsilon
        hidden_states = hidden_states * tf.math.rsqrt(variance + self.variance_epsilon)
        # Apply the learned per-dimension scale
        return self.weight * hidden_states
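
# Illustration (not part of the original file): T5's layer norm is an RMS norm,
# rescaling by the root mean square with no mean subtraction and no bias. The same
# computation in plain TensorFlow:
_x = tf.constant([[1.0, 2.0, 3.0]])
_rms = tf.sqrt(tf.reduce_mean(tf.square(_x), axis=-1, keepdims=True) + 1e-6)
print((_x / _rms).numpy())  # matches TFT5LayerNorm output when its weight is all ones
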
class TFT5DenseActDense(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        # Kernel initializers follow the Flax initialization: N(0, factor * d_model**-0.5)
        # for the input projection and N(0, factor * d_ff**-0.5) for the output projection
        wi_initializer = keras.initializers.RandomNormal(
            mean=0, stddev=config.initializer_factor * (config.d_model**-0.5)
        )
        wo_initializer = keras.initializers.RandomNormal(
            mean=0, stddev=config.initializer_factor * (config.d_ff**-0.5)
        )
        # Input projection up to the feed-forward dimension, without bias
        self.wi = keras.layers.Dense(
            config.d_ff, use_bias=False, name="wi", kernel_initializer=wi_initializer
        )  # Update init weights as in flax
        # Output projection back down to the model dimension, without bias
        self.wo = keras.layers.Dense(
            config.d_model, use_bias=False, name="wo", kernel_initializer=wo_initializer
        )  # Update init weights as in flax
        self.dropout = keras.layers.Dropout(config.dropout_rate)
        # Activation selected by config.dense_act_fn
        self.act = get_tf_activation(config.dense_act_fn)
        self.config = config

    def call(self, hidden_states, training=False):
        # Project up, apply the activation, dropout, then project back down
        hidden_states = self.wi(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = self.wo(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build wi and wo with their known input feature sizes
        if getattr(self, "wi", None) is not None:
            with tf.name_scope(self.wi.name):
                self.wi.build([None, None, self.config.d_model])
        if getattr(self, "wo", None) is not None:
            with tf.name_scope(self.wo.name):
                self.wo.build([None, None, self.config.d_ff])


class TFT5DenseGatedActDense(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        # Kernel initializers follow the Flax initialization: N(0, factor * d_model**-0.5)
        # for the input projections and N(0, factor * d_ff**-0.5) for the output projection
        wi_initializer = keras.initializers.RandomNormal(
            mean=0, stddev=config.initializer_factor * (config.d_model**-0.5)
        )
        wo_initializer = keras.initializers.RandomNormal(
            mean=0, stddev=config.initializer_factor * (config.d_ff**-0.5)
        )
        # Two parallel input projections: wi_0 feeds the activation, wi_1 the linear gate
        self.wi_0 = keras.layers.Dense(
            config.d_ff, use_bias=False, name="wi_0", kernel_initializer=wi_initializer
        )  # Update init weights as in flax
        self.wi_1 = keras.layers.Dense(
            config.d_ff, use_bias=False, name="wi_1", kernel_initializer=wi_initializer
        )  # Update init weights as in flax
        # Output projection back to the model dimension
        self.wo = keras.layers.Dense(
            config.d_model, use_bias=False, name="wo", kernel_initializer=wo_initializer
        )  # Update init weights as in flax
        self.dropout = keras.layers.Dropout(config.dropout_rate)
        self.act = get_tf_activation(config.dense_act_fn)
        self.config = config

    def call(self, hidden_states, training=False):
        # Activation branch (wi_0), gated element-wise by the linear branch (wi_1)
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        # Dropout, then project back to the model dimension
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = self.wo(hidden_states)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build each projection with its known input feature size
        if getattr(self, "wi_0", None) is not None:
            with tf.name_scope(self.wi_0.name):
                self.wi_0.build([None, None, self.config.d_model])
        if getattr(self, "wi_1", None) is not None:
            with tf.name_scope(self.wi_1.name):
                self.wi_1.build([None, None, self.config.d_model])
        if getattr(self, "wo", None) is not None:
            with tf.name_scope(self.wo.name):
                self.wo.build([None, None, self.config.d_ff])
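
# Illustration (not part of the original file): the gated variant computes
# act(x @ W_i0) * (x @ W_i1) before the output projection (the GEGLU pattern).
# A toy-sized sketch with made-up dimensions:
_d_model, _d_ff = 4, 8  # toy sizes, not real T5 defaults
_xg = tf.random.normal([2, 3, _d_model])
_wi_0 = keras.layers.Dense(_d_ff, use_bias=False)
_wi_1 = keras.layers.Dense(_d_ff, use_bias=False)
_wo = keras.layers.Dense(_d_model, use_bias=False)
print(_wo(tf.nn.gelu(_wi_0(_xg)) * _wi_1(_xg)).shape)  # (2, 3, 4): back to d_model
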
class TFT5LayerFF(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        # Pick the gated or plain feed-forward variant according to the config
        if config.is_gated_act:
            self.DenseReluDense = TFT5DenseGatedActDense(config, name="DenseReluDense")
        else:
            self.DenseReluDense = TFT5DenseActDense(config, name="DenseReluDense")

        # Pre-layer normalization and dropout around the feed-forward sub-layer
        self.layer_norm = TFT5LayerNorm(config.d_model, epsilon=config.layer_norm_epsilon, name="layer_norm")
        self.dropout = keras.layers.Dropout(config.dropout_rate)

    def call(self, hidden_states, training=False):
        # Pre-norm residual: normalize, feed forward, dropout, then add the input back
        normed_hidden_states = self.layer_norm(hidden_states)
        dense_output = self.DenseReluDense(normed_hidden_states, training=training)
        hidden_states = hidden_states + self.dropout(dense_output, training=training)
        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layer_norm", None) is not None:
            with tf.name_scope(self.layer_norm.name):
                self.layer_norm.build(None)
        if getattr(self, "DenseReluDense", None) is not None:
            with tf.name_scope(self.DenseReluDense.name):
                self.DenseReluDense.build(None)
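
# Illustration (not part of the original file): every T5 sub-layer follows the same
# pre-layer-norm residual pattern, y = x + dropout(sublayer(norm(x))). A generic
# sketch of that wrapper:
def _pre_norm_residual(x, sublayer, norm, dropout_rate=0.1, training=False):
    y = sublayer(norm(x))
    if training:
        y = tf.nn.dropout(y, rate=dropout_rate)
    return x + y
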


class TFT5Attention(keras.layers.Layer):
    # Class-level counter used to assign a unique id to every attention layer
    NEW_ID = itertools.count()

    def __init__(self, config, has_relative_attention_bias=False, **kwargs):
        super().__init__(**kwargs)
        # Unique id for this layer, drawn from the class-level counter
        self.layer_id = next(TFT5Attention.NEW_ID)
        self.is_decoder = config.is_decoder
        self.use_cache = config.use_cache
        # Whether this layer owns the relative attention bias table (set by the caller)
        self.has_relative_attention_bias = has_relative_attention_bias
        self.output_attentions = config.output_attentions

        # Relative-attention configuration
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.relative_attention_max_distance = config.relative_attention_max_distance
        self.d_model = config.d_model
        self.key_value_proj_dim = config.d_kv
        self.n_heads = config.num_heads
        # Total projection width: number of heads times the per-head key/value dimension
        self.inner_dim = self.n_heads * self.key_value_proj_dim

        # Mesh TensorFlow initialization to avoid scaling before softmax
        q_initializer = keras.initializers.RandomNormal(
            mean=0, stddev=config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5)
        )
        k_initializer = keras.initializers.RandomNormal(
            mean=0, stddev=config.initializer_factor * (self.inner_dim**-0.5)
        )
        v_initializer = keras.initializers.RandomNormal(
            mean=0, stddev=config.initializer_factor * (self.inner_dim**-0.5)
        )
        o_initializer = keras.initializers.RandomNormal(
            mean=0, stddev=config.initializer_factor * (self.inner_dim**-0.5)
        )
        self.relative_attention_bias_initializer = keras.initializers.RandomNormal(
            mean=0, stddev=config.initializer_factor * (self.inner_dim**-0.5)
        )

        # Query, key, value, and output projections, all bias-free
        self.q = keras.layers.Dense(
            self.inner_dim, use_bias=False, name="q", kernel_initializer=q_initializer
        )  # Update init weights as in flax
        self.k = keras.layers.Dense(
            self.inner_dim, use_bias=False, name="k", kernel_initializer=k_initializer
        )  # Update init weights as in flax
        self.v = keras.layers.Dense(
            self.inner_dim, use_bias=False, name="v", kernel_initializer=v_initializer
        )  # Update init weights as in flax
        self.o = keras.layers.Dense(
            self.d_model, use_bias=False, name="o", kernel_initializer=o_initializer
        )  # Update init weights as in flax
        self.dropout = keras.layers.Dropout(config.dropout_rate)

        # Set of attention heads that have been pruned
        self.pruned_heads = set()

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Add the relative attention bias table only when this layer owns it
        if self.has_relative_attention_bias:
            with tf.name_scope("relative_attention_bias"):
                # Shape: [num relative-position buckets, num attention heads]
                self.relative_attention_bias = self.add_weight(
                    name="embeddings",
                    shape=[self.relative_attention_num_buckets, self.n_heads],
                    initializer=self.relative_attention_bias_initializer,
                )
        # Build each projection with its known input feature size
        if getattr(self, "q", None) is not None:
            with tf.name_scope(self.q.name):
                self.q.build([None, None, self.d_model])
        if getattr(self, "k", None) is not None:
            with tf.name_scope(self.k.name):
                self.k.build([None, None, self.d_model])
        if getattr(self, "v", None) is not None:
            with tf.name_scope(self.v.name):
                self.v.build([None, None, self.d_model])
        # The output projection consumes the concatenated head outputs (inner_dim wide)
        if getattr(self, "o", None) is not None:
            with tf.name_scope(self.o.name):
                self.o.build([None, None, self.inner_dim])

    # Head pruning is not implemented for the TF T5 attention layer
    def prune_heads(self, heads):
        raise NotImplementedError

    @staticmethod
    def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """
        Adapted from Mesh Tensorflow:
        https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593

        Translate relative position to a bucket number for relative attention. The relative position is defined as
        memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
        position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
        positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
        This should allow for more graceful generalization to longer sequences than the model has been trained on.

        Args:
            relative_position: an int32 Tensor - the relative positions between memory and query
            bidirectional: a boolean - whether the attention is bidirectional or not
            num_buckets: an integer - number of buckets to map relative positions into
            max_distance: an integer - maximum distance to consider for bucketing

        Returns:
            a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
        """
        # Initialize relative_buckets to 0
        relative_buckets = 0
        
        # Adjust num_buckets if bidirectional is True
        if bidirectional:
            num_buckets //= 2
            # Add num_buckets to relative_buckets if relative_position > 0
            relative_buckets += (
                tf.cast(tf.math.greater(relative_position, 0), dtype=relative_position.dtype) * num_buckets
            )
            # Take absolute value of relative_position
            relative_position = tf.math.abs(relative_position)
        else:
            # Set relative_position to negative minimum value if it is <= 0
            relative_position = -tf.math.minimum(relative_position, 0)
        
        # Calculate max_exact as half of num_buckets
        max_exact = num_buckets // 2
        
        # Check if relative_position is less than max_exact
        is_small = tf.math.less(relative_position, max_exact)
        
        # Calculate relative_position_if_large using logarithmic scaling
        relative_position_if_large = max_exact + tf.cast(
            tf.math.log(tf.cast(relative_position, tf.float32) / tf.cast(max_exact, tf.float32))
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact),
            dtype=relative_position.dtype,
        )
        
        # Clamp relative_position_if_large to num_buckets - 1
        relative_position_if_large = tf.math.minimum(relative_position_if_large, num_buckets - 1)
        
        # Add relative_position or relative_position_if_large to relative_buckets based on is_small condition
        relative_buckets += tf.where(is_small, relative_position, relative_position_if_large)
        
        # Return the computed relative_buckets
        return relative_buckets
    def compute_bias(self, query_length, key_length):
        """Compute binned relative position bias"""
        # Query positions as a column vector: shape (query_length, 1)
        context_position = tf.range(query_length)[:, None]
        # Key/memory positions as a row vector: shape (1, key_length)
        memory_position = tf.range(key_length)[None, :]
        # Pairwise relative positions: shape (query_length, key_length)
        relative_position = memory_position - context_position

        # Map each relative position to one of the learned buckets
        relative_position_bucket = self._relative_position_bucket(
            relative_position,
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.relative_attention_max_distance,
        )

        # Look up the per-head bias for every bucket
        values = tf.gather(
            self.relative_attention_bias, relative_position_bucket
        )  # shape (query_length, key_length, num_heads)

        # Rearrange to the shape attention expects
        values = tf.expand_dims(
            tf.transpose(values, [2, 0, 1]), axis=0
        )  # shape (1, num_heads, query_length, key_length)

        return values

    def call(
        self,
        hidden_states,
        mask=None,
        key_value_states=None,
        position_bias=None,
        past_key_value=None,
        layer_head_mask=None,
        query_length=None,
        use_cache=False,
        training=False,
        output_attentions=False,
    ):
        # The attention forward pass (Q/K/V projections, past-key-value caching,
        # relative-position bias, masked softmax, and output projection) is omitted
        # in this walkthrough.
        pass

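
# Illustration (not part of the original file): a quick numeric check of the bucket
# mapping with the default encoder settings (bidirectional=True, num_buckets=32,
# max_distance=128). Nearby offsets keep exact buckets, distant offsets share
# log-spaced buckets, and positive vs. negative offsets use disjoint halves:
_pos = tf.constant([[-130, -8, -1, 0, 1, 8, 130]], dtype=tf.int32)
print(TFT5Attention._relative_position_bucket(_pos).numpy())
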

# TFT5LayerSelfAttention wraps T5 self-attention in a pre-layer-norm residual block
class TFT5LayerSelfAttention(keras.layers.Layer):
    def __init__(self, config, has_relative_attention_bias=False, **kwargs):
        super().__init__(**kwargs)
        self.SelfAttention = TFT5Attention(
            config,
            has_relative_attention_bias=has_relative_attention_bias,
            name="SelfAttention",
        )
        self.layer_norm = TFT5LayerNorm(config.d_model, epsilon=config.layer_norm_epsilon, name="layer_norm")
        self.dropout = keras.layers.Dropout(config.dropout_rate)

    def call(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
        training=False,
    ):
        # Pre-norm residual around the self-attention sub-layer
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.SelfAttention(
            normed_hidden_states,
            mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
            training=training,
        )
        hidden_states = hidden_states + self.dropout(attention_output[0], training=training)
        outputs = (hidden_states,) + attention_output[1:]  # add attentions if we output them
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "SelfAttention", None) is not None:
            with tf.name_scope(self.SelfAttention.name):
                self.SelfAttention.build(None)
        if getattr(self, "layer_norm", None) is not None:
            with tf.name_scope(self.layer_norm.name):
                self.layer_norm.build(None)


class TFT5LayerCrossAttention(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        # Encoder-decoder (cross) attention; it never owns the relative attention bias
        self.EncDecAttention = TFT5Attention(
            config,
            has_relative_attention_bias=False,
            name="EncDecAttention",
        )
        self.layer_norm = TFT5LayerNorm(config.d_model, epsilon=config.layer_norm_epsilon, name="layer_norm")
        self.dropout = keras.layers.Dropout(config.dropout_rate)

    def call(
        self,
        hidden_states,
        key_value_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        query_length=None,
        use_cache=False,
        output_attentions=False,
        training=False,
    ):
        # Pre-norm residual around the cross-attention sub-layer; keys and values
        # come from the encoder's hidden states
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.EncDecAttention(
            normed_hidden_states,
            mask=attention_mask,
            key_value_states=key_value_states,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            query_length=query_length,
            use_cache=use_cache,
            output_attentions=output_attentions,
            training=training,
        )
        hidden_states = hidden_states + self.dropout(attention_output[0], training=training)
        outputs = (hidden_states,) + attention_output[1:]  # add attentions if we output them
        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "EncDecAttention", None) is not None:
            with tf.name_scope(self.EncDecAttention.name):
                self.EncDecAttention.build(None)
        if getattr(self, "layer_norm", None) is not None:
            with tf.name_scope(self.layer_norm.name):
                self.layer_norm.build(None)

class TFT5Block(keras.layers.Layer):
    def __init__(self, config, has_relative_attention_bias=False, **kwargs):
        super().__init__(**kwargs)
        self.is_decoder = config.is_decoder
        # Sub-layers: self-attention, optional cross-attention (decoder only), feed-forward
        self.layer = []
        self.layer.append(
            TFT5LayerSelfAttention(
                config,
                has_relative_attention_bias=has_relative_attention_bias,
                name="layer_._0",
            )
        )
        if self.is_decoder:
            self.layer.append(
                TFT5LayerCrossAttention(
                    config,
                    name="layer_._1",
                )
            )
        self.layer.append(TFT5LayerFF(config, name=f"layer_._{len(self.layer)}"))

    def call(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
        layer_head_mask=None,
        encoder_layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
        training=False,
    ):
        # The block's forward pass (self-attention, optional cross-attention, then the
        # feed-forward sub-layer) is omitted in this walkthrough.
        pass

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build each sub-layer inside its own name scope
        for layer_module in self.layer:
            if hasattr(layer_module, "name"):
                with tf.name_scope(layer_module.name):
                    layer_module.build(None)


####################################################
# TFT5MainLayer is the keras layer implementing the full T5 stack
# (an encoder or a decoder, depending on the config)
####################################################
@keras_serializable
class TFT5MainLayer(keras.layers.Layer):
    config_class = T5Config

    def __init__(self, config, embed_tokens=None, **kwargs):
        super().__init__(**kwargs)

        # Model configuration and output options
        self.config = config
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.use_cache = config.use_cache

        # Shared token embedding, injected by the owning model
        self.embed_tokens = embed_tokens
        self.is_decoder = config.is_decoder

        self.num_hidden_layers = config.num_layers

        # Stack of T5 blocks; only the first block carries the relative attention bias
        self.block = [
            TFT5Block(config, has_relative_attention_bias=bool(i == 0), name=f"block_._{i}")
            for i in range(config.num_layers)
        ]

        # Final layer norm and dropout applied after the block stack
        self.final_layer_norm = TFT5LayerNorm(
            config.d_model, epsilon=config.layer_norm_epsilon, name="final_layer_norm"
        )
        self.dropout = keras.layers.Dropout(config.dropout_rate)

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError  # Not implemented yet in the library for TF 2.0 models
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        inputs_embeds=None,
        head_mask=None,
        encoder_head_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        # The stack's forward pass (embedding lookup, mask extension, running every
        # block, final layer norm and dropout) is omitted in this walkthrough.
        pass

    def build(self, input_shape=None):
        # Return early if the layer has already been built
        if self.built:
            return
        self.built = True
        # Build the final layer norm inside its own name scope
        if getattr(self, "final_layer_norm", None) is not None:
            with tf.name_scope(self.final_layer_norm.name):
                self.final_layer_norm.build(None)
        # Build every block inside its own name scope
        if getattr(self, "block", None) is not None:
            for layer in self.block:
                with tf.name_scope(layer.name):
                    layer.build(None)

####################################################
# TFT5PreTrainedModel is a sub-class of keras.Model
# which take care of loading and saving pretrained weights
# and various common utilities.
# Here you just need to specify a few (self-explanatory)
# pointers for your model.
####################################################
class TFT5PreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    # Specifies the configuration class to be used for this model
    config_class = T5Config

    # Prefix used to identify the base model within the saved weights
    base_model_prefix = "transformer"

    # List of keys representing layers that are authorized to be missing or unexpected during model loading
    _keys_to_ignore_on_load_unexpected = [
        r"decoder\Wblock[\W_0]+layer[\W_1]+EncDecAttention\Wrelative_attention_bias"
    ]

    def get_input_embeddings(self):
        # Returns the shared input embeddings for the model
        return self.shared

    def set_input_embeddings(self, value):
        # Sets the shared input embeddings for the model and updates related components
        self.shared = value
        self.encoder.embed_tokens = self.shared
        if hasattr(self, "decoder"):
            self.decoder.embed_tokens = self.shared

    def _shift_right(self, input_ids):
        # Retrieves necessary configuration parameters
        decoder_start_token_id = self.config.decoder_start_token_id
        pad_token_id = self.config.pad_token_id

        # Asserts that decoder_start_token_id is defined
        assert decoder_start_token_id is not None, (
            "self.model.config.decoder_start_token_id has to be defined. In TF T5 it is usually set to the"
            " pad_token_id. See T5 docs for more information"
        )

        # Constructs start_tokens tensor to prepend to input_ids
        start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)
        start_tokens = tf.cast(start_tokens, input_ids.dtype)  # Ensures dtype compatibility for concatenation
        shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)

        # Asserts that pad_token_id is defined
        assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."

        # Replaces -100 values in shifted_input_ids with pad_token_id
        shifted_input_ids = tf.where(
            shifted_input_ids == -100,
            tf.cast(tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids.dtype),
            shifted_input_ids,
        )

        # Verifies that shifted_input_ids contains only positive values and -100
        assert_gte0 = tf.debugging.assert_greater_equal(
            shifted_input_ids, tf.constant(0, dtype=shifted_input_ids.dtype)
        )

        # Ensures the assertion op is called by wrapping the result in an identity operation
        with tf.control_dependencies([assert_gte0]):
            shifted_input_ids = tf.identity(shifted_input_ids)

        return shifted_input_ids
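
# Illustration (not part of the original file): what _shift_right does to a batch of
# labels, replicated in plain TensorFlow with T5's usual decoder_start_token_id = 0
# and pad_token_id = 0:
_labels = tf.constant([[6536, 504, 1, -100]])
_start = tf.zeros_like(_labels[:, :1])
_shifted = tf.concat([_start, _labels[:, :-1]], axis=-1)
_shifted = tf.where(_shifted == -100, tf.zeros_like(_shifted), _shifted)
print(_shifted.numpy())  # [[   0 6536  504    1]]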


T5_START_DOCSTRING = r"""

    The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text
    Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan
    Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder-decoder transformer pre-trained in a
    text-to-text denoising generative setting.

    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.).

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras model and refer to the TF 2.0 documentation for all matters related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The second format is supported because Keras methods prefer it when passing inputs to models and layers. Thanks to
    this, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels
    in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras
    methods, such as when creating your own layers or models with the Keras Functional API, there are three
    possibilities you can use to gather all the input Tensors in the first positional argument.

    </Tip>

    Parameters:
        config ([`T5Config`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

T5_INPUTS_DOCSTRING = r"""
"""

T5_ENCODER_INPUTS_DOCSTRING = r"""
    Args:
        inputs (`tf.Tensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings, so
            the input can be padded on the right or the left.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.

            To know more on how to prepare `inputs` for pre-training take a look at [T5 Training](./t5#training).
        attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout have different behaviors
            between training and evaluation).
"""

_HEAD_MASK_WARNING_MSG = """
The input argument `head_mask` was split into two arguments, `head_mask` and `decoder_head_mask`. Currently,
`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = tf.ones((num_layers, num_heads))`.
"""


@add_start_docstrings(
    "The bare T5 Model transformer outputting raw hidden-states without any specific head on top.",
    T5_START_DOCSTRING,
)
class TFT5Model(TFT5PreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        # Shared embedding table used by both the encoder and the decoder
        self.shared = keras.layers.Embedding(
            input_dim=config.vocab_size,
            output_dim=config.d_model,
            embeddings_initializer=keras.initializers.TruncatedNormal(self.config.initializer_factor),
            name="shared",
        )
        # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
        self.shared.load_weight_prefix = "shared"

        # Encoder: a copy of the config with caching disabled
        encoder_config = copy.deepcopy(config)
        encoder_config.use_cache = False
        self.encoder = TFT5MainLayer(encoder_config, self.shared, name="encoder")

        # Decoder: a copy of the config marked as decoder, with its own layer count
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = TFT5MainLayer(decoder_config, self.shared, name="decoder")

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    @unpack_inputs
    @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        decoder_input_ids: np.ndarray | tf.Tensor | None = None,
        decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        decoder_head_mask: np.ndarray | tf.Tensor | None = None,
        encoder_outputs: np.ndarray | tf.Tensor | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ):
        # The forward pass (running the encoder and decoder and assembling a
        # TFSeq2SeqModelOutput) is omitted in this walkthrough.
        pass

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # The shared/tied weights are expected in the model base namespace.
        # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the
        # root namespace rather than the current one.
        with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
            self.shared.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "decoder", None) is not None:
            with tf.name_scope(self.decoder.name):
                self.decoder.build(None)
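
# Illustration (not part of the original file): typical TFT5Model usage, mirroring
# the example in the library's documentation (the call body is elided above):
#
#     from transformers import AutoTokenizer, TFT5Model
#
#     tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
#     model = TFT5Model.from_pretrained("google-t5/t5-small")
#     input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="tf").input_ids
#     decoder_input_ids = tokenizer("Studies show that", return_tensors="tf").input_ids
#     outputs = model(input_ids, decoder_input_ids=decoder_input_ids)
#     print(outputs.last_hidden_state.shape)
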
@add_start_docstrings("""T5 Model with a `language modeling` head on top.""", T5_START_DOCSTRING)
class TFT5ForConditionalGeneration(TFT5PreTrainedModel, TFCausalLanguageModelingLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.model_dim = config.d_model
        self.shared = keras.layers.Embedding(
            config.vocab_size,
            config.d_model,
            name="shared",
            embeddings_initializer=get_initializer(self.config.initializer_factor),
        )
        # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
        self.shared.load_weight_prefix = "shared"

        # Encoder: caching disabled
        encoder_config = copy.deepcopy(config)
        encoder_config.use_cache = False
        self.encoder = TFT5MainLayer(encoder_config, self.shared, name="encoder")

        # Decoder: marked as decoder, with its own layer count
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = TFT5MainLayer(decoder_config, self.shared, name="decoder")

        # A separate LM head is only needed when the word embeddings are not tied to it
        if not config.tie_word_embeddings:
            lm_head_initializer = keras.initializers.RandomNormal(mean=0, stddev=config.initializer_factor)
            self.lm_head = keras.layers.Dense(
                config.vocab_size, use_bias=False, name="lm_head", kernel_initializer=lm_head_initializer
            )

        self.config = config

    def get_output_embeddings(self):
        if self.config.tie_word_embeddings:
            # With tied embeddings, the input embedding table doubles as the output projection
            return self.get_input_embeddings()
        else:
            # In a dense layer the kernel has shape (last_dim, units), here (dim, num_tokens),
            # while the expected value has shape (num_tokens, dim), hence the transpose
            return tf.transpose(self.lm_head.kernel)

    def set_output_embeddings(self, value):
        if self.config.tie_word_embeddings:
            self.set_input_embeddings(value)
        else:
            lm_head_initializer = keras.initializers.RandomNormal(mean=0, stddev=self.config.initializer_factor)
            self.lm_head = keras.layers.Dense(
                shape_list(value)[0], use_bias=False, name="lm_head", kernel_initializer=lm_head_initializer
            )
            # In a dense layer the kernel has shape (last_dim, units), here (dim, num_tokens),
            # while value has shape (num_tokens, dim), hence the transpose
            transposed_value = tf.transpose(value)
            self.lm_head.kernel = transposed_value

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    @unpack_inputs
    @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        decoder_input_ids: np.ndarray | tf.Tensor | None = None,
        decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        decoder_head_mask: np.ndarray | tf.Tensor | None = None,
        encoder_outputs: np.ndarray | tf.Tensor | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        decoder_inputs_embeds: np.ndarray | tf.Tensor | None = None,
        labels: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ):
        # The seq2seq forward pass (encoder, decoder, LM head, optional loss on
        # `labels`, returning a TFSeq2SeqLMOutput) is omitted in this walkthrough.
        pass

    def serving_output(self, output):
        # Convert each optional output to a tensor only when the config asks for it
        pkv = tf.convert_to_tensor(output.past_key_values[1:]) if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None

        return TFSeq2SeqLMOutput(
            logits=output.logits,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            cross_attentions=cross_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # When past key values are cached, only the last generated token needs to be fed in
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]

        return {
            "input_ids": None,  # needs to be passed to make Keras.layer.__call__ happy
            "decoder_input_ids": input_ids,
            "past_key_values": past_key_values,
            "encoder_outputs": encoder_outputs,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "use_cache": use_cache,
        }

    def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
        # Decoder inputs are the labels shifted one position to the right
        return self._shift_right(labels)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True

        # The shared/tied weights are expected in the model base namespace.
        # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the
        # root namespace rather than the current one.
        with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
            self.shared.build(None)

        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)

        if getattr(self, "decoder", None) is not None:
            with tf.name_scope(self.decoder.name):
                self.decoder.build(None)

        if getattr(self, "lm_head", None) is not None:
            with tf.name_scope(self.lm_head.name):
                # The LM head consumes hidden states of width d_model
                self.lm_head.build([None, None, self.config.d_model])
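
# Illustration (not part of the original file): conditional generation with this
# class, as in the library's documented examples:
#
#     from transformers import AutoTokenizer, TFT5ForConditionalGeneration
#
#     tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
#     model = TFT5ForConditionalGeneration.from_pretrained("google-t5/t5-small")
#     inputs = tokenizer("translate English to German: The house is wonderful.", return_tensors="tf")
#     output_ids = model.generate(inputs.input_ids, max_new_tokens=20)
#     print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
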
@add_start_docstrings(
    "The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.",
    T5_START_DOCSTRING,
)
class TFT5EncoderModel(TFT5PreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # Shared embedding table for the encoder inputs
        self.shared = keras.layers.Embedding(
            config.vocab_size,
            config.d_model,
            name="shared",
            embeddings_initializer=get_initializer(self.config.initializer_factor),
        )
        # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
        self.shared.load_weight_prefix = "shared"

        # Encoder-only configuration with caching disabled
        encoder_config = copy.deepcopy(config)
        encoder_config.use_cache = False
        self.encoder = TFT5MainLayer(encoder_config, self.shared, name="encoder")

    def get_encoder(self):
        return self.encoder

    @unpack_inputs
    @add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFBaseModelOutput]:
        r"""
        Runs the T5 encoder model on inputs.

        Returns:
            TFBaseModelOutput or Tuple: Depending on `return_dict`, returns either a dictionary or a tuple of model outputs.

        Examples:
        
        ```
        >>> from transformers import AutoTokenizer, TFT5EncoderModel

        >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
        >>> model = TFT5EncoderModel.from_pretrained("google-t5/t5-small")

        >>> input_ids = tokenizer(
        ...     "Studies have been shown that owning a dog is good for you", return_tensors="tf"
        ... ).input_ids  # Batch size 1
        >>> outputs = model(input_ids)
        ```
        """

        # Run the encoder stack
        encoder_outputs = self.encoder(
            input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=None,
            encoder_attention_mask=None,
            inputs_embeds=inputs_embeds,
            head_mask=head_mask,
            past_key_values=None,
            use_cache=False,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return encoder_outputs

        # Wrap the encoder outputs in a TFBaseModelOutput
        return TFBaseModelOutput(
            last_hidden_state=encoder_outputs.last_hidden_state,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True

        # The shared/tied weights are expected in the model base namespace.
        # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the
        # root namespace rather than the current one.
        with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
            self.shared.build(None)

        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)

.\models\t5\tokenization_t5.py

# coding=utf-8
# Copyright 2018 T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model T5."""

# Standard library imports
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

# SentencePiece provides the underlying subword model
import sentencepiece as spm

# Helpers and base classes from the Transformers package
from ...convert_slow_tokenizer import import_protobuf
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken

# Only needed during static type checking
if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput
from ...utils import logging

# Get logger instance for logging messages
logger = logging.get_logger(__name__)

# Define constant for vocabulary file names
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Define mapping of pretrained model names to their respective vocabulary file URLs
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google-t5/t5-small": "https://huggingface.co/google-t5/t5-small/resolve/main/spiece.model",
        "google-t5/t5-base": "https://huggingface.co/google-t5/t5-base/resolve/main/spiece.model",
        "google-t5/t5-large": "https://huggingface.co/google-t5/t5-large/resolve/main/spiece.model",
        "google-t5/t5-3b": "https://huggingface.co/google-t5/t5-3b/resolve/main/spiece.model",
        "google-t5/t5-11b": "https://huggingface.co/google-t5/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
# Define sizes of positional embeddings for different pretrained T5 models
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google-t5/t5-small": 512,
    "google-t5/t5-base": 512,
    "google-t5/t5-large": 512,
    "google-t5/t5-3b": 512,
    "google-t5/t5-11b": 512,
}

# Define a special token used by SentencePiece for word beginning
SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    """
    Construct a T5 tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    """

    vocab_files_names = VOCAB_FILES_NAMES     # Assign constant for vocabulary file names to class attribute
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP   # Assign vocabulary file URL mapping to class attribute
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES    # Assign positional embeddings sizes to class attribute
    model_input_names = ["input_ids", "attention_mask"]   # Define input names required by the model
    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=None,
        add_prefix_space=True,
        **kwargs,
    ):
        # Defaults: "</s>" as EOS, "<unk>" for unknown tokens, "<pad>" for padding,
        # 100 sentinel ids, optional extra special tokens, SentencePiece keyword
        # arguments, a legacy-compatibility switch, and whether to prepend a space
        # to the input. The body (validating sentinel tokens and loading the
        # SentencePiece model) is omitted in this walkthrough.
        pass

    # Return a configured SentencePiece processor
    def get_spm_processor(self, from_slow=False):
        # Create the SentencePieceProcessor with the stored keyword arguments
        tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        
        # 如果处于兼容模式或者 from_slow 为 True,则不依赖于 protobuf,直接加载词汇文件
        if self.legacy or from_slow:
            # 从磁盘加载 SentencePiece 词汇文件
            tokenizer.Load(self.vocab_file)
            return tokenizer

        # Otherwise, use the new behaviour and rely on protobuf to patch the model
        with open(self.vocab_file, "rb") as f:
            sp_model = f.read()
            # Import the protobuf model definition
            model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
            # Deserialize the protobuf model
            model = model_pb2.ModelProto.FromString(sp_model)
            # Disable the dummy-prefix normalizer so no '▁' is silently prepended
            normalizer_spec = model_pb2.NormalizerSpec()
            normalizer_spec.add_dummy_prefix = False
            model.normalizer_spec.MergeFrom(normalizer_spec)
            # Serialize the patched model back to bytes
            sp_model = model.SerializeToString()
            # Initialize the tokenizer from the serialized proto
            tokenizer.LoadFromSerializedProto(sp_model)

        return tokenizer

    # Property: vocabulary size reported by the SentencePiece model
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        """
        Build the vocabulary dict mapping each token to its id.
        """
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        # Merge in the tokens that were added on top of the SentencePiece vocabulary
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        检索没有添加特殊标记的令牌列表的序列ID。当使用分词器的 `prepare_for_model` 方法添加特殊标记时调用此方法。

        Args:
            token_ids_0 (`List[int]`):
                ID 列表。
            token_ids_1 (`List[int]`, *optional*):
                可选的第二个 ID 列表,用于序列对。
            already_has_special_tokens (`bool`, *optional*, 默认为 `False`):
                标记列表是否已经包含了模型的特殊标记。

        Returns:
            `List[int]`: 一个整数列表,范围在 [0, 1]:1 表示特殊标记,0 表示序列标记。
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # Normal case: only the appended eos tokens are special
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
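
A quick sketch of what this mask looks like in practice (assumes network access to fetch the `google-t5/t5-small` vocabulary):

from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
ids = tokenizer.encode("Translate: hello", add_special_tokens=False)
# Sequence tokens map to 0; the eos position that would be appended maps to 1.
print(tokenizer.get_special_tokens_mask(ids))  # [0, 0, ..., 0, 1]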

    def get_sentinel_tokens(self):
        """
        Return the sentinel tokens (tokens of the form `<extra_id_NN>`) among the additional special tokens.
        """
        # Note: the predicate must test whether the regex matched; `bool(...) is not None`
        # would always be True, so the truthiness of the match is used directly here.
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        """
        Return the token ids of the sentinel tokens.
        """
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
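
The sentinels are the ids used by T5's span-corruption objective. A small sketch (for the stock checkpoints with the default 100 extra ids, `<extra_id_0>` sits at the top of the vocabulary):

from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
print(len(tokenizer.get_sentinel_tokens()))             # 100
print(tokenizer.convert_tokens_to_ids("<extra_id_0>"))  # 32099 for the default setup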

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """
        Do not add eos again if the user has already added it.
        """
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
        use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs representing the first sequence.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs representing the second sequence for sequence pairs.

        Returns:
            `List[int]`: List of zeros representing the mask.
        """
        # Define the end-of-sequence token
        eos = [self.eos_token_id]

        # If only one sequence is provided, return a mask for it
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        
        # If two sequences are provided, concatenate them with special tokens and return a mask for the combined sequence
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
        adding special tokens.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs representing the first sequence.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs representing the second sequence for sequence pairs.

        Returns:
            `List[int]`: List of input IDs with the appropriate special tokens added.
        """
        # Add end-of-sequence token if not already present
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        
        # If only one sequence is provided, return it with special tokens added
        if token_ids_1 is None:
            return token_ids_0
        else:
            # Add end-of-sequence token if not already present for the second sequence
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            # Concatenate both sequences with special tokens added
            return token_ids_0 + token_ids_1
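
The resulting formats are `X </s>` for a single sequence and `A </s> B </s>` for a pair. A minimal sketch:

from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
a = tokenizer.encode("first", add_special_tokens=False)
b = tokenizer.encode("second", add_special_tokens=False)
pair = tokenizer.build_inputs_with_special_tokens(a, b)
assert pair == a + [tokenizer.eos_token_id] + b + [tokenizer.eos_token_id]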

    def __getstate__(self):
        # Copy the object's state dictionary
        state = self.__dict__.copy()
        # Set 'sp_model' attribute to None to avoid serializing it
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        # Restore object's state from the dictionary 'd'
        self.__dict__ = d

        # For backward compatibility, initialize 'sp_model_kwargs' if not already present
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        # Load SentencePiece processor from 'vocab_file'
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        """
        Converts a string to a list of tokens. Adds a prefix token if `self.legacy` is False and the first token is not special.
        """
        # Call superclass's tokenize method if legacy mode is enabled or text is empty
        if self.legacy or len(text) == 0:
            return super().tokenize(text, **kwargs)

        # Replace SPIECE_UNDERLINE with space in the text
        text = text.replace(SPIECE_UNDERLINE, " ")

        # Add SPIECE_UNDERLINE prefix to the text if required
        if self.add_prefix_space:
            text = SPIECE_UNDERLINE + text

        # Tokenize the text using superclass's tokenize method
        tokens = super().tokenize(text, **kwargs)

        # Remove the prefix token if it is the first token and not a special token
        if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
            tokens = tokens[1:]

        return tokens

    @property
    def unk_token_length(self):
        """
        Calculate the length of the unknown token.
        """
        return len(self.sp_model.encode(str(self.unk_token)))
    def _tokenize(self, text, **kwargs):
        """
        Returns a tokenized string.

        We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
        SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
        `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
        `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
        `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
        """
        # Encode the text with the SentencePiece model, returning string pieces
        tokens = self.sp_model.encode(text, out_type=str)
        # In legacy mode, or when the text does not start with SPIECE_UNDERLINE or a space, return as-is
        if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
            return tokens

        # 1. Prefix the text with unk_token, e.g. "<unk> Hey"
        tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
        # 2. Strip the unk_token pieces from the result, e.g. ['<', 'unk', '>', '▁Hey'] -> ['▁Hey']
        return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
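
A hedged illustration of the trick described above (requires `legacy=False`, which in turn needs a protobuf-capable environment; the exact pieces depend on the vocabulary):

from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small", legacy=False)
# Raw SentencePiece call: with add_dummy_prefix disabled, the leading '▁' is stripped.
print(tokenizer.sp_model.encode("▁Hey", out_type=str))  # e.g. ['H', 'e', 'y']
# _tokenize therefore encodes "<unk> Hey" and drops the first unk_token_length pieces.
print(tokenizer._tokenize(" Hey"))                      # e.g. ['▁Hey']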

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        # Look the token up in the SentencePiece vocabulary
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        # Look the id up in the SentencePiece vocabulary
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        # Since we manually added a prefix space, it has to be removed when decoding
        if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
            tokens[0] = tokens[0][1:]

        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # Make sure special tokens are not decoded through the SentencePiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    # Save the vocabulary to the given directory; returns a tuple of saved file paths
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        # If the save directory does not exist, log an error and bail out
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        # Build the output vocabulary file path, including the optional prefix and the fixed file name
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # If the current vocabulary file differs from the target path and exists on disk, copy it over
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        # If the current vocabulary file does not exist, write the serialized SentencePiece model instead
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        # Return the saved vocabulary file path as a tuple
        return (out_vocab_file,)
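
A small save round trip as a sketch (paths are temporary):

import tempfile

from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
with tempfile.TemporaryDirectory() as tmp_dir:
    (vocab_path,) = tokenizer.save_vocabulary(tmp_dir)
    print(vocab_path)  # <tmp_dir>/spiece.model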

.\models\t5\tokenization_t5_fast.py

# coding=utf-8
# Copyright 2018 T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model T5."""

import os                    # filesystem helpers
import re                    # regular expressions (sentinel-token matching)
import warnings              # deprecation warnings
from shutil import copyfile  # copying the vocabulary file on save
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    # The slow tokenizer is only importable when sentencepiece is installed
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)  # module-level logger

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}  # vocabulary file names

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {  # 预训练词汇文件映射
        "google-t5/t5-small": "https://huggingface.co/google-t5/t5-small/resolve/main/spiece.model",
        "google-t5/t5-base": "https://huggingface.co/google-t5/t5-base/resolve/main/spiece.model",
        "google-t5/t5-large": "https://huggingface.co/google-t5/t5-large/resolve/main/spiece.model",
        "google-t5/t5-3b": "https://huggingface.co/google-t5/t5-3b/resolve/main/spiece.model",
        "google-t5/t5-11b": "https://huggingface.co/google-t5/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {  # 预训练分词器文件映射
        "google-t5/t5-small": "https://huggingface.co/google-t5/t5-small/resolve/main/tokenizer.json",
        "google-t5/t5-base": "https://huggingface.co/google-t5/t5-base/resolve/main/tokenizer.json",
        "google-t5/t5-large": "https://huggingface.co/google-t5/t5-large/resolve/main/tokenizer.json",
        "google-t5/t5-3b": "https://huggingface.co/google-t5/t5-3b/resolve/main/tokenizer.json",
        "google-t5/t5-11b": "https://huggingface.co/google-t5/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google-t5/t5-small": 512,   # 预训练位置嵌入大小映射,指定了每个 T5 模型的默认嵌入大小为 512
    "google-t5/t5-base": 512,
    "google-t5/t5-large": 512,
    "google-t5/t5-3b": 512,
    "google-t5/t5-11b": 512,
}


class T5TokenizerFast(PreTrainedTokenizerFast):
    """
    构建一个“快速”T5分词器(基于HuggingFace的*tokenizers*库)。基于
    [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models)。

    这个分词器继承自[`PreTrainedTokenizerFast`],该类包含大部分主要方法。用户应
    参考超类以获取更多关于这些方法的信息。
    """
    # 定义了一些常量和类变量,这些变量用于配置和初始化Tokenizer类的实例
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    # 前缀特殊标记的空列表
    prefix_tokens: List[int] = []

    # Constructor for the fast tokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        add_prefix_space=None,
        **kwargs,
    ):
        # If additional_special_tokens is given, extract any "<extra_id_..." sentinels from it
        if additional_special_tokens is not None:
            extra_tokens = [x for x in additional_special_tokens if "<extra_id_" in str(x)]
            # No sentinels found: generate extra_ids of them and append
            if len(extra_tokens) < 1:
                additional_special_tokens += [f"<extra_id_{i}>" for i in range(extra_ids)]
            # Sentinels found but their count disagrees with extra_ids: raise
            elif extra_ids > 0 and extra_ids != len(extra_tokens):
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        else:
            # No additional tokens given: create the "<extra_id_NN>" sentinels from scratch
            extra_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
            additional_special_tokens = extra_tokens

        # If add_prefix_space was set, warn once and force a conversion from the slow tokenizer
        if add_prefix_space is not None:
            logger.warning_once(
                "You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers"
            )
            kwargs["from_slow"] = True

        # Call the parent constructor with the resolved arguments
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        # Remember the vocabulary file and the number of extra ids
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        # Only checkpoints listed in max_model_input_sizes carry the deprecated default length
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            # Deprecated default max length for this checkpoint
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            # An explicitly passed init_max_model_length wins over max_model_length
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            # No explicit value: warn that the deprecated default will be corrected in Transformers v5
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        # Fall back to the current max_model_length
        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        # A fast tokenizer without the original vocab file cannot save a slow-tokenizer vocabulary
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        # If the save directory is not a valid directory, log an error and bail out
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        # Build the output vocabulary path and copy the vocabulary file over
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        # Return the output vocabulary path
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
        adding special tokens. A sequence has the following format:

        - single sequence: `X </s>`
        - pair of sequences: `A </s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of input IDs with the appropriate special tokens added.
        """
        # Add end-of-sequence token to the first sequence
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        
        if token_ids_1 is None:
            # Return the prefix tokens followed by token_ids_0
            return self.prefix_tokens + token_ids_0
        else:
            # Add end-of-sequence token to the second sequence
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            # Return the prefix tokens followed by concatenated token_ids_0 and token_ids_1
            return self.prefix_tokens + token_ids_0 + token_ids_1
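
Since `prefix_tokens` is empty for T5, the fast tokenizer yields the same `A </s> B </s>` layout as the slow one. A sketch:

from transformers import T5TokenizerFast

tokenizer = T5TokenizerFast.from_pretrained("google-t5/t5-small")
ids = tokenizer("first", "second")["input_ids"]
assert ids[-1] == tokenizer.eos_token_id  # both segments end with </s>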

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
        use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros as T5 does not use token type ids.
        """
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            # Return a list of zeros corresponding to the length of token_ids_0 + eos
            return len(token_ids_0 + eos) * [0]
        else:
            # Return a list of zeros corresponding to the length of token_ids_0 + eos + token_ids_1 + eos
            return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        """
        Get sentinel tokens from the additional special tokens list based on a regex pattern matching.

        Returns:
            List[str]: List of sentinel tokens.
        """
        # As in the slow tokenizer, the predicate uses the truthiness of the regex match
        # (`bool(...) is not None` would always be True).
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        """
        Convert sentinel tokens to their corresponding token IDs.

        Returns:
            List[int]: List of token IDs of sentinel tokens.
        """
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]

.\models\t5\__init__.py

# Imports needed for the lazy-module machinery
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Import structure: maps each submodule to the names it exports
_import_structure = {"configuration_t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config", "T5OnnxConfig"]}

# Check whether sentencepiece is available; if not, the entry is simply skipped
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # sentencepiece is available: expose T5Tokenizer via tokenization_t5
    _import_structure["tokenization_t5"] = ["T5Tokenizer"]

# Check whether tokenizers is available; if not, the entry is simply skipped
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # tokenizers is available: expose T5TokenizerFast via tokenization_t5_fast
    _import_structure["tokenization_t5_fast"] = ["T5TokenizerFast"]

# Check whether torch is available; if not, the entry is simply skipped
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the T5 PyTorch models via modeling_t5
    _import_structure["modeling_t5"] = [
        "T5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "T5EncoderModel",
        "T5ForConditionalGeneration",
        "T5Model",
        "T5PreTrainedModel",
        "load_tf_weights_in_t5",
        "T5ForQuestionAnswering",
        "T5ForSequenceClassification",
        "T5ForTokenClassification",
    ]

# Check whether TensorFlow is available; if not, the entry is simply skipped
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow is available: expose the TF T5 models via modeling_tf_t5
    _import_structure["modeling_tf_t5"] = [
        "TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFT5EncoderModel",
        "TFT5ForConditionalGeneration",
        "TFT5Model",
        "TFT5PreTrainedModel",
    ]

# Check whether Flax is available; if not, the entry is simply skipped
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax is available: expose the Flax T5 models via modeling_flax_t5
    _import_structure["modeling_flax_t5"] = [
        "FlaxT5EncoderModel",
        "FlaxT5ForConditionalGeneration",
        "FlaxT5Model",
        "FlaxT5PreTrainedModel",
    ]

# During static type checking, perform the real imports so type checkers see the symbols
if TYPE_CHECKING:
    from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config, T5OnnxConfig

    # Re-check sentencepiece and import T5Tokenizer
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_t5 import T5Tokenizer

    # Re-check tokenizers and import T5TokenizerFast
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_t5_fast import T5TokenizerFast

    # Re-check torch and import the PyTorch models
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_t5 import (
            T5_PRETRAINED_MODEL_ARCHIVE_LIST,
            T5EncoderModel,
            T5ForConditionalGeneration,
            T5ForQuestionAnswering,
            T5ForSequenceClassification,
            T5ForTokenClassification,
            T5Model,
            T5PreTrainedModel,
            load_tf_weights_in_t5,
        )

    # Re-check TensorFlow and import the TF models
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_t5 import (
            TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFT5EncoderModel,
            TFT5ForConditionalGeneration,
            TFT5Model,
            TFT5PreTrainedModel,
        )

    # Re-check Flax and import the Flax models
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_t5 import (
            FlaxT5EncoderModel,
            FlaxT5ForConditionalGeneration,
            FlaxT5Model,
            FlaxT5PreTrainedModel,
        )
else:
    # At runtime, register a lazy module so submodules are imported only on first attribute access
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
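
The effect is that `import transformers` stays cheap and heavyweight backends load only on first attribute access. A minimal standalone sketch of the same pattern via PEP 562 module `__getattr__` (an illustration, not the actual `_LazyModule` implementation; it assumes it lives in a package `__init__.py`):

import importlib

_import_structure = {"tokenization_t5": ["T5Tokenizer"]}

def __getattr__(name):  # called only when `name` is not found in the module
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            # Import the submodule lazily and pull the requested attribute from it
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")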

.\models\table_transformer\configuration_table_transformer.py

# coding=utf-8
# Copyright The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Table Transformer model configuration"""
from collections import OrderedDict  # ordered mapping for the ONNX input spec
from typing import Mapping

from packaging import version  # version parsing for the minimum torch requirement

from ...configuration_utils import PretrainedConfig  # base class for all model configs
from ...onnx import OnnxConfig  # base class for ONNX export configs
from ...utils import logging
from ..auto import CONFIG_MAPPING  # automatic config mapping (used for backbone configs)

# Get a logger for this module
logger = logging.get_logger(__name__)

# Mapping from pretrained checkpoints to their configuration files
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}

# TableTransformerConfig, a subclass of PretrainedConfig
class TableTransformerConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`TableTransformerModel`]. It is used to
    instantiate a Table Transformer model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Table Transformer
    [microsoft/table-transformer-detection](https://huggingface.co/microsoft/table-transformer-detection) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Examples:

    ```
    >>> from transformers import TableTransformerModel, TableTransformerConfig

    >>> # Initializing a Table Transformer microsoft/table-transformer-detection style configuration
    >>> configuration = TableTransformerConfig()

    >>> # Initializing a model from the microsoft/table-transformer-detection style configuration
    >>> model = TableTransformerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```

    """
    
    # Model type identifier
    model_type = "table-transformer"
    # Keys to ignore at inference time
    keys_to_ignore_at_inference = ["past_key_values"]
    # Attribute map translating generic config names into the DETR-style ones
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    # Copied from transformers.models.detr.configuration_detr.DetrConfig.__init__
    def __init__(
        self,
        use_timm_backbone=True,  # whether to use a timm backbone, defaults to True
        backbone_config=None,  # config of the backbone when not using timm, defaults to None
        num_channels=3,  # number of input channels, defaults to 3
        num_queries=100,  # number of object queries, defaults to 100
        encoder_layers=6,  # number of encoder layers, defaults to 6
        encoder_ffn_dim=2048,  # FFN dimension in the encoder, defaults to 2048
        encoder_attention_heads=8,  # attention heads in the encoder, defaults to 8
        decoder_layers=6,  # number of decoder layers, defaults to 6
        decoder_ffn_dim=2048,  # FFN dimension in the decoder, defaults to 2048
        decoder_attention_heads=8,  # attention heads in the decoder, defaults to 8
        encoder_layerdrop=0.0,  # LayerDrop rate for encoder layers, defaults to 0.0
        decoder_layerdrop=0.0,  # LayerDrop rate for decoder layers, defaults to 0.0
        is_encoder_decoder=True,  # encoder-decoder architecture flag, defaults to True
        activation_function="relu",  # activation function, defaults to "relu"
        d_model=256,  # model (hidden) dimension, defaults to 256
        dropout=0.1,  # global dropout rate, defaults to 0.1
        attention_dropout=0.0,  # attention dropout rate, defaults to 0.0
        activation_dropout=0.0,  # activation dropout rate, defaults to 0.0
        init_std=0.02,  # std of the weight initializer, defaults to 0.02
        init_xavier_std=1.0,  # std for Xavier initialization, defaults to 1.0
        auxiliary_loss=False,  # whether to use auxiliary decoding losses, defaults to False
        position_embedding_type="sine",  # type of position embeddings, defaults to "sine"
        backbone="resnet50",  # timm backbone name, defaults to "resnet50"
        use_pretrained_backbone=True,  # whether the backbone is pretrained, defaults to True
        backbone_kwargs=None,  # extra keyword arguments for the backbone, defaults to None
        dilation=False,  # whether to use dilated convolutions, defaults to False
        class_cost=1,  # matcher weight of the classification cost, defaults to 1
        bbox_cost=5,  # matcher weight of the L1 box cost, defaults to 5
        giou_cost=2,  # matcher weight of the GIoU cost, defaults to 2
        mask_loss_coefficient=1,  # weight of the mask loss, defaults to 1
        dice_loss_coefficient=1,  # weight of the Dice loss, defaults to 1
        bbox_loss_coefficient=5,  # weight of the L1 box loss, defaults to 5
        giou_loss_coefficient=2,  # weight of the GIoU loss, defaults to 2
        eos_coefficient=0.1,  # weight of the no-object class, defaults to 0.1
        **kwargs,  # any remaining keyword arguments
    ):
        ...  # body elided in this excerpt

    @property
    def num_attention_heads(self) -> int:
        # Number of attention heads in the encoder
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Hidden size (the model dimension d_model)
        return self.d_model
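
Together with `attribute_map`, these properties make the generic names plain aliases of the DETR-style fields:

from transformers import TableTransformerConfig

config = TableTransformerConfig()
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8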
# Copied from transformers.models.detr.configuration_detr.DetrOnnxConfig
class TableTransformerOnnxConfig(OnnxConfig):
    # Minimum torch version required for ONNX export
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Ordered mapping from model input names to their dynamic axes
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        # Default ONNX opset version
        return 12
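
A short sketch of querying the export spec (the direct submodule import is an assumption about where the class lives):

from transformers import TableTransformerConfig
from transformers.models.table_transformer.configuration_table_transformer import TableTransformerOnnxConfig

onnx_config = TableTransformerOnnxConfig(TableTransformerConfig())
print(onnx_config.inputs)              # OrderedDict with the dynamic axes above
print(onnx_config.default_onnx_opset)  # 12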

.\models\table_transformer\convert_table_transformer_to_hf.py

# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Table Transformer checkpoints with timm backbone.

URL: https://github.com/microsoft/table-transformer
"""


import argparse  # command-line argument parsing
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download  # file downloads from the Hugging Face Hub
from PIL import Image
from torchvision.transforms import functional as F  # torchvision transform helpers

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()  # set the log level to INFO
logger = logging.get_logger(__name__)  # module-level logger

# Here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # Encoder layers: output projection, two feed-forward layers, and two layer norms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # Decoder layers: two output projections, two feed-forward layers, and three layer norms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    # Cross-attention output projection
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )

    # Feed-forward layers
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))

    # Layer norms: self-attention, cross-attention, and final
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# Non-layer keys: input projection, query embeddings, final layer norms, and the prediction heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)


def rename_key(state_dict, old, new):
    # Pop the value stored under the old key and re-insert it under the new key
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            # Replace "backbone.0.body" with "backbone.conv_encoder.model" in the key
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
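
A toy demonstration of the two helpers above (hypothetical keys, not a real checkpoint):

toy_state_dict = {"input_proj.weight": 0, "backbone.0.body.conv1.weight": 1}
rename_key(toy_state_dict, "input_proj.weight", "input_projection.weight")
toy_state_dict = rename_backbone_keys(toy_state_dict)
print(sorted(toy_state_dict))
# ['backbone.conv_encoder.model.conv1.weight', 'input_projection.weight']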


def read_in_q_k_v(state_dict):
    prefix = ""

    # Part 1: the transformer encoder
    for i in range(6):
        # Read the input projection weight and bias of the encoder self-attention
        # (in PyTorch's MultiHeadAttention this is a single stacked matrix plus bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # Add query, key, and value (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # Part 2: the transformer decoder (a bit more involved, since it also has cross-attention)
    for i in range(6):
        # Read the input projection weight and bias of the decoder self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # Slice into query, key, and value projections
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # Read the input projection weight and bias of the decoder cross-attention
        in_proj_weight_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # Slice into cross-attention query, key, and value projections
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
# Resize the image so that its longest side matches the target size, keeping the aspect ratio
def resize(image, checkpoint_url):
    # Current width and height
    width, height = image.size
    # Longest side of the current image
    current_max_size = max(width, height)
    # Target longest side: 800 for the detection checkpoint, 1000 otherwise
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    # Scale factor
    scale = target_max_size / current_max_size
    # Resize and return the image
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


# Normalize the image: convert it to a tensor and standardize with ImageNet statistics
def normalize(image):
    # Convert the PIL image to a tensor
    image = F.to_tensor(image)
    # Normalize with the given per-channel mean and std
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
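
A quick sanity check of the preprocessing pair on a synthetic image (the URL string only needs to contain "detection" to pick the 800-pixel target):

from PIL import Image

test_image = Image.new("RGB", (1200, 900), color="white")
pixel_values = normalize(resize(test_image, "pubtables1m_detection_detr_r18.pth")).unsqueeze(0)
print(pixel_values.shape)  # torch.Size([1, 3, 600, 800]) -- longest edge scaled to 800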


# Convert a Table Transformer checkpoint
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak model's weights to our DETR structure.
    """

    logger.info("Converting model...")

    # Load the original state dict from the URL onto the CPU
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # Rename keys in the state dict
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # Further rename the backbone keys
    state_dict = rename_backbone_keys(state_dict)
    # Special handling for the query, key, and value matrices
    read_in_q_k_v(state_dict)
    # Prefix the base-model keys, since the head model uses different attributes
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # Create the HuggingFace config and load the state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    # Set task-specific configuration depending on the checkpoint URL
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # Create the DETR image processor
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    # Create the Table Transformer detection model and load the converted state dict
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Verify the conversion on an example image
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    # Download the example file from the HuggingFace Hub
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    # Open the image file and convert it to RGB
    image = Image.open(file_path).convert("RGB")
    # Resize and normalize the image, then add a batch dimension
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    # Run inference
    outputs = model(pixel_values)

    # The expected output shape and values depend on the checkpoint
    if "detection" in checkpoint_url:
        # Detection model: expected shape (1, 15, 3)
        expected_shape = (1, 15, 3)
        # Expected classification logits
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        # Expected bounding boxes
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])

    else:
        # Structure-recognition model: expected shape (1, 125, 7)
        expected_shape = (1, 125, 7)
        # Expected classification logits
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        # Expected bounding boxes
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    # Check the output shape
    assert outputs.logits.shape == expected_shape
    # Check the logits against the expected values (within tolerance)
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    # Check the predicted boxes against the expected values (within tolerance)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    # All checks passed
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save the model and image processor to the given path
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        # Make sure the output directory exists
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push the converted model to the Hub
        logger.info("Pushing model to the hub...")
        # Pick the model name based on the checkpoint URL
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        # Push the image processor under the same name
        image_processor.push_to_hub(model_name)
# Entry point when run as a script
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # URL of the checkpoint to convert; defaults to the public detection checkpoint
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert."
    )

    # Output folder for the PyTorch model; defaults to None
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model."
    )

    # Whether to push the converted model to the 🤗 hub
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model to the 🤗 hub."
    )

    # Parse the command-line arguments
    args = parser.parse_args()

    # Run the conversion with the parsed arguments
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)

.\models\table_transformer\convert_table_transformer_to_hf_no_timm.py

# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Table Transformer checkpoints with native (Transformers) backbone.

URL: https://github.com/microsoft/table-transformer
"""


import argparse
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, ResNetConfig, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()  # set the log level to INFO
logger = logging.get_logger(__name__)  # module-level logger


def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
    rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
    rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
    rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
    # stages
    # fmt: on

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
            ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
            ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    # Return the list of (old, new) key pairs
    return rename_keys
# Rename a key in the state dict: pop the value stored under `old` and re-insert it under `new`
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val

# Read the query, key, and value projections from the stacked in_proj tensors
# and re-insert them into the state dict under the split names
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # Iterate over the six transformer encoder layers
    for i in range(6):
        # Read the input projection weight and bias (a single stacked matrix plus bias
        # in PyTorch's MultiHeadAttention)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")

        # Query projection
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]

        # Key projection
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]

        # Value projection
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # Next, the transformer decoder (a bit more involved, since it also has cross-attention)
    # 对于每个层次索引 i 在范围内从 0 到 5(共6个层次)
    for i in range(6):
        # 读取 self-attention 的输入投影层的权重和偏置
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        
        # 将查询、键和值(按顺序)添加到状态字典中的 self-attention 部分
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        
        # 读取 cross-attention 的输入投影层的权重和偏置
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        
        # 将查询、键和值(按顺序)添加到状态字典中的 cross-attention 部分
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
# Resize an image: pick the target size based on the checkpoint URL, then scale the image accordingly
def resize(image, checkpoint_url):
    # get the image width and height
    width, height = image.size
    # compute the current longest side
    current_max_size = max(width, height)
    # the detection model uses a longest side of 800, the structure model uses 1000
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    # compute the rescaling factor
    scale = target_max_size / current_max_size
    # resize the image
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


# Normalize an image using torchvision's functional transforms
def normalize(image):
    # convert the image to a tensor
    image = F.to_tensor(image)
    # normalize with the ImageNet mean and standard deviation
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
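
Putting the two helpers together (a sketch that assumes `torchvision.transforms.functional` is imported as `F`, as the body of `normalize` implies; the blank PIL image stands in for a real page):

from PIL import Image

image = Image.new("RGB", (1200, 900))  # stand-in for a real page image
pixel_values = normalize(resize(image, "detection"))  # "detection" in the URL selects the 800px target
print(pixel_values.shape)  # torch.Size([3, 600, 800])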


# Convert a Table Transformer checkpoint: load the original weights and map them onto our implementation
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak model's weights to our DETR structure.
    """

    logger.info("Converting model...")

    # create the HuggingFace backbone configuration used by the model
    backbone_config = ResNetConfig.from_pretrained(
        "microsoft/resnet-18", out_features=["stage1", "stage2", "stage3", "stage4"]
    )

    # build a TableTransformerConfig on top of that backbone configuration
    config = TableTransformerConfig(
        backbone_config=backbone_config,
        use_timm_backbone=False,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    # load the original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in create_rename_keys(config):
        rename_key(state_dict, src, dest)
    # the query, key and value projection matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: prepend a prefix to the base-model keys, since the head model stores them under a different attribute
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # set checkpoint-specific configuration values based on the URL
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # create the image processor, with coco_detection output format and a longest edge of 800
    image_processor = DetrImageProcessor(format="coco_detection", size={"longest_edge": 800})
    # instantiate the TableTransformerForObjectDetection model
    model = TableTransformerForObjectDetection(config)
    # load the converted state dict
    model.load_state_dict(state_dict)
    # put the model in evaluation mode
    model.eval()

    # verify the conversion on an example image
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    # download the example file from the hub with hf_hub_download and get its local path
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    # open it with PIL and convert it to RGB
    image = Image.open(file_path).convert("RGB")
    # resize and normalize the image, then add a batch dimension as expected by the model
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    # run the preprocessed image through the model
    outputs = model(pixel_values)

    # set the expected output shape, logits and boxes, depending on whether "detection" appears in checkpoint_url
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    # check that the output shape matches the expectation
    assert outputs.logits.shape == expected_shape
    # check that the logits match the expected values within tolerance
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    # check that the predicted boxes match the expected values within tolerance
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    # confirm that the checks passed
    print("Looks ok!")

    # if pytorch_dump_folder_path is given, save the model and the image processor there
    if pytorch_dump_folder_path is not None:
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    # if push_to_hub is set, push the model to the HF hub
    if push_to_hub:
        logger.info("Pushing model to the hub...")
        # pick the model name depending on whether "detection" appears in checkpoint_url
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        # push the model to the HF hub
        model.push_to_hub(model_name, revision="no_timm")
        # push the image processor as well
        image_processor.push_to_hub(model_name, revision="no_timm")
# Entry point when the script is executed directly (rather than imported as a module)
if __name__ == "__main__":
    # build the argument parser
    parser = argparse.ArgumentParser()

    # --checkpoint_url: URL of the checkpoint to convert; defaults to the public detection checkpoint
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert."
    )

    # --pytorch_dump_folder_path: output folder for the converted PyTorch model; defaults to None
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model."
    )

    # --push_to_hub: boolean flag indicating whether to push the converted model to the 🤗 hub
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model to the 🤗 hub."
    )

    # parse the command-line arguments into the args object
    args = parser.parse_args()

    # run the conversion with the parsed arguments
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
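
The entry point can also be invoked directly from Python instead of the CLI; a minimal sketch (network access required, and the dump folder name is hypothetical):

convert_table_transformer_checkpoint(
    checkpoint_url="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
    pytorch_dump_folder_path="./table-transformer-detection",  # hypothetical output folder
    push_to_hub=False,
)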

.\models\table_transformer\modeling_table_transformer.py

# coding declaration: the file is UTF-8 encoded
# Copyright notice and license information: states the copyright holder and the governing license
# You may not use this file except in compliance with the License
# A copy of the Apache License 2.0 is available at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, the software is distributed on an
# "AS IS" basis, without warranties or conditions of any kind, either express or implied;
# see the License for the specific language governing permissions and limitations
""" PyTorch Table Transformer model."""

# Import the required libraries
import math
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union

import torch  # PyTorch
from torch import Tensor, nn  # tensor type and neural-network modules

# Import assorted helper functions and model components
from ...activations import ACT2FN  # mapping from activation names to functions
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask  # utility for preparing 4D attention masks
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithCrossAttentions,
    Seq2SeqModelOutput,
)  # model output classes
from ...modeling_utils import PreTrainedModel  # base class for pretrained models
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_accelerate_available,
    is_scipy_available,
    is_timm_available,
    is_vision_available,
    logging,
    replace_return_docstrings,
    requires_backends,
)  # miscellaneous utilities

from ...utils.backbone_utils import load_backbone  # helper for loading the backbone network
from .configuration_table_transformer import TableTransformerConfig  # Table Transformer configuration class


# if SciPy is available, import the linear sum assignment function
if is_scipy_available():
    from scipy.optimize import linear_sum_assignment

# if timm is available, import its model factory
if is_timm_available():
    from timm import create_model

# if the vision backend is available, import the box-format conversion helper
if is_vision_available():
    from transformers.image_transforms import center_to_corners_format

# if accelerate is available, import PartialState and the reduce utility
if is_accelerate_available():
    from accelerate import PartialState
    from accelerate.utils import reduce

# get a logger instance for this module
logger = logging.get_logger(__name__)

# configuration referenced in the docstrings
_CONFIG_FOR_DOC = "TableTransformerConfig"
# checkpoint referenced in the docstrings
_CHECKPOINT_FOR_DOC = "microsoft/table-transformer-detection"

# list of pretrained model archives
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/table-transformer-detection",
    # See all Table Transformer models at https://huggingface.co/models?filter=table-transformer
]

# Dataclass wrapping the outputs of the Table Transformer decoder
@dataclass
class TableTransformerDecoderOutput(BaseModelOutputWithCrossAttentions):
    """
    Base class for outputs of the TABLE_TRANSFORMER decoder. This class adds one attribute to
    BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output
    of each decoder layer, each of them gone through a layernorm. This is useful when training the model with
    auxiliary decoding losses.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, containing the hidden
            states of the model at the output of each layer.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length, sequence_length)`,
            containing the attention weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` of shape `(batch_size, num_heads, sequence_length, sequence_length)`,
            containing the attention weights of the decoder's cross-attention layers after the attention softmax, used
            to compute the weighted average in the cross-attention heads.
        intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
            Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
            layernorm.
    """

    # optional tensor holding the intermediate decoder hidden states, defaults to None
    intermediate_hidden_states: Optional[torch.FloatTensor] = None
# Dataclass for the outputs of the TABLE_TRANSFORMER encoder-decoder model, extending Seq2SeqModelOutput
@dataclass
class TableTransformerModelOutput(Seq2SeqModelOutput):
    """
    Base class for outputs of the TABLE_TRANSFORMER encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput,
    namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them
    gone through a layernorm. This is useful when training the model with auxiliary decoding losses.
    """

    # optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each passed through a layernorm
    intermediate_hidden_states: Optional[torch.FloatTensor] = None


# Dataclass for the outputs of the TABLE_TRANSFORMER object detection model
@dataclass
class TableTransformerObjectDetectionOutput(ModelOutput):
    """
    Output type of [`TableTransformerForObjectDetection`].
    """

    # optional loss tensor
    loss: Optional[torch.FloatTensor] = None
    # optional dictionary with the individual losses
    loss_dict: Optional[Dict] = None
    # classification logits (raw model outputs)
    logits: torch.FloatTensor = None
    # predicted bounding boxes
    pred_boxes: torch.FloatTensor = None
    # optional list of auxiliary outputs, one dict per intermediate decoder layer
    auxiliary_outputs: Optional[List[Dict]] = None
    # last decoder hidden state
    last_hidden_state: Optional[torch.FloatTensor] = None
    # decoder hidden states
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # decoder attention weights
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # cross-attention weights
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # last encoder hidden state
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    # encoder hidden states
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # encoder attention weights
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None


# BatchNorm2d with frozen batch statistics and affine parameters
class TableTransformerFrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which any other models than
    torchvision.models.resnet[18,34,50,101] produce nans.
    """

    def __init__(self, n):
        super().__init__()
        # register the weight buffer, initialized to ones
        self.register_buffer("weight", torch.ones(n))
        # register the bias buffer, initialized to zeros
        self.register_buffer("bias", torch.zeros(n))
        # register the running-mean buffer, initialized to zeros
        self.register_buffer("running_mean", torch.zeros(n))
        # register the running-variance buffer, initialized to ones
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        # drop the num_batches_tracked key from the state dict to avoid errors when loading
        num_batches_tracked_key = prefix + "num_batches_tracked"
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        # delegate to the parent implementation
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x):
        # reshape weight, bias and running statistics to (1, C, 1, 1) so they broadcast over NCHW input
        weight = self.weight.reshape(1, -1, 1, 1)
        bias = self.bias.reshape(1, -1, 1, 1)
        running_var = self.running_var.reshape(1, -1, 1, 1)
        running_mean = self.running_mean.reshape(1, -1, 1, 1)
        # small constant for numerical stability
        epsilon = 1e-5
        # scale factor: weight times the inverse square root of (running_var + epsilon)
        scale = weight * (running_var + epsilon).rsqrt()
        # fold the running mean into the bias
        bias = bias - running_mean * scale
        # return the scaled and shifted input
        return x * scale + bias
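
Because the forward pass above is just an affine map with the statistics folded in, it should agree with a standard `nn.BatchNorm2d` in eval mode; a small sanity sketch (not part of the original file):

import torch
from torch import nn

bn = nn.BatchNorm2d(8).eval()
bn.running_mean.uniform_(-1, 1)   # give the running stats non-trivial values
bn.running_var.uniform_(0.5, 2.0)

frozen = TableTransformerFrozenBatchNorm2d(8)
frozen.load_state_dict(bn.state_dict())  # num_batches_tracked is dropped by _load_from_state_dict

x = torch.randn(2, 8, 4, 4)
print(torch.allclose(bn(x), frozen(x), atol=1e-6))  # True: same affine map, eps = 1e-5 in both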
# Copied from transformers.models.detr.modeling_detr.replace_batch_norm, replacing every torch.nn.BatchNorm2d with TableTransformerFrozenBatchNorm2d
def replace_batch_norm(model):
    """
    Recursively replace all `torch.nn.BatchNorm2d` layers with `TableTransformerFrozenBatchNorm2d`.

    Args:
        model (torch.nn.Module):
            input model
    """
    # iterate over the model's direct children
    for name, module in model.named_children():
        # if the current module is an nn.BatchNorm2d
        if isinstance(module, nn.BatchNorm2d):
            # create a new frozen batch-norm module
            new_module = TableTransformerFrozenBatchNorm2d(module.num_features)

            # if the original module does not live on the "meta" device
            if not module.weight.device == torch.device("meta"):
                # copy weight, bias, running mean and running variance into the new module
                new_module.weight.data.copy_(module.weight)
                new_module.bias.data.copy_(module.bias)
                new_module.running_mean.data.copy_(module.running_mean)
                new_module.running_var.data.copy_(module.running_var)

            # swap the module in place
            model._modules[name] = new_module

        # if the current module has children of its own, recurse into them
        if len(list(module.children())) > 0:
            replace_batch_norm(module)
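
A usage sketch, under the assumption that torchvision is installed: freezing every batch-norm layer of a plain ResNet in place.

import torch
from torchvision.models import resnet18

backbone = resnet18()
with torch.no_grad():
    replace_batch_norm(backbone)
print(type(backbone.bn1).__name__)  # TableTransformerFrozenBatchNorm2d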


# Copied from transformers.models.detr.modeling_detr.DetrConvEncoder with Detr->TableTransformer
class TableTransformerConvEncoder(nn.Module):
    """
    Convolutional backbone, using either the AutoBackbone API or one model from the timm library.

    nn.BatchNorm2d layers are replaced by TableTransformerFrozenBatchNorm2d as defined above.
    """

    def __init__(self, config):
        super().__init__()

        self.config = config

        # depending on the configuration, use a timm backbone or load a custom one
        if config.use_timm_backbone:
            # when using a timm backbone, make sure timm is available
            requires_backends(self, ["timm"])
            kwargs = {}
            if config.dilation:
                kwargs["output_stride"] = 16
            # create the timm model, extracting features only
            backbone = create_model(
                config.backbone,
                pretrained=config.use_pretrained_backbone,
                features_only=True,
                out_indices=(1, 2, 3, 4),
                in_chans=config.num_channels,
                **kwargs,
            )
        else:
            # otherwise load a custom backbone
            backbone = load_backbone(config)

        # replace batch norm by frozen batch norm
        with torch.no_grad():
            replace_batch_norm(backbone)
        self.model = backbone
        # record the number of output channels of the backbone stages
        self.intermediate_channel_sizes = (
            self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels
        )

        # depending on the backbone type, freeze the parameters of the early stages
        backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type
        if "resnet" in backbone_model_type:
            for name, parameter in self.model.named_parameters():
                if config.use_timm_backbone:
                    if "layer2" not in name and "layer3" not in name and "layer4" not in name:
                        parameter.requires_grad_(False)
                else:
                    if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name:
                        parameter.requires_grad_(False)

    def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
        # send the pixel values through the backbone to get a list of feature maps
        features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps

        # collect (feature map, mask) pairs
        out = []
        # iterate over the feature maps
        for feature_map in features:
            # downsample the pixel mask to match the shape of the corresponding feature map
            mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
            # append the feature map together with its mask
            out.append((feature_map, mask))
        # return the list of (feature map, mask) tuples
        return out
# Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->TableTransformer
class TableTransformerConvModel(nn.Module):
    """
    This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder.
    """

    def __init__(self, conv_encoder, position_embedding):
        super().__init__()
        self.conv_encoder = conv_encoder  # the convolutional encoder
        self.position_embedding = position_embedding  # the position embedding module

    def forward(self, pixel_values, pixel_mask):
        # send pixel values and pixel mask through the backbone, getting a list of (feature map, pixel mask) tuples
        out = self.conv_encoder(pixel_values, pixel_mask)
        pos = []
        for feature_map, mask in out:
            # compute the position encoding for each feature map
            pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype))

        return out, pos


# Copied from transformers.models.detr.modeling_detr.DetrSinePositionEmbedding with Detr->TableTransformer
class TableTransformerSinePositionEmbedding(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
    need paper, generalized to work on images.
    """

    def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.embedding_dim = embedding_dim  # embedding dimension
        self.temperature = temperature  # temperature parameter
        self.normalize = normalize  # whether to normalize the cumulative sums
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale  # scaling factor

    def forward(self, pixel_values, pixel_mask):
        if pixel_mask is None:
            raise ValueError("No pixel mask provided")  # a pixel mask is required
        y_embed = pixel_mask.cumsum(1, dtype=torch.float32)  # cumulative sum along the y axis
        x_embed = pixel_mask.cumsum(2, dtype=torch.float32)  # cumulative sum along the x axis
        if self.normalize:
            y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale  # normalize the y cumulative sums
            x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale  # normalize the x cumulative sums

        dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float()  # dimension indices
        dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim)  # frequency terms

        pos_x = x_embed[:, :, :, None] / dim_t  # x position encodings
        pos_y = y_embed[:, :, :, None] / dim_t  # y position encodings
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)  # sin on even, cos on odd indices, then flatten
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)  # sin on even, cos on odd indices, then flatten
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)  # concatenate y/x encodings and move channels first

        return pos
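
A toy sketch of the module above on a tiny all-ones mask, just to make the output layout concrete (shapes are what matter here; the values are irrelevant):

import torch

pos_embed = TableTransformerSinePositionEmbedding(embedding_dim=4, normalize=True)
pixel_values = torch.zeros(1, 8, 2, 3)             # only the shape/device of this tensor is used
pixel_mask = torch.ones(1, 2, 3, dtype=torch.long)  # all positions are real pixels
print(pos_embed(pixel_values, pixel_mask).shape)    # torch.Size([1, 8, 2, 3]) -> 2 * embedding_dim channels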


# Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding with Detr->TableTransformer
class TableTransformerLearnedPositionEmbedding(nn.Module):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """
    # constructor: store the embedding dimension and initialize the parent class
    def __init__(self, embedding_dim=256):
        super().__init__()
        # embedding table with 50 row entries, each of dimension embedding_dim
        self.row_embeddings = nn.Embedding(50, embedding_dim)
        # embedding table with 50 column entries, each of dimension embedding_dim
        self.column_embeddings = nn.Embedding(50, embedding_dim)

    # forward pass: takes pixel values and an optional pixel mask
    def forward(self, pixel_values, pixel_mask=None):
        # read off the feature map height and width
        height, width = pixel_values.shape[-2:]
        # indices 0..width-1, used to look up the column embeddings
        width_values = torch.arange(width, device=pixel_values.device)
        # indices 0..height-1, used to look up the row embeddings
        height_values = torch.arange(height, device=pixel_values.device)
        # look up the column embeddings; x_emb has shape [width, embedding_dim]
        x_emb = self.column_embeddings(width_values)
        # look up the row embeddings; y_emb has shape [height, embedding_dim]
        y_emb = self.row_embeddings(height_values)
        # build the position tensor by concatenating column and row embeddings, shape [height, width, 2*embedding_dim]
        pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1)
        # rearrange to [2*embedding_dim, height, width]
        pos = pos.permute(2, 0, 1)
        # add a batch dimension: [1, 2*embedding_dim, height, width]
        pos = pos.unsqueeze(0)
        # repeat along the batch dimension: [batch_size, 2*embedding_dim, height, width]
        pos = pos.repeat(pixel_values.shape[0], 1, 1, 1)
        # return the position encoding
        return pos
# Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->TableTransformer
def build_position_encoding(config):
    # each spatial axis gets half of the model dimension
    n_steps = config.d_model // 2
    # if the position embedding type is "sine"
    if config.position_embedding_type == "sine":
        # TODO find a better way of exposing the other arguments
        # initialize a sine position embedding
        position_embedding = TableTransformerSinePositionEmbedding(n_steps, normalize=True)
    # if the position embedding type is "learned"
    elif config.position_embedding_type == "learned":
        # initialize a learned position embedding
        position_embedding = TableTransformerLearnedPositionEmbedding(n_steps)
    else:
        # raise an error for unsupported position embedding types
        raise ValueError(f"Not supported {config.position_embedding_type}")

    return position_embedding


# Copied from transformers.models.detr.modeling_detr.DetrAttention with DETR->TABLE_TRANSFORMER,Detr->TableTransformer
class TableTransformerAttention(nn.Module):
    """
    Multi-headed attention from the 'Attention Is All You Need' paper.

    Here, we add position embeddings to the queries and keys (as explained in the TABLE_TRANSFORMER paper).
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        bias: bool = True,
    ):
        super().__init__()
        # store the attention hyperparameters
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        # raise an error if embed_dim is not divisible by num_heads
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: "
                f"{num_heads})."
            )
        # scaling factor: inverse square root of the head dimension
        self.scaling = self.head_dim**-0.5

        # linear projections for the keys, values and queries, plus the output projection
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    # reshape a tensor into the (batch, heads, seq_len, head_dim) layout used by multi-head attention
    def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
        return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    # add the given position embeddings (object queries) to a tensor
    def with_pos_embed(self, tensor: torch.Tensor, object_queries: Optional[Tensor], **kwargs):
        position_embeddings = kwargs.pop("position_embeddings", None)

        # raise an error on unexpected keyword arguments
        if kwargs:
            raise ValueError(f"Unexpected arguments {kwargs.keys()}")

        # position_embeddings and object_queries are mutually exclusive
        if position_embeddings is not None and object_queries is not None:
            raise ValueError(
                "Cannot specify both position_embeddings and object_queries. Please use just object_queries"
            )

        # warn when the deprecated position_embeddings argument is used, and fall back to object_queries
        if position_embeddings is not None:
            logger.warning_once(
                "position_embeddings has been deprecated and will be removed in v4.34. Please use object_queries instead"
            )
            object_queries = position_embeddings

        # return the tensor unchanged if object_queries is None, otherwise add the embeddings
        return tensor if object_queries is None else tensor + object_queries
    # forward pass, used both at inference and training time
    def forward(
        self,
        # hidden states, typically an intermediate representation from the model
        hidden_states: torch.Tensor,
        # attention mask selecting which positions may be attended to
        attention_mask: Optional[torch.Tensor] = None,
        # object queries to be added to the hidden states
        object_queries: Optional[torch.Tensor] = None,
        # key/value states used for cross-attention
        key_value_states: Optional[torch.Tensor] = None,
        # spatial position embeddings added to the keys and values
        spatial_position_embeddings: Optional[torch.Tensor] = None,
        # whether to return the attention weights
        output_attentions: bool = False,
        # additional keyword arguments
        **kwargs,
    ):
        # (attention body omitted in this excerpt)
        ...

class TableTransformerEncoderLayer(nn.Module):
    # Copied from transformers.models.detr.modeling_detr.DetrEncoderLayer.__init__ with Detr->TableTransformer
    def __init__(self, config: TableTransformerConfig):
        super().__init__()
        # the embedding dimension is the model dimension d_model from the config
        self.embed_dim = config.d_model
        # self-attention layer, built from TableTransformerAttention
        self.self_attn = TableTransformerAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
        )
        # layer norm associated with the self-attention block
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        # dropout probability
        self.dropout = config.dropout
        # activation function selected from the config
        self.activation_fn = ACT2FN[config.activation_function]
        # dropout probability applied after the activation
        self.activation_dropout = config.activation_dropout
        # first feed-forward layer, mapping the embedding dimension to the encoder FFN dimension
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        # second feed-forward layer, mapping the FFN dimension back to the embedding dimension
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        # final layer norm
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    # forward pass: takes hidden states, an attention mask and optional object queries
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        object_queries: torch.Tensor = None,
        output_attentions: bool = False,
    ):
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
                values.
            object_queries (`torch.FloatTensor`, *optional*): object queries, to be added to hidden_states.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            object_queries=object_queries,
            output_attentions=output_attentions,
        )

        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)

        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        hidden_states = residual + hidden_states

        if self.training:
            if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
                clamp_value = torch.finfo(hidden_states.dtype).max - 1000
                hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
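
As a smoke test of the pre-normalization encoder layer above, one can instantiate it from a default `TableTransformerConfig` (a sketch run inside this module's scope, assuming the library defaults such as `d_model=256`):

import torch
from transformers import TableTransformerConfig

config = TableTransformerConfig()                 # defaults assumed: d_model=256
layer = TableTransformerEncoderLayer(config)
hidden_states = torch.randn(1, 10, config.d_model)
outputs = layer(hidden_states, attention_mask=None)
print(outputs[0].shape)  # torch.Size([1, 10, 256])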
# Copied from transformers.models.detr.modeling_detr.DetrDecoderLayer.__init__ with Detr->TableTransformer
class TableTransformerDecoderLayer(nn.Module):
    # constructor for the decoder layer
    def __init__(self, config: TableTransformerConfig):
        super().__init__()
        self.embed_dim = config.d_model

        # self-attention, modelling the relations within the input sequence
        self.self_attn = TableTransformerAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        # layer norm for the self-attention output
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)

        # encoder-decoder (cross) attention, relating the queries to the encoder hidden states
        self.encoder_attn = TableTransformerAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)

        # first feed-forward layer
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        # second feed-forward layer
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)

        # final layer norm for the feed-forward output
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    # forward pass, defining the data flow from input to output
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        object_queries: Optional[torch.Tensor] = None,
        query_position_embeddings: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ):
        # (body omitted in this excerpt)
        pass

# Copied from transformers.models.detr.modeling_detr.DetrClassificationHead with Detr->TableTransformer
class TableTransformerClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    # constructor
    def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float):
        super().__init__()
        # dense layer mapping the input dimension to the inner dimension
        self.dense = nn.Linear(input_dim, inner_dim)
        # dropout to mitigate overfitting
        self.dropout = nn.Dropout(p=pooler_dropout)
        # output projection mapping the inner dimension to the number of classes
        self.out_proj = nn.Linear(inner_dim, num_classes)

    # forward pass
    def forward(self, hidden_states: torch.Tensor):
        # dropout on the input
        hidden_states = self.dropout(hidden_states)
        # dense projection to the inner dimension
        hidden_states = self.dense(hidden_states)
        # tanh non-linearity
        hidden_states = torch.tanh(hidden_states)
        # dropout once more
        hidden_states = self.dropout(hidden_states)
        # project to the class logits
        hidden_states = self.out_proj(hidden_states)
        return hidden_states
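
A usage sketch for the classification head with toy dimensions (all values hypothetical):

import torch

head = TableTransformerClassificationHead(input_dim=256, inner_dim=256, num_classes=3, pooler_dropout=0.1)
logits = head(torch.randn(2, 256))
print(logits.shape)  # torch.Size([2, 3])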


# Base class for all Table Transformer pretrained models
class TableTransformerPreTrainedModel(PreTrainedModel):
    # the associated configuration class
    config_class = TableTransformerConfig
    # prefix of the base model
    base_model_prefix = "model"
    # name of the main input (pixel values)
    main_input_name = "pixel_values"
    # modules that must not be split across devices
    _no_split_modules = [
        r"TableTransformerConvEncoder",
        r"TableTransformerEncoderLayer",
        r"TableTransformerDecoderLayer",
    ]

    # initialize module weights with the configured standard deviation
    def _init_weights(self, module):
        std = self.config.init_std  # read the init standard deviation from the config

        # for learned position embeddings, initialize the row and column embeddings uniformly
        if isinstance(module, TableTransformerLearnedPositionEmbedding):
            nn.init.uniform_(module.row_embeddings.weight)  # uniform init for the row embeddings
            nn.init.uniform_(module.column_embeddings.weight)  # uniform init for the column embeddings

        # linear, convolutional and batch-norm layers are initialized from a normal distribution
        if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
            # slightly different from the TF version, which uses truncated_normal for initialization
            # cf. https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)  # normal init for the weights
            if module.bias is not None:
                module.bias.data.zero_()  # zero-initialize the bias if present

        # embedding layers are also initialized from a normal distribution
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)  # normal init for the weights
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()  # zero out the padding-index row if present
# Docstring describing the inputs expected by Table Transformer models
TABLE_TRANSFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default.
            Pixel values can be obtained using [`DetrImageProcessor`]. See [`DetrImageProcessor.__call__`] for details.
        pixel_mask (`torch.FloatTensor` of shape `(batch_size, height, width)`, *optional*):
            Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
            - 1 for pixels that are real (i.e. **not masked**),
            - 0 for pixels that are padding (i.e. **masked**).
            [What are attention masks?](../glossary#attention-mask)
        decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):
            Not used by default. Can be used to mask object queries.
        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
            can choose to directly pass a flattened representation of an image.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
            Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
            embedded representation.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """
    The bare Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) outputting raw
    hidden-states without any specific head on top.
    """,
    TABLE_TRANSFORMER_START_DOCSTRING,
)
"""
# 从TableTransformerPreTrainedModel类继承,并重写__init__方法,初始化TableTransformerModel对象
class TableTransformerModel(TableTransformerPreTrainedModel):
    # 从transformers.models.detr.modeling_detr.DetrModel.__init__复制而来,将Detr替换为TableTransformer
    def __init__(self, config: TableTransformerConfig):
        # initialize the parent class
        super().__init__(config)

        # create the backbone and the position encoding
        backbone = TableTransformerConvEncoder(config)
        object_queries = build_position_encoding(config)
        self.backbone = TableTransformerConvModel(backbone, object_queries)

        # projection layer mapping the backbone output channels to config.d_model with a 1x1 convolution
        self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1)

        # query position embeddings: one learned embedding of size config.d_model per object query
        self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model)

        # create the encoder and the decoder
        self.encoder = TableTransformerEncoder(config)
        self.decoder = TableTransformerDecoder(config)

        # initialize weights and apply final processing
        self.post_init()

    # return the encoder
    def get_encoder(self):
        return self.encoder

    # return the decoder
    def get_decoder(self):
        return self.decoder

    # freeze the backbone parameters so they are not updated during backpropagation
    def freeze_backbone(self):
        for name, param in self.backbone.conv_encoder.model.named_parameters():
            param.requires_grad_(False)

    # unfreeze the backbone parameters so they are updated during backpropagation
    def unfreeze_backbone(self):
        for name, param in self.backbone.conv_encoder.model.named_parameters():
            param.requires_grad_(True)

    # forward pass of the model, accepting several optional inputs
    @add_start_docstrings_to_model_forward(TABLE_TRANSFORMER_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TableTransformerModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        pixel_mask: Optional[torch.FloatTensor] = None,
        decoder_attention_mask: Optional[torch.FloatTensor] = None,
        encoder_outputs: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        # (forward body omitted in this excerpt)
        pass


@add_start_docstrings(
    """
    Table Transformer Model (consisting of a backbone and encoder-decoder Transformer) with an object detection head
    on top, for tasks such as COCO detection.
    """,
    TABLE_TRANSFORMER_START_DOCSTRING,
)
class TableTransformerForObjectDetection(TableTransformerPreTrainedModel):
    def __init__(self, config: TableTransformerConfig):
        # initialize the parent class
        super().__init__(config)

        # the base Table Transformer model
        self.model = TableTransformerModel(config)

        # classification head for the object detection outputs
        self.class_labels_classifier = nn.Linear(
            config.d_model, config.num_labels + 1
        )  # add one extra class for the "no object" prediction

        # bounding box prediction head
        self.bbox_predictor = TableTransformerMLPPredictionHead(
            input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3
        )

        # initialize weights and apply final processing
        self.post_init()

    @torch.jit.unused
    # Copied from transformers.models.detr.modeling_detr.DetrForObjectDetection._set_aux_loss
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript doesn't support dictionaries with
        # non-homogeneous values, such as a dict having both a Tensor and a list
        return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]

    @add_start_docstrings_to_model_forward(TABLE_TRANSFORMER_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TableTransformerObjectDetectionOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        pixel_mask: Optional[torch.FloatTensor] = None,
        decoder_attention_mask: Optional[torch.FloatTensor] = None,
        encoder_outputs: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[List[Dict]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        # (forward body omitted in this excerpt)
        pass
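
For completeness, a short inference sketch against the released checkpoint that this class backs (uses only public transformers APIs; network access required, and the blank PIL image stands in for a real document page):

import torch
from PIL import Image
from transformers import AutoImageProcessor, TableTransformerForObjectDetection

processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")

image = Image.new("RGB", (800, 600))  # stand-in for a real document page
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.logits.shape)  # (1, num_queries, num_labels + 1) = (1, 15, 3) for the detection checkpoint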
# Copied from transformers.models.detr.modeling_detr.dice_loss
# Compute the DICE loss, a mask analogue of generalized IoU
def dice_loss(inputs, targets, num_boxes):
    """
    Compute the DICE loss, similar to generalized IOU for masks

    Args:
        inputs: A float tensor of arbitrary shape.
                The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
                 classification label for each element in inputs (0 for the negative class and 1 for the positive
                 class).
    """
    # apply a sigmoid so the predictions lie in (0, 1)
    inputs = inputs.sigmoid()
    # flatten the predictions to (batch_size, -1)
    inputs = inputs.flatten(1)
    # numerator of the DICE score
    numerator = 2 * (inputs * targets).sum(1)
    # denominator of the DICE score
    denominator = inputs.sum(-1) + targets.sum(-1)
    # DICE loss with additive smoothing
    loss = 1 - (numerator + 1) / (denominator + 1)
    # normalize by the number of boxes
    return loss.sum() / num_boxes
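
A quick numeric check of the formula above (toy tensors, not from the original file): a confidently correct mask prediction drives the loss to zero.

import torch

targets = torch.tensor([[1.0, 1.0, 0.0, 0.0]])
good_logits = torch.tensor([[10.0, 10.0, -10.0, -10.0]])  # sigmoid ~ [1, 1, 0, 0]
print(dice_loss(good_logits, targets, num_boxes=1))  # ~0.0: numerator+1 equals denominator+1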


# Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss
# Sigmoid focal loss used for dense detection
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.

    Args:
        inputs (`torch.FloatTensor` of arbitrary shape):
            The predictions for each example.
        targets (`torch.FloatTensor` with the same shape as `inputs`)
            A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class
            and 1 for the positive class).
        alpha (`float`, *optional*, defaults to `0.25`):
            Optional weighting factor in the range (0,1) to balance positive vs. negative examples.
        gamma (`int`, *optional*, defaults to `2`):
            Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples.

    Returns:
        Loss tensor
    """
    # sigmoid of the raw predictions
    prob = inputs.sigmoid()
    # element-wise binary cross-entropy
    ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # p_t, the probability assigned to the true class, used to modulate the loss
    p_t = prob * targets + (1 - prob) * (1 - targets)
    # down-weight easy examples with the modulating factor (1 - p_t) ** gamma
    loss = ce_loss * ((1 - p_t) ** gamma)

    # if alpha >= 0, apply the class-balancing factor alpha_t
    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss

    # average per example, then normalize by the number of boxes
    return loss.mean(1).sum() / num_boxes
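
A toy check of the modulating behaviour (values made up): on confidently correct predictions, the focal loss is far smaller than the plain binary cross-entropy it wraps.

import torch
from torch import nn

targets = torch.tensor([[1.0, 0.0]])
logits = torch.tensor([[4.0, -4.0]])  # confidently correct on both elements
bce = nn.functional.binary_cross_entropy_with_logits(logits, targets, reduction="none").mean()
focal = sigmoid_focal_loss(logits, targets, num_boxes=1)
print(focal.item() < bce.item())  # True: the (1 - p_t) ** gamma factor shrinks easy examples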


# Copied from transformers.models.detr.modeling_detr.DetrLoss with Detr->TableTransformer,detr->table_transformer
# Computes the losses for TableTransformerForObjectDetection/TableTransformerForSegmentation
class TableTransformerLoss(nn.Module):
    """
    This class computes the losses for TableTransformerForObjectDetection/TableTransformerForSegmentation. The process happens in two steps: 1)
    we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair
    of matched ground-truth / prediction (supervise class and box).

    A note on the `num_classes` argument (copied from original repo in table_transformer.py): "the naming of the `num_classes`
    parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where `max_obj_id` is
    the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass `num_classes` to
    be 91."

    Args:
        matcher (`TableTransformerHungarianMatcher`):
            Module able to compute a matching between targets and proposals.
        num_classes (`int`):
            Number of object categories, omitting the special no-object category.
        eos_coef (`float`):
            Relative classification weight applied to the no-object category.
        losses (`List[str]`):
            List of all the losses to be applied. See `get_loss` for a list of all available losses.
    """

    def __init__(self, matcher, num_classes, eos_coef, losses):
        super().__init__()
        self.matcher = matcher  # matcher module computing the assignment between targets and proposals
        self.num_classes = num_classes  # number of object categories
        self.eos_coef = eos_coef  # weight coefficient for the no-object category
        self.losses = losses  # list of losses to apply
        empty_weight = torch.ones(self.num_classes + 1)
        empty_weight[-1] = self.eos_coef
        self.register_buffer("empty_weight", empty_weight)  # register the class-weight tensor as a buffer

    # removed logging parameter, which was part of the original implementation
    def loss_labels(self, outputs, targets, indices, num_boxes):
        """
        Classification loss (NLL) targets dicts must contain the key "class_labels" containing a tensor of dim
        [nb_target_boxes]
        """
        if "logits" not in outputs:
            raise KeyError("No logits were found in the outputs")
        source_logits = outputs["logits"]  # Extract logits from model outputs

        idx = self._get_source_permutation_idx(indices)  # Get indices for permutation
        target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])  # Gather target class labels
        target_classes = torch.full(
            source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device
        )
        target_classes[idx] = target_classes_o  # Assign target class labels according to permutation indices

        loss_ce = nn.functional.cross_entropy(source_logits.transpose(1, 2), target_classes, self.empty_weight)
        losses = {"loss_ce": loss_ce}  # Compute and store classification loss

        return losses

    @torch.no_grad()
    def loss_cardinality(self, outputs, targets, indices, num_boxes):
        """
        Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes.

        This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients.
        """
        logits = outputs["logits"]  # Extract logits from model outputs
        device = logits.device
        target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device)  # Calculate target lengths
        # Count the number of predictions that are NOT "no-object" (which is the last class)
        card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
        card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())  # Compute cardinality error
        losses = {"cardinality_error": card_err}  # Store cardinality error

        return losses
    def loss_boxes(self, outputs, targets, indices, num_boxes):
        """
        Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.

        Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes
        are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        # raise an error if the outputs contain no predicted boxes
        if "pred_boxes" not in outputs:
            raise KeyError("No predicted boxes found in outputs")

        # get the source permutation indices from the matching
        idx = self._get_source_permutation_idx(indices)

        # select the matched predicted boxes
        source_boxes = outputs["pred_boxes"][idx]

        # concatenate the matched target boxes into one tensor
        target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)

        # element-wise L1 regression loss
        loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none")

        losses = {}
        # sum the L1 loss and normalize by the number of boxes
        losses["loss_bbox"] = loss_bbox.sum() / num_boxes

        # GIoU loss
        loss_giou = 1 - torch.diag(
            generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes))
        )
        # sum the GIoU loss and normalize by the number of boxes
        losses["loss_giou"] = loss_giou.sum() / num_boxes
        return losses

    def loss_masks(self, outputs, targets, indices, num_boxes):
        """
        Compute the losses related to the masks: the focal loss and the dice loss.

        Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w].
        """
        # raise an error if the outputs contain no predicted masks
        if "pred_masks" not in outputs:
            raise KeyError("No predicted masks found in outputs")

        # get the source and target permutation indices from the matching
        source_idx = self._get_source_permutation_idx(indices)
        target_idx = self._get_target_permutation_idx(indices)

        # select the matched predicted masks
        source_masks = outputs["pred_masks"]
        source_masks = source_masks[source_idx]

        # gather all target masks and convert them into a nested tensor
        masks = [t["masks"] for t in targets]
        target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
        target_masks = target_masks.to(source_masks)
        target_masks = target_masks[target_idx]

        # upsample the predicted masks to the target size
        source_masks = nn.functional.interpolate(
            source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False
        )
        source_masks = source_masks[:, 0].flatten(1)

        target_masks = target_masks.flatten(1)
        target_masks = target_masks.view(source_masks.shape)

        losses = {
            # sigmoid focal loss on the masks
            "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes),
            # dice loss on the masks
            "loss_dice": dice_loss(source_masks, target_masks, num_boxes),
        }
        return losses

    def _get_source_permutation_idx(self, indices):
        # return the batch indices and source indices of the matched predictions
        batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)])
        source_idx = torch.cat([source for (source, _) in indices])
        return batch_idx, source_idx

    def _get_target_permutation_idx(self, indices):
        # return the batch indices and target indices of the matched targets
        # batch index for every matched target
        batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)])
        # concatenated target indices
        target_idx = torch.cat([target for (_, target) in indices])
        return batch_idx, target_idx

    def get_loss(self, loss, outputs, targets, indices, num_boxes):
        # map loss names to the corresponding methods
        loss_map = {
            "labels": self.loss_labels,
            "cardinality": self.loss_cardinality,
            "boxes": self.loss_boxes,
            "masks": self.loss_masks,
        }
        # raise an error if the requested loss is not in the map
        if loss not in loss_map:
            raise ValueError(f"Loss {loss} not supported")
        # dispatch to the corresponding loss method and return the result
        return loss_map[loss](outputs, targets, indices, num_boxes)

    def forward(self, outputs, targets):
        """
        This performs the loss computation.

        Args:
             outputs (`dict`, *optional*):
                Dictionary of tensors, see the output specification of the model for the format.
             targets (`List[dict]`, *optional*):
                List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the
                losses applied, see each loss' doc.
        """
        # filter out the auxiliary outputs before matching
        outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"}

        # run the matcher to get the assignment between outputs and targets
        indices = self.matcher(outputs_without_aux, targets)

        # compute the average number of target boxes across all nodes, for normalization purposes
        num_boxes = sum(len(t["class_labels"]) for t in targets)
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
        world_size = 1
        # if accelerate is available, aggregate across processes
        if is_accelerate_available():
            if PartialState._shared_state != {}:
                # sum the number of boxes across processes
                num_boxes = reduce(num_boxes)
                # number of processes in the current world
                world_size = PartialState().num_processes
        # normalize the box count and clamp it to at least 1
        num_boxes = torch.clamp(num_boxes / world_size, min=1).item()

        # compute all the requested losses
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))

        # in case of auxiliary losses, repeat the computation for the output of each intermediate layer
        if "auxiliary_outputs" in outputs:
            for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]):
                indices = self.matcher(auxiliary_outputs, targets)
                for loss in self.losses:
                    if loss == "masks":
                        # intermediate mask losses are too costly to compute, so they are skipped
                        continue
                    # compute the loss dict for this intermediate layer and suffix its keys with the layer index
                    l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
                    l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
                    losses.update(l_dict)

        return losses
# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->TableTransformer
class TableTransformerMLPPredictionHead(nn.Module):
    """
    Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
    height and width of a box w.r.t. an image.

    Copied from https://github.com/facebookresearch/table_transformer/blob/master/models/table_transformer.py
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        # build the list of linear layers making up the MLP
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        # apply each layer, with a ReLU after every layer except the last one
        for i, layer in enumerate(self.layers):
            x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x
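
A usage sketch with the same dimensions the detection head uses above (`d_model=256`, 3 layers, 4 box coordinates); the trailing `sigmoid` mirrors how DETR-style models squash boxes into `[0, 1]`:

import torch

bbox_head = TableTransformerMLPPredictionHead(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
boxes = bbox_head(torch.randn(1, 15, 256)).sigmoid()  # (cx, cy, w, h) in [0, 1]
print(boxes.shape)  # torch.Size([1, 15, 4])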


# Copied from transformers.models.detr.modeling_detr.DetrHungarianMatcher with Detr->TableTransformer
class TableTransformerHungarianMatcher(nn.Module):
    """
    This class computes an assignment between the targets and the predictions of the network.

    For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more
    predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are
    un-matched (and thus treated as non-objects).

    Args:
        class_cost:
            The relative weight of the classification error in the matching cost.
        bbox_cost:
            The relative weight of the L1 error of the bounding box coordinates in the matching cost.
        giou_cost:
            The relative weight of the giou loss of the bounding box in the matching cost.
    """

    def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1):
        super().__init__()
        # make sure the required backend (scipy) is available
        requires_backends(self, ["scipy"])

        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        if class_cost == 0 and bbox_cost == 0 and giou_cost == 0:
            # if every matcher cost were zero, the assignment would be meaningless
            raise ValueError("All costs of the Matcher can't be 0")

    @torch.no_grad()
    def forward(self, outputs, targets):
        """
        Args:
            outputs (`dict`):
                A dictionary that contains at least these entries:
                * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
                * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates.
            targets (`List[dict]`):
                A list of targets (len(targets) = batch_size), where each target is a dict containing:
                * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of
                  ground-truth objects in the target) containing the class labels
                * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates.

        Returns:
            `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where:
            - index_i is the indices of the selected predictions (in order)
            - index_j is the indices of the corresponding selected targets (in order)
            For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
        """
        batch_size, num_queries = outputs["logits"].shape[:2]

        # We flatten to compute the cost matrices in a batch
        out_prob = outputs["logits"].flatten(0, 1).softmax(-1)  # [batch_size * num_queries, num_classes]
        out_bbox = outputs["pred_boxes"].flatten(0, 1)  # [batch_size * num_queries, 4]

        # Also concat the target labels and boxes
        target_ids = torch.cat([v["class_labels"] for v in targets])
        target_bbox = torch.cat([v["boxes"] for v in targets])

        # Compute the classification cost. Contrary to the loss, we don't use the NLL,
        # but approximate it in 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, it can be omitted.
        class_cost = -out_prob[:, target_ids]

        # Compute the L1 cost between boxes
        bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)

        # Compute the giou cost between boxes
        giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))

        # Final cost matrix: a weighted sum of the three costs
        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()

        # Perform a linear sum assignment (Hungarian algorithm) for each batch element
        sizes = [len(v["boxes"]) for v in targets]
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]

        # Return the matched (prediction, target) indices as int64 tensors
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
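
The heavy lifting is done by `scipy.optimize.linear_sum_assignment` (the Hungarian algorithm). A tiny standalone example with a made-up cost matrix:

import numpy as np
from scipy.optimize import linear_sum_assignment

# 3 predictions x 2 ground-truth boxes; lower cost = better match
cost = np.array([[0.9, 0.1],
                 [0.4, 0.8],
                 [0.2, 0.7]])
row_ind, col_ind = linear_sum_assignment(cost)
# row_ind == [0, 2], col_ind == [1, 0]: prediction 0 <-> target 1, prediction 2 <-> target 0;
# prediction 1 stays unmatched and is treated as "no object"
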
# Copied from transformers.models.detr.modeling_detr._upcast
def _upcast(t: Tensor) -> Tensor:
    # Protect against numerical overflow by promoting the tensor to an equivalent higher-precision type when needed
    if t.is_floating_point():
        # float32/float64 pass through unchanged; everything else (e.g. float16) is cast to float32
        return t if t.dtype in (torch.float32, torch.float64) else t.float()
    else:
        # int32/int64 pass through unchanged; smaller integer types are cast to int32
        return t if t.dtype in (torch.int32, torch.int64) else t.int()


# Copied from transformers.models.detr.modeling_detr.box_area
def box_area(boxes: Tensor) -> Tensor:
    """
    计算一组边界框的面积,这些边界框由它们的 (x1, y1, x2, y2) 坐标指定。

    Args:
        boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
            要计算面积的边界框。它们应该以 (x1, y1, x2, y2) 格式给出,要求 `0 <= x1 < x2` 和 `0 <= y1 < y2`。

    Returns:
        `torch.FloatTensor`: 包含每个边界框面积的张量。
    """
    # 将输入边界框张量转换为高精度类型,以防止数值溢出
    boxes = _upcast(boxes)
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
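
The upcast is not cosmetic: multiplying side lengths in float16 overflows for quite ordinary box sizes. A small demonstration:

import torch

boxes = torch.tensor([[0.0, 0.0, 300.0, 300.0]], dtype=torch.float16)
# Direct multiplication overflows: 300 * 300 = 90000 > 65504 (float16 max) -> inf
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
# box_area upcasts to float32 first and returns tensor([90000.])
box_area(boxes)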


# Copied from transformers.models.detr.modeling_detr.box_iou
def box_iou(boxes1, boxes2):
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
    right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]

    width_height = (right_bottom - left_top).clamp(min=0)  # [N,M,2]
    inter = width_height[:, :, 0] * width_height[:, :, 1]  # [N,M]

    union = area1[:, None] + area2 - inter

    iou = inter / union
    return iou, union


# Copied from transformers.models.detr.modeling_detr.generalized_box_iou
def generalized_box_iou(boxes1, boxes2):
    """
    从 https://giou.stanford.edu/ 中获取的广义 IoU。边界框应处于 [x0, y0, x1, y1](角点)格式。

    Returns:
        `torch.FloatTensor`: 一个 [N, M] 的成对矩阵,其中 N = len(boxes1),M = len(boxes2)
    """
    # 如果边界框退化将导致无穷大/无穷小结果,则进行早期检查
    if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
        raise ValueError(f"boxes1 必须在 [x0, y0, x1, y1](角点)格式内,但得到了 {boxes1}")
    if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
        raise ValueError(f"boxes2 必须在 [x0, y0, x1, y1](角点)格式内,但得到了 {boxes2}")
    iou, union = box_iou(boxes1, boxes2)

    top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])

    width_height = (bottom_right - top_left).clamp(min=0)  # [N,M,2]
    area = width_height[:, :, 0] * width_height[:, :, 1]

    return iou - (area - union) / area
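
A worked example for one box pair makes the formula `GIoU = IoU - (enclosing_area - union) / enclosing_area` concrete:

import torch

boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])  # a 2x2 box
boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])  # an overlapping 2x2 box
# intersection = 1, union = 4 + 4 - 1 = 7, smallest enclosing box = 3x3 = 9
# IoU = 1/7 ≈ 0.143, GIoU = 1/7 - (9 - 7)/9 ≈ -0.079
generalized_box_iou(boxes1, boxes2)  # tensor([[-0.0794]])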


# Copied from transformers.models.detr.modeling_detr._max_by_axis
def _max_by_axis(the_list):
    # type: (List[List[int]]) -> List[int]
    maxes = the_list[0]
    for sublist in the_list[1:]:
        for index, item in enumerate(sublist):
            maxes[index] = max(maxes[index], item)
    return maxes
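
For example:

_max_by_axis([[3, 480, 560], [3, 512, 500]])  # -> [3, 512, 560]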


# Copied from transformers.models.detr.modeling_detr.NestedTensor
# A container pairing a batched tensor with the padding mask that marks which positions are padding
class NestedTensor(object):
    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors  # the padded batch tensor
        self.mask = mask  # the padding mask (True at padded positions)

    # Move the nested tensor (and its mask, if any) to the given device
    def to(self, device):
        cast_tensor = self.tensors.to(device)
        mask = self.mask
        if mask is not None:
            cast_mask = mask.to(device)
        else:
            cast_mask = None
        return NestedTensor(cast_tensor, cast_mask)

    # Unpack into the underlying (tensors, mask) pair
    def decompose(self):
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)


# Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    if tensor_list[0].ndim == 3:  # only (channels, height, width) images are supported
        # Maximum size along each dimension across the whole list
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        batch_shape = [len(tensor_list)] + max_size
        batch_size, num_channels, height, width = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        # Allocate a zero-padded batch tensor and an all-True mask
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device)
        # Copy each image into the padded batch and mark its valid region as False in the mask
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            m[: img.shape[1], : img.shape[2]] = False
    else:
        raise ValueError("Only 3-dimensional tensors are supported")
    return NestedTensor(tensor, mask)
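
A quick, hypothetical usage of the two helpers above, batching two images of different sizes:

import torch

imgs = [torch.randn(3, 480, 560), torch.randn(3, 512, 500)]
nested = nested_tensor_from_tensor_list(imgs)
tensors, mask = nested.decompose()
tensors.shape  # torch.Size([2, 3, 512, 560]): every image zero-padded to the max size
mask.shape     # torch.Size([2, 512, 560]); True marks padded positions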

.\models\table_transformer\__init__.py

# Copyright and license information
# Copyright 2022 The HuggingFace Team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Import the TYPE_CHECKING constant
from typing import TYPE_CHECKING

# Import the lazy-loading helpers and the optional-dependency exception
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Define the module's import structure
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

# Check whether torch is available; if not, the modeling module is simply not registered
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available, so extend the import structure with the modeling classes
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

# Under type checking, import everything eagerly so static analyzers can resolve the names
if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    # Check torch availability again for the modeling imports
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

# At runtime, register this module as a lazy module so imports only happen on first attribute access
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

.\models\tapas\configuration_tapas.py

# coding=utf-8
# Copyright 2020 Google Research and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TAPAS configuration. Based on the BERT configuration with added parameters.

Hyperparameters are taken from run_task_main.py and hparam_utils.py of the original implementation. URLS:

- https://github.com/google-research/tapas/blob/master/tapas/run_task_main.py
- https://github.com/google-research/tapas/blob/master/tapas/utils/hparam_utils.py

"""

# PretrainedConfig is the base class for building pretrained configurations
from ...configuration_utils import PretrainedConfig

# Map from pretrained TAPAS checkpoints to the URLs of their configuration files
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}

# TapasConfig stores the configuration of a `TapasModel`
class TapasConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`TapasModel`]. It is used to instantiate a TAPAS
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the TAPAS
    [google/tapas-base-finetuned-sqa](https://huggingface.co/google/tapas-base-finetuned-sqa) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Hyperparameters additional to BERT are taken from run_task_main.py and hparam_utils.py of the original
    implementation. Original implementation available at https://github.com/google-research/tapas/tree/master.

    Example:

    ```
    >>> from transformers import TapasModel, TapasConfig

    >>> # Initializing a default (SQA) Tapas configuration
    >>> configuration = TapasConfig()
    >>> # Initializing a model from the configuration
    >>> model = TapasModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    # Model type identifier used by the auto classes
    model_type = "tapas"
    # Constructor defining all hyperparameters with their defaults
    def __init__(
        self,
        vocab_size=30522,  # vocabulary size
        hidden_size=768,  # hidden layer size
        num_hidden_layers=12,  # number of hidden layers
        num_attention_heads=12,  # number of attention heads
        intermediate_size=3072,  # intermediate (feed-forward) size
        hidden_act="gelu",  # hidden-layer activation function
        hidden_dropout_prob=0.1,  # dropout probability for hidden layers
        attention_probs_dropout_prob=0.1,  # dropout probability for attention weights
        max_position_embeddings=1024,  # maximum number of position embeddings
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],  # sizes of the token type vocabularies
        initializer_range=0.02,  # stddev for weight initialization
        layer_norm_eps=1e-12,  # layer-norm epsilon
        pad_token_id=0,  # id of the padding token
        positive_label_weight=10.0,  # weight of positive labels
        num_aggregation_labels=0,  # number of aggregation operators
        aggregation_loss_weight=1.0,  # weight of the aggregation loss
        use_answer_as_supervision=None,  # whether to use the answer as supervision for aggregation
        answer_loss_importance=1.0,  # importance of the answer loss
        use_normalized_answer_loss=False,  # whether to normalize the answer loss
        huber_loss_delta=None,  # delta of the Huber loss
        temperature=1.0,  # temperature for cell probabilities
        aggregation_temperature=1.0,  # temperature for aggregation probabilities
        use_gumbel_for_cells=False,  # whether to sample cells with Gumbel-Softmax
        use_gumbel_for_aggregation=False,  # whether to sample aggregation with Gumbel-Softmax
        average_approximation_function="ratio",  # method to approximate the expected average of cells
        cell_selection_preference=None,  # preference for cell selection over aggregation
        answer_loss_cutoff=None,  # cutoff above which the answer loss is ignored
        max_num_rows=64,  # maximum number of table rows
        max_num_columns=32,  # maximum number of table columns
        average_logits_per_cell=False,  # whether to average logits per cell
        select_one_column=True,  # whether to constrain selection to a single column
        allow_empty_column_selection=False,  # whether an empty column selection is allowed
        init_cell_selection_weights_to_zero=False,  # whether to initialize cell selection weights to zero
        reset_position_index_per_cell=True,  # whether to restart position indices at every cell
        disable_per_token_loss=False,  # whether to disable the per-token loss
        aggregation_labels=None,  # mapping of aggregation label ids to names
        no_aggregation_label_index=None,  # index of the "no aggregation" label
        **kwargs,  # remaining keyword arguments forwarded to the base class
    ):
        # Call the parent constructor, forwarding pad_token_id and any extra kwargs
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT-style hyperparameters
        self.vocab_size = vocab_size                     # vocabulary size
        self.hidden_size = hidden_size                   # hidden layer size
        self.num_hidden_layers = num_hidden_layers       # number of hidden layers
        self.num_attention_heads = num_attention_heads   # number of attention heads
        self.hidden_act = hidden_act                     # hidden activation function
        self.intermediate_size = intermediate_size       # intermediate (feed-forward) size
        self.hidden_dropout_prob = hidden_dropout_prob   # hidden dropout probability
        self.attention_probs_dropout_prob = attention_probs_dropout_prob  # attention dropout probability
        self.max_position_embeddings = max_position_embeddings  # maximum number of position embeddings
        self.type_vocab_sizes = type_vocab_sizes         # token type vocabulary sizes
        self.initializer_range = initializer_range       # initializer stddev
        self.layer_norm_eps = layer_norm_eps             # layer-norm epsilon

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # If the aggregation labels were passed as a dict (e.g. loaded from JSON), convert its keys to ints
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
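
For instance, a weakly supervised, WTQ-style setup might be configured as follows (illustrative values; the exact fine-tuned hyperparameters appear in the conversion script below):

from transformers import TapasConfig

# Enable the aggregation head and weak supervision from answers (illustrative)
config = TapasConfig(
    num_aggregation_labels=4,
    use_answer_as_supervision=True,
    select_one_column=True,
)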

.\models\tapas\convert_tapas_original_tf_checkpoint_to_pytorch.py

# Command-line argument parsing
import argparse

# Tapas classes and the TF-weight loading helper from transformers
from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
# Logging utilities
from transformers.utils import logging

# Set the logging verbosity to info
logging.set_verbosity_info()

# Convert a TensorFlow TAPAS checkpoint to a PyTorch model
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise the PyTorch model configuration.
    # When converting a checkpoint that uses absolute position embeddings, make sure
    # reset_position_index_per_cell of TapasConfig is set to False.

    # Load the TapasConfig from the json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # Set the absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # Set the remaining TapasConfig parameters and the model class based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # Task-specific configuration for WTQ
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # Task-specific configuration for WIKISQL_SUPERVISED
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        # TABFACT uses a sequence classification model
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        # MLM uses a masked language modeling model
        model = TapasForMaskedLM(config=config)
    # INTERMEDIATE_PRETRAINING uses the bare TapasModel
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    # Any other task is unsupported
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load the weights from the TensorFlow checkpoint into the PyTorch model
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save the PyTorch model (weights and configuration) to the given path
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save the tokenizer files to the same path; the vocab file is assumed to live next to the checkpoint
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    # Report whether relative position embeddings were used
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    # Create an argument parser
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    # --task selects the model task the checkpoint was trained for; defaults to "SQA"

    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to False.",
    )
    # --reset_position_index_per_cell is a flag enabling relative position embeddings; defaults to False
    # (note: the original help text said "Defaults to True.", which contradicted the argparse default)

    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    # --tf_checkpoint_path (required) points at the TensorFlow checkpoint

    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    # --tapas_config_file (required) is the config json of the pretrained TAPAS model, specifying its architecture

    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    # --pytorch_dump_path (required) is where the converted PyTorch model is written

    args = parser.parse_args()
    # Parse the command-line arguments

    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
    # Run the TensorFlow-to-PyTorch conversion with the parsed arguments
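
For reference, an equivalent direct call to the conversion function, with placeholder paths (the checkpoint path must end in "model.ckpt" so the `[:-10] + "vocab.txt"` trick above finds the vocabulary file):

# Placeholder paths; not real files
convert_tf_checkpoint_to_pytorch(
    task="WTQ",
    reset_position_index_per_cell=True,
    tf_checkpoint_path="/path/to/model.ckpt",
    tapas_config_file="/path/to/tapas_config.json",
    pytorch_dump_path="/path/to/pytorch_model",
)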

.\models\tapas\modeling_tapas.py

# coding=utf-8
# Copyright and license notice: this file is licensed under the Apache License, Version 2.0;
# the full text is available at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, the software is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND; see the License for details.

"""PyTorch TAPAS model."""
# Standard library imports
import enum
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

# TAPAS-related components and utilities
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import (
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    is_torch_greater_or_equal_than_1_12,
    prune_linear_layer,
)
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
# TAPAS configuration
from .configuration_tapas import TapasConfig

# Module-level logger
logger = logging.get_logger(__name__)

# Warn if the installed torch version is older than 1.12.0
if not is_torch_greater_or_equal_than_1_12:
    logger.warning(
        f"You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use "
        "TapasModel. Please upgrade torch."
    )

# Configuration and checkpoint names used in the documentation
_CONFIG_FOR_DOC = "TapasConfig"
_CHECKPOINT_FOR_DOC = "google/tapas-base"

# List of pretrained TAPAS checkpoints, covering large, base, small, mini and tiny models
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = [
    # large models
    "google/tapas-large",
    "google/tapas-large-finetuned-sqa",
    "google/tapas-large-finetuned-wtq",
    "google/tapas-large-finetuned-wikisql-supervised",
    "google/tapas-large-finetuned-tabfact",
    # base models
    "google/tapas-base",
    "google/tapas-base-finetuned-sqa",
    "google/tapas-base-finetuned-wtq",
    "google/tapas-base-finetuned-wikisql-supervised",
    "google/tapas-base-finetuned-tabfact",
    # small models
    "google/tapas-small",
    "google/tapas-small-finetuned-sqa",
    "google/tapas-small-finetuned-wtq",
    "google/tapas-small-finetuned-wikisql-supervised",
    "google/tapas-small-finetuned-tabfact",
    # mini models
    "google/tapas-mini",
    "google/tapas-mini-finetuned-sqa",
    "google/tapas-mini-finetuned-wtq",
    "google/tapas-mini-finetuned-wikisql-supervised",
    "google/tapas-mini-finetuned-tabfact",
    # tiny models
    "google/tapas-tiny",
    "google/tapas-tiny-finetuned-sqa",
    "google/tapas-tiny-finetuned-wtq",
    "google/tapas-tiny-finetuned-wikisql-supervised",
    "google/tapas-tiny-finetuned-tabfact",
    # See all TAPAS models at https://huggingface.co/models?filter=tapas
]
# Small constant used to avoid division by zero
EPSILON_ZERO_DIVISION = 1e-10
# A value close enough to negative infinity to act as log(0)
CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0


@dataclass
class TableQuestionAnsweringOutput(ModelOutput):
    """
    Output type of [`TapasForQuestionAnswering`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)):
            Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the
            semi-supervised regression loss and (optionally) the supervised loss for aggregations.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Prediction scores of the cell selection head, for every token.
        logits_aggregation (`torch.FloatTensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`):
            Prediction scores of the aggregation head, for every aggregation operator.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Hidden states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Attention weights from the self-attention heads, used to compute the weighted averages.

    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    logits_aggregation: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


def load_tf_weights_in_tapas(model, config, tf_checkpoint_path):
    """
    Load TF checkpoints into a PyTorch model. This is an adaptation of load_tf_weights_in_bert:

    - adds cell selection and aggregation heads
    - takes into account the additional token type embedding layers
    """
    try:
        import re  # regular expressions
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)  # absolute path of the TF checkpoint
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load the weights from the TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []  # variable names
    arrays = []  # variable values
    # For every variable in the checkpoint, log its name and shape and load its value
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    # Return the model with the initial variables loaded
    return model


# TapasEmbeddings builds the embeddings from word, position and token type embeddings
class TapasEmbeddings(nn.Module):
    """
    Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number
    of additional token type embeddings to encode tabular structure.
    """

    def __init__(self, config):
        super().__init__()

        # We do not include config.disabled_features and config.disable_position_embeddings from the original implementation
        # Word embeddings: vocab_size entries of dimension hidden_size, with pad_token_id as the padding index
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)

        # Position embeddings: up to max_position_embeddings positions of dimension hidden_size
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # One token type embedding layer per entry in config.type_vocab_sizes
        for i, type_vocab_sizes in enumerate(config.type_vocab_sizes):
            name = f"token_type_embeddings_{i}"
            # Dynamically register an embedding layer named token_type_embeddings_i
            setattr(self, name, nn.Embedding(type_vocab_sizes, config.hidden_size))

        # Number of token type embedding layers
        self.number_of_token_type_embeddings = len(config.type_vocab_sizes)

        # LayerNorm is not snake-cased to stick with the TensorFlow variable names, so any TF checkpoint can be loaded
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Dropout regularization
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # Keep the config around for later use when loading and saving the model
        self.config = config
    def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
        # Derive the input shape from input_ids if given, otherwise from inputs_embeds (minus the hidden dimension)
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        # Sequence length and target device
        seq_length = input_shape[1]
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # Create absolute position ids when none are provided
        if position_ids is None:
            # (seq_length,) long tensor on the right device, expanded to the input shape
            position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).expand(input_shape)

            # When self.config.reset_position_index_per_cell is True, use relative position embeddings instead
            if self.config.reset_position_index_per_cell:
                # Column index from the second token type dimension, shape (batch_size, seq_len)
                col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1)
                # Row index from the third token type dimension, shape (batch_size, seq_len)
                row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1)
                # Combined per-cell index, shape (batch_size, seq_len)
                full_index = ProductIndexMap(col_index, row_index)
                # First absolute position of every cell, shape (max_rows * max_columns,)
                first_position_per_segment = reduce_min(position_ids, full_index)[0]
                # First absolute position of the cell each token belongs to, shape (batch_size, seq_len)
                first_position = gather(first_position_per_segment, full_index)
                # Position of each token relative to the start of its cell, shape (1, seq_len)
                position = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0)
                # Clamp the relative positions so they never exceed the position embedding table
                position_ids = torch.min(
                    torch.as_tensor(self.config.max_position_embeddings - 1, device=device), position - first_position
                )

        # Default to all-zero token type ids of shape (*input_shape, number_of_token_type_embeddings)
        # (the original line added an int to a torch.Size, which raises a TypeError)
        if token_type_ids is None:
            token_type_ids = torch.zeros(
                (*input_shape, self.number_of_token_type_embeddings), dtype=torch.long, device=device
            )

        # Look up the word embeddings when no precomputed embeddings were passed
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        # Position embeddings
        position_embeddings = self.position_embeddings(position_ids)

        # Sum the word and position embeddings
        embeddings = inputs_embeds + position_embeddings

        # Add every token type embedding
        for i in range(self.number_of_token_type_embeddings):
            name = f"token_type_embeddings_{i}"
            embeddings += getattr(self, name)(token_type_ids[:, :, i])

        # Apply LayerNorm followed by dropout
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
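
The extra token type dimension is the part that differs from BERT. A shape sketch with made-up sizes (the seven type columns follow the TAPAS tokenizer: segment, column, row, prev_labels, column_ranks, inv_column_ranks, numeric_relations):

import torch

# Hypothetical batch: TAPAS expects token_type_ids of shape (batch, seq_len, 7)
batch_size, seq_len = 2, 128
input_ids = torch.randint(0, 30522, (batch_size, seq_len))
token_type_ids = torch.zeros(batch_size, seq_len, 7, dtype=torch.long)  # all zeros: tokens outside the table
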
# Self-attention module for TAPAS
class TapasSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        # The hidden size must be divisible by the number of attention heads (unless an embedding_size is set)
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}"
            )

        # Number of heads and per-head dimensionality
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # Linear projections for queries, keys and values
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        # Dropout on the attention probabilities
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # Whether this module is used inside a decoder
        self.is_decoder = config.is_decoder

    # Reshape (batch, seq, all_head_size) into (batch, heads, seq, head_size) for score computation
    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # Project the hidden states to queries
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys and values come from an encoder;
        # the attention mask needs to be such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # Reuse the cached cross-attention keys and values
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            # Compute keys and values from the encoder hidden states, reshaped for score computation
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Compute keys and values from the current hidden states and concatenate with the cache
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            # Plain self-attention: keys and values from the current hidden states
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        # Reshape the queries for score computation
        query_layer = self.transpose_for_scores(mixed_query_layer)

        if self.is_decoder:
            # Cache the current key/value pair for subsequent decoding steps
            past_key_value = (key_layer, value_layer)

        # Raw attention scores: dot product between "query" and "key", scaled by sqrt(head_size)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in TapasModel's forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, as described in the original Transformer paper
        attention_probs = self.dropout(attention_probs)

        # Mask heads if requested
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        # Context vectors: probability-weighted sum of the values
        context_layer = torch.matmul(attention_probs, value_layer)

        # Reshape the context vectors back to (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        # Return the context layer, optionally the attention probabilities, and the decoder cache
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
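
Stripped of the caching and masking branches, the score computation above is ordinary scaled dot-product attention. A shape walk-through with illustrative sizes:

import math
import torch

batch, heads, seq, head_dim = 2, 12, 128, 64
q = torch.randn(batch, heads, seq, head_dim)
k = torch.randn(batch, heads, seq, head_dim)
v = torch.randn(batch, heads, seq, head_dim)
scores = q @ k.transpose(-1, -2) / math.sqrt(head_dim)  # (2, 12, 128, 128)
probs = scores.softmax(dim=-1)                          # each row sums to 1
context = probs @ v                                     # (2, 12, 128, 64)
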
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class TapasSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        # Dense projection keeping the hidden size
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # LayerNorm with the configured epsilon for numerical stability
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # Dropout regularization
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Project, drop out, then add the residual connection and normalize
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class TapasAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        # Self-attention followed by its output projection
        self.self = TapasSelfAttention(config)
        self.output = TapasSelfOutput(config)
        # Set of attention heads that have been pruned
        self.pruned_heads = set()

    # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        # Find the heads to prune and the corresponding indices in the weight matrices
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune the query, key and value projections of the self-attention layer
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        # Prune the output dense layer along its input dimension
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update the hyperparameters and remember the pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    # Copied from transformers.models.bert.modeling_bert.BertAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # Run self-attention
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # Feed the self-attention output (plus the residual input) through the output layer
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class TapasIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        # Dense projection from hidden_size up to intermediate_size
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)

        # Resolve the activation function: a string is looked up in the ACT2FN mapping, a callable is used as-is
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Project up, then apply the activation
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class TapasOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        # Dense projection from intermediate_size back down to hidden_size
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        # LayerNorm and dropout, as in the other output layers
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Project down, drop out, then add the residual connection and normalize
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class TapasLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        # Chunk size for the feed-forward pass, and the dimension that carries the sequence length
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        # Self-attention block
        self.attention = TapasAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            # Cross-attention only makes sense for decoder models
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = TapasAttention(config)
        # Feed-forward block: intermediate projection followed by the output layer
        self.intermediate = TapasIntermediate(config)
        self.output = TapasOutput(config)

    # Copied from transformers.models.bert.modeling_bert.BertLayer.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # Perform self-attention on the input hidden states with optional cached key/values
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        # Extract the attention output from self-attention outputs
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            # Extract all outputs except the first (attention_output) and the last (present_key_value)
            outputs = self_attention_outputs[1:-1]
            # Extract the present key/value tuple from self-attention outputs
            present_key_value = self_attention_outputs[-1]
        else:
            # Include self attentions if attention weights are output
            outputs = self_attention_outputs[1:]

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                # Raise an error if cross-attention is expected but not instantiated
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            # Perform cross-attention using self-attention output and encoder hidden states
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            # Extract the attention output from cross-attention outputs
            attention_output = cross_attention_outputs[0]
            # Add cross attentions to the existing outputs
            outputs = outputs + cross_attention_outputs[1:-1]

            # Add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # Apply feed-forward chunking to the attention output
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        # Append layer_output to outputs
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            # Append present_key_value to outputs as the last element
            outputs = outputs + (present_key_value,)

        # Return the final outputs
        return outputs

    # Copied from transformers.models.bert.modeling_bert.BertLayer.feed_forward_chunk
    def feed_forward_chunk(self, attention_output):
        # Apply intermediate layer to attention_output
        intermediate_output = self.intermediate(attention_output)
        # Apply output layer to intermediate_output and attention_output
        layer_output = self.output(intermediate_output, attention_output)
        # Return the final layer output
        return layer_output


# TapasEncoder stacks config.num_hidden_layers TapasLayer modules
class TapasEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # One TapasLayer per hidden layer in the config
        self.layer = nn.ModuleList([TapasLayer(config) for _ in range(config.num_hidden_layers)])
        # Gradient checkpointing is off by default
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # Accumulators for hidden states and attention weights, if requested
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # Run the input through every layer in turn
        for i, layer_module in enumerate(self.layer):
            # Record the hidden states before this layer, if requested
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # Per-layer head mask, if one was provided
            layer_head_mask = head_mask[i] if head_mask is not None else None

            # Use gradient checkpointing during training if it is enabled
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_values,
                    output_attentions,
                )
            else:
                # Otherwise call the layer's forward pass directly
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_values,
                    output_attentions,
                )

            # The first element of the layer output is the new hidden states
            hidden_states = layer_outputs[0]

            # Record this layer's attention weights, if requested
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # Record the final hidden states, if requested
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        # Without return_dict, return a tuple of the non-None results
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)

        # Otherwise wrap everything in a BaseModelOutput
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )


# Copied from transformers.models.bert.modeling_bert.BertPooler
class TapasPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        # Dense layer plus Tanh activation over the first token's hidden state
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding to the first token
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Tapas
class TapasPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        # Dense projection keeping the hidden size
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Resolve the activation: look strings up in ACT2FN, use callables as-is
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        # LayerNorm with the configured epsilon
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Dense -> activation -> LayerNorm
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Tapas
class TapasLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = TapasPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is an output-only bias for each token
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Link the two variables so the bias is correctly resized with resize_token_embeddings
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        # Transform, then project to vocabulary logits
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Tapas
class TapasOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = TapasLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        # Turn the sequence output into per-token prediction scores over the vocabulary
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class TapasPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    # Configuration class and base model prefix used by the library machinery
    config_class = TapasConfig
    base_model_prefix = "tapas"
    # Gradient checkpointing is supported
    supports_gradient_checkpointing = True

    # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Linear layers: normal(0, initializer_range) weights, zero bias
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            # Embedding layers: normal(0, initializer_range) weights, zeroed at the padding index
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm: zero bias, unit weight
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# TAPAS_START_DOCSTRING 的文档字符串,用于说明 TapasModel 类的继承关系和用法
TAPAS_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`TapasConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""



# Docstring describing the inputs accepted by Tapas models
TAPAS_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0}, 7)`, *optional*):
            Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
            class for more info.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. If
            `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
            used. Selected in the range `[0, config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Tapas Model transformer outputting raw hidden-states without any specific head on top.",
    TAPAS_START_DOCSTRING,
)
class TapasModel(TapasPreTrainedModel):
    """
    This class defines the Tapas Model, which extends TapasPreTrainedModel.

    It can function as an encoder (self-attention only) or a decoder, incorporating cross-attention layers between
    self-attention layers, following the architecture in the paper "Attention is All You Need" by Ashish Vaswani et al.

    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        # Initialize TapasEmbeddings and TapasEncoder based on provided configuration
        self.embeddings = TapasEmbeddings(config)
        self.encoder = TapasEncoder(config)

        # Optionally initialize TapasPooler for pooling layer
        self.pooler = TapasPooler(config) if add_pooling_layer else None

        # Perform any additional initialization tasks
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        # Iterates over specified layers and prunes heads accordingly
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,


class TapasForMaskedLM(TapasPreTrainedModel):
    # Constructor: builds the base Tapas model plus a TapasOnlyMLMHead and initializes the weights
    def __init__(self, config):
        # Call the parent class constructor with the config
        super().__init__(config)

        # Base Tapas model, without a pooling layer
        self.tapas = TapasModel(config, add_pooling_layer=False)
        # MLM head on top
        self.cls = TapasOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Return the output embeddings of the MLM head
    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    # Replace the output embeddings of the MLM head with new embeddings
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    # Forward pass: accepts the usual model inputs and returns a MaskedLMOutput
    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`

        Returns:
            Depending on `return_dict`:
            - If `return_dict` is `False`, returns a tuple with `prediction_scores` followed by additional outputs.
            - If `return_dict` is `True`, returns a `MaskedLMOutput` object containing `loss`, `logits`, `hidden_states`, and `attentions`.

        Examples:

        ```
        >>> from transformers import AutoTokenizer, TapasForMaskedLM
        >>> import pandas as pd

        >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
        >>> model = TapasForMaskedLM.from_pretrained("google/tapas-base")

        >>> data = {
        ...     "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
        ...     "Age": ["56", "45", "59"],
        ...     "Number of movies": ["87", "53", "69"],
        ... }
        >>> table = pd.DataFrame.from_dict(data)

        >>> inputs = tokenizer(
        ...     table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="pt"
        ... )
        >>> labels = tokenizer(
        ...     table=table, queries="How many movies has George Clooney played in?", return_tensors="pt"
        ... )["input_ids"]

        >>> outputs = model(**inputs, labels=labels)
        >>> logits = outputs.logits
        ```

        """
        # Determine the return type based on `return_dict`
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Pass the inputs to the Tapas model and retrieve its outputs
        outputs = self.tapas(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Take the sequence output and compute prediction scores with the MLM head
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)

        # If labels are provided, compute the masked language modeling loss with CrossEntropyLoss
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        # When `return_dict` is False, return a tuple; otherwise return a MaskedLMOutput
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


# Documented via add_start_docstrings together with TAPAS_START_DOCSTRING
@add_start_docstrings(
    """
    Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables
    (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for
    SQA, WTQ or WikiSQL-supervised tasks.
    """,
    TAPAS_START_DOCSTRING,
)
class TapasForQuestionAnswering(TapasPreTrainedModel):
    
    def __init__(self, config: TapasConfig):
        super().__init__(config)
        
        # base model
        self.tapas = TapasModel(config)
        
        # dropout (only used when training)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        
        # cell selection heads
        if config.init_cell_selection_weights_to_zero:
            # init_cell_selection_weights_to_zero: Whether the initial weights should be
            # set to 0. This ensures that all tokens have the same prior probability.
            self.output_weights = nn.Parameter(torch.zeros(config.hidden_size))
            self.column_output_weights = nn.Parameter(torch.zeros(config.hidden_size))
        else:
            self.output_weights = nn.Parameter(torch.empty(config.hidden_size))
            nn.init.normal_(
                self.output_weights, std=config.initializer_range
            )  # here, a truncated normal is used in the original implementation
            self.column_output_weights = nn.Parameter(torch.empty(config.hidden_size))
            nn.init.normal_(
                self.column_output_weights, std=config.initializer_range
            )  # here, a truncated normal is used in the original implementation
        
        self.output_bias = nn.Parameter(torch.zeros([]))
        self.column_output_bias = nn.Parameter(torch.zeros([]))
        
        # aggregation head
        if config.num_aggregation_labels > 0:
            self.aggregation_classifier = nn.Linear(config.hidden_size, config.num_aggregation_labels)
        
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
    # Forward pass of the question answering model
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,  # token ids of the input sequence
        attention_mask: Optional[torch.FloatTensor] = None,  # attention mask
        token_type_ids: Optional[torch.LongTensor] = None,  # token type ids encoding the table structure
        position_ids: Optional[torch.LongTensor] = None,  # position ids
        head_mask: Optional[torch.FloatTensor] = None,  # mask to nullify selected attention heads
        inputs_embeds: Optional[torch.FloatTensor] = None,  # precomputed input embeddings
        table_mask: Optional[torch.LongTensor] = None,  # mask of the table
        labels: Optional[torch.LongTensor] = None,  # per-token cell selection labels
        aggregation_labels: Optional[torch.LongTensor] = None,  # aggregation function id per example
        float_answer: Optional[torch.FloatTensor] = None,  # float answer per example
        numeric_values: Optional[torch.FloatTensor] = None,  # numeric value of every token
        numeric_values_scale: Optional[torch.FloatTensor] = None,  # scale of the numeric values
        output_attentions: Optional[bool] = None,  # whether to return attentions
        output_hidden_states: Optional[bool] = None,  # whether to return hidden states
        return_dict: Optional[bool] = None,  # whether to return a ModelOutput instead of a tuple
"""
Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table
entailment tasks, such as TabFact (Chen et al., 2020).
"""
@add_start_docstrings(
    TAPAS_START_DOCSTRING,
)
class TapasForSequenceClassification(TapasPreTrainedModel):
    def __init__(self, config):
        """
        Initializes TapasForSequenceClassification model.

        Args:
            config (TapasConfig): Configuration object specifying the model architecture and parameters.
        """
        super().__init__(config)
        self.num_labels = config.num_labels

        # Initialize Tapas model
        self.tapas = TapasModel(config)
        # Dropout layer
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Linear layer for classification
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """
        Forward pass of the TapasForSequenceClassification model.

        Args:
            input_ids (torch.LongTensor, optional): Input IDs of the sequence.
            attention_mask (torch.FloatTensor, optional): Mask to avoid performing attention on padding token indices.
            token_type_ids (torch.LongTensor, optional): Segment token indices.
            position_ids (torch.LongTensor, optional): Indices of positions of each input sequence tokens in the model.
            head_mask (torch.FloatTensor, optional): Mask to nullify selected heads of the self-attention modules.
            inputs_embeds (torch.FloatTensor, optional): Embedded representations of input sequences.
            labels (torch.LongTensor, optional): Labels for computing the sequence classification loss.
            output_attentions (bool, optional): Whether to return attentions weights.
            output_hidden_states (bool, optional): Whether to return hidden states.
            return_dict (bool, optional): Whether to return a dictionary as output.

        Returns:
            SequenceClassifierOutput: Output of the sequence classification, including loss, logits, and optional hidden states and attentions.
        """
        """ TAPAS utilities."""


class AverageApproximationFunction(str, enum.Enum):
    """
    Enum defining average approximation functions.

    Includes:
    - RATIO: ratio approximation
    - FIRST_ORDER: first order approximation
    - SECOND_ORDER: second order approximation
    """
    RATIO = "ratio"
    FIRST_ORDER = "first_order"
    SECOND_ORDER = "second_order"


# Beginning of everything related to segmented tensors


class IndexMap(object):
    """
    Index grouping entries within a tensor.

    Attributes:
        indices (torch.LongTensor): Tensor containing the indices.
        num_segments (torch.LongTensor): Scalar tensor specifying the number of segments.
        batch_dims (int): Number of batch dimensions.
    """

    def __init__(self, indices, num_segments, batch_dims=0):
        """
        Creates an IndexMap instance.

        Args:
            indices (torch.LongTensor): Tensor containing the indices.
            num_segments (torch.LongTensor): Scalar tensor specifying the number of segments.
            batch_dims (int, optional): Number of batch dimensions. Defaults to 0.
        """
        self.indices = torch.as_tensor(indices)
        self.num_segments = torch.as_tensor(num_segments, device=indices.device)
        self.batch_dims = batch_dims

    def batch_shape(self):
        """
        Returns the batch shape of the indices tensor.

        Returns:
            torch.Size: Size object representing the shape of the indices tensor up to batch dimensions.
        """
        return self.indices.size()[: self.batch_dims]


class ProductIndexMap(IndexMap):
    """
    Index map representing the product of two indices.
    """
    def __init__(self, outer_index, inner_index):
        """
        Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the
        intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows
        and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation
        combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has *num_segments* equal to
        *outer_index.num_segments* * *inner_index.num_segments*

        Args:
            outer_index (`IndexMap`):
                IndexMap.
            inner_index (`IndexMap`):
                IndexMap, must have the same shape as *outer_index*.
        """
        # The batch dimensions of both index objects must agree
        if outer_index.batch_dims != inner_index.batch_dims:
            raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.")

        # Initialize via the parent constructor
        super().__init__(
            indices=(inner_index.indices + outer_index.indices * inner_index.num_segments),
            num_segments=inner_index.num_segments * outer_index.num_segments,
            batch_dims=inner_index.batch_dims,
        )
        # Keep references to the outer and inner index maps
        self.outer_index = outer_index
        self.inner_index = inner_index

    def project_outer(self, index):
        """Projects an index with the same index set onto the outer components."""
        # Integer-divide by the number of inner segments to recover the outer index
        indices = torch.div(index.indices, self.inner_index.num_segments, rounding_mode="floor").type(torch.long)
        return IndexMap(indices=indices, num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims)

    def project_inner(self, index):
        """Projects an index with the same index set onto the inner components."""
        # Take the remainder modulo the number of inner segments to recover the inner index
        return IndexMap(
            indices=torch.fmod(index.indices, self.inner_index.num_segments)
            .type(torch.float)
            .floor()
            .type(torch.long),
            num_segments=self.inner_index.num_segments,
            batch_dims=index.batch_dims,
        )
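
A minimal sanity-check sketch (toy values, not from the original file) of how `IndexMap` and `ProductIndexMap` combine row and column ids into per-cell segment ids:

```
import torch

# Rows and columns of a 2x3 table: one batch element with 6 tokens
row_index = IndexMap(indices=torch.tensor([[0, 0, 0, 1, 1, 1]]), num_segments=2, batch_dims=1)
col_index = IndexMap(indices=torch.tensor([[0, 1, 2, 0, 1, 2]]), num_segments=3, batch_dims=1)

# Each (row, column) pair gets its own segment id in {0, ..., 5}
cell_index = ProductIndexMap(row_index, col_index)
print(cell_index.indices)       # tensor([[0, 1, 2, 3, 4, 5]])
print(cell_index.num_segments)  # tensor(6)

# Projecting recovers the component indices
print(cell_index.project_outer(cell_index).indices)  # rows:    tensor([[0, 0, 0, 1, 1, 1]])
print(cell_index.project_inner(cell_index).indices)  # columns: tensor([[0, 1, 2, 0, 1, 2]])
```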
# Flatten a batched index map into a single, non-batched index map, offsetting the segment ids of
# each batch element so that they remain distinct (used by `_segment_reduce` below)
def flatten(index, name="segmented_flatten"):
    """
    Flattens a batched index map to a 1d index map. This operation relabels the segments to keep batch elements
    distinct: the k-th batch element has its segment ids shifted by *num_segments* * k, so the result has
    *num_segments* multiplied by the number of elements in the batch.

    Args:
        index (`IndexMap`):
            IndexMap to flatten.
        name (`str`, *optional*, defaults to 'segmented_flatten'):
            Name for the operation. Currently not used

    Returns:
        (`IndexMap`): The flattened IndexMap.
    """
    # Compute the batch size as a scalar tensor
    batch_size = torch.prod(torch.tensor(list(index.batch_shape())))
    # Create an offset of length batch_size, multiplied elementwise by num_segments
    # (to offset the different elements in the batch), e.g. [0, 64] for a batch size of 2
    offset = torch.arange(start=0, end=batch_size, device=index.num_segments.device) * index.num_segments
    offset = offset.view(index.batch_shape())
    # Broadcast the offset over the non-batch dimensions of the indices (typically range(1, 2))
    for _ in range(index.batch_dims, len(index.indices.size())):
        offset = offset.unsqueeze(-1)

    # The final indices are the offsets plus the original indices
    indices = offset + index.indices
    return IndexMap(indices=indices.view(-1), num_segments=index.num_segments * batch_size, batch_dims=0)


def range_index_map(batch_shape, num_segments, name="range_index_map"):
    """
    Constructs an index map equal to range(num_segments).

    Args:
        batch_shape (`torch.Size`):
            Batch shape
        num_segments (`int`):
            Number of segments
        name (`str`, *optional*, defaults to 'range_index_map'):
            Name for the operation. Currently not used

    Returns:
        (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).
    """
    # Convert batch_shape to a rank-1 long tensor (e.g. [2])
    batch_shape = torch.as_tensor(
        batch_shape, dtype=torch.long
    )  # create a rank 1 tensor containing batch_shape (e.g. [2])
    assert len(batch_shape.size()) == 1  # batch_shape must be rank 1

    # Convert num_segments to a rank-0 (scalar) tensor (e.g. 64)
    num_segments = torch.as_tensor(num_segments)
    assert len(num_segments.size()) == 0  # num_segments must be a scalar

    # Create the vector [0, ..., num_segments - 1] on the same device as num_segments
    indices = torch.arange(start=0, end=num_segments, device=num_segments.device)

    # Build the target shape [1, ..., 1, num_segments]: one leading 1 per batch dimension
    new_tensor = torch.cat(
        [torch.ones_like(batch_shape, dtype=torch.long, device=num_segments.device), num_segments.unsqueeze(dim=0)],
        dim=0,
    )
    # new_tensor is just a vector of [1 64] for example (assuming only 1 batch dimension)

    # Convert new_tensor to a Python list of ints and reshape the indices accordingly
    new_shape = [int(x) for x in new_tensor.tolist()]
    indices = indices.view(new_shape)

    # Tile the indices along the batch dimensions: multiples is [batch_shape..., 1]
    multiples = torch.cat([batch_shape, torch.as_tensor([1])], dim=0)
    indices = indices.repeat(multiples.tolist())
    # equivalent (in NumPy):
    # indices = torch.as_tensor(np.tile(indices.numpy(), multiples.tolist()))

    # batch_dims equals the number of batch dimensions
    return IndexMap(indices=indices, num_segments=num_segments, batch_dims=list(batch_shape.size())[0])
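
A small sketch (toy values) of how `flatten` offsets segment ids across the batch and how `range_index_map` builds the companion output index:

```
import torch

# Two batch elements, each with 3 tokens assigned to 2 segments
index = IndexMap(indices=torch.tensor([[0, 0, 1], [1, 1, 0]]), num_segments=2, batch_dims=1)

flat = flatten(index)
# The second batch element is offset by num_segments, so its ids stay distinct
print(flat.indices)       # tensor([0, 0, 1, 3, 3, 2])
print(flat.num_segments)  # tensor(4)

rng = range_index_map(index.batch_shape(), index.num_segments)
print(rng.indices)        # tensor([[0, 1], [0, 1]])
```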


# Apply a segment-wise reduction to a tensor
def _segment_reduce(values, index, segment_reduce_fn, name):
    """
    Applies a segment reduction segment-wise.

    Args:
        values (`torch.Tensor`):
            Tensor with segment values.
        index (`IndexMap`):
            IndexMap.
        segment_reduce_fn (`str`):
            Name for the reduce operation. One of "sum", "mean", "max" or "min".
        name (`str`):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor`): Tensor with the reduced segment values.
        output_index (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).
    """
    # Flatten the batch dimensions, as segment ops (scatter) do not support batching. However, if `values` has extra
    # dimensions to the right, keep them unflattened: segmented ops support vector-valued operations.
    flat_index = flatten(index)
    vector_shape = values.size()[len(index.indices.size()) :]  # torch.Size object
    flattened_shape = torch.cat(
        [torch.as_tensor([-1], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long)], dim=0
    )
    # Reshape `values` to the flattened shape
    flat_values = values.reshape(flattened_shape.tolist())

    # Create a zero-filled output tensor
    out = torch.zeros(int(flat_index.num_segments), dtype=torch.float, device=flat_values.device)
    # Reduce segment-wise along dim 0 with the given reduction function `segment_reduce_fn`
    segment_means = out.scatter_reduce(
        dim=0, index=flat_index.indices.long(), src=flat_values.float(), reduce=segment_reduce_fn, include_self=False
    )

    # Unflatten the values back to the original batch shape
    new_shape = torch.cat(
        [
            torch.as_tensor(index.batch_shape(), dtype=torch.long),
            torch.as_tensor([index.num_segments], dtype=torch.long),
            torch.as_tensor(vector_shape, dtype=torch.long),
        ],
        dim=0,
    )

    # Clone segment_means, reshape it to match the original `values` shape, and cast back to the input dtype
    output_values = segment_means.clone().view(new_shape.tolist()).to(values.dtype)
    # Build and return the output index
    output_index = range_index_map(index.batch_shape(), index.num_segments)
    return output_values, output_index


def reduce_sum(values, index, name="segmented_reduce_sum"):
    """
    Sums a tensor over its segments.

    Outputs 0 for empty segments.

    This operation computes the sum over segments, with support for:

        - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
        - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a sum of
          vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation.

    Args:
        values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
            Tensor containing the values of which the sum must be taken segment-wise.
        index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
            Index defining the segments.
        name (`str`, *optional*, defaults to 'segmented_reduce_sum'):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
        output values. output_index (`IndexMap`): IndexMap of shape [B1, B2, ..., Bn, num_segments].
    """
    return _segment_reduce(values, index, "sum", name)


# Averages a tensor over its segments; outputs 0 for empty segments
def reduce_mean(values, index, name="segmented_reduce_mean"):
    # Delegate to _segment_reduce with the "mean" reduction
    return _segment_reduce(values, index, "mean", name)


# Computes the maximum of a tensor over its segments
def reduce_max(values, index, name="segmented_reduce_max"):
    # Delegate to _segment_reduce with the "amax" reduction
    return _segment_reduce(values, index, "amax", name)


# Computes the minimum of a tensor over its segments
def reduce_min(values, index, name="segmented_reduce_min"):
    """
    Computes the minimum over segments.

    This operation computes the minimum over segments, with support for:

        - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
        - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an
          element-wise minimum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced
          by the operation.

    Args:
        values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
            Tensor containing the values of which the minimum must be taken segment-wise.
        index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
            Index defining the segments.
        name (`str`, *optional*, defaults to 'segmented_reduce_min'):
            Name for the operation. Currently not used

    Returns:
        output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
        output values. output_index (`IndexMap`): IndexMap of shape [B1, B2, ..., Bn, num_segments].
    """
    # Delegate to _segment_reduce with the "amin" reduction
    return _segment_reduce(values, index, "amin", name)


def compute_column_logits(
    sequence_output, column_output_weights, column_output_bias, cell_index, cell_mask, allow_empty_column_selection
):
    """
    Computes the column logits.

    Args:
        sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model.
        column_output_weights (`torch.FloatTensor` of shape `(hidden_size)`):
            Weights of the linear layer for column selection.
        column_output_bias (`torch.FloatTensor` of shape `()`):
            Bias of the linear layer for column selection.
        cell_index (`ProductIndexMap`):
            Index that groups tokens into cells.
        cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`):
            Mask for cells that exist in the table (i.e. that are not padding).
        allow_empty_column_selection (`bool`):
            Whether to allow not selecting any column

    Returns:
        column_logits (`torch.FloatTensor` of shape `(batch_size, max_num_cols)`):
            Tensor containing the column logits for every example in the batch.
    """

    # First, compute the token logits (batch_size, seq_len) - without temperature
    token_logits = torch.einsum("bsj,j->bs", sequence_output, column_output_weights) + column_output_bias

    # Next, average the logits per cell (batch_size, max_num_cols * max_num_rows)
    cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index)

    # Finally, average the logits per column (batch_size, max_num_cols)
    column_index = cell_index.project_inner(cell_logits_index)
    column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index)

    # Count the number of cells per column
    cell_count, _ = reduce_sum(cell_mask, column_index)
    column_logits /= cell_count + EPSILON_ZERO_DIVISION

    # Mask columns that do not appear in the example
    is_padding = torch.logical_and(cell_count < 0.5, ~torch.eq(out_index.indices, 0))
    column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor(
        is_padding, dtype=torch.float32, device=is_padding.device
    )

    # If empty column selection is not allowed, push the logit of the special empty column (id 0)
    # to a very low value so that it is never selected
    if not allow_empty_column_selection:
        column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor(
            torch.eq(out_index.indices, 0), dtype=torch.float32, device=out_index.indices.device
        )

    return column_logits


def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask):
    """
    Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood:
    the model first predicts a column and then selects cells within that column (conditioned on the column). Cells
    outside the selected column are never selected.

    Args:
        token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Tensor containing the logits per token.
        column_logits (`torch.FloatTensor` of shape `(batch_size, max_num_cols)`):
            Tensor containing the logits per column.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Labels per token.
        cell_index (`ProductIndexMap`):
            Index that groups tokens into cells.
        col_index (`IndexMap`):
            Index that groups tokens into columns.
        cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`):
            Mask for cells that exist in the table (i.e. that are not padding).

    Returns:
        selection_loss_per_example (`torch.FloatTensor` of shape `(batch_size,)`): Loss for each example.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): New logits which are only allowed to
        select cells in a single column. Logits outside of the most likely column according to *column_logits* will
        be set to a very low value (such that the probabilities are 0).
    """
    # Part 1: column loss

    # First find the column we should select. We use the column with the maximum number of selected cells.
    labels_per_column, _ = reduce_sum(torch.as_tensor(labels, dtype=torch.float32, device=labels.device), col_index)
    # labels_per_column is of shape (batch_size, max_num_cols). It contains the number of labels for every column,
    # for every example.
    column_label = torch.argmax(labels_per_column, dim=-1)  # shape (batch_size,)
    # Check if there are no selected cells in the column. In that case the model should predict the special column id
    # 0, which means "select nothing".
    no_cell_selected = torch.eq(
        torch.max(labels_per_column, dim=-1)[0], 0
    )  # no_cell_selected is of shape (batch_size,) and is True
    # for examples of the batch with no cells selected (i.e. no labels set to 1); set column_label to 0 for those
    column_label = torch.where(
        no_cell_selected.view(column_label.size()), torch.zeros_like(column_label), column_label
    )

    column_dist = torch.distributions.Categorical(logits=column_logits)  # shape (batch_size, max_num_cols)
    column_loss_per_example = -column_dist.log_prob(column_label)

    # Part 2: cell loss

    # Reduce the labels and logits from per-token to per-cell.
    # logits_per_cell: shape (batch_size, max_num_rows*max_num_cols), i.e. (batch_size, 64*32)
    logits_per_cell, _ = reduce_mean(token_logits, cell_index)
    # labels_per_cell: shape (batch_size, 64*32), indicating whether each cell should be selected (1) or not (0)
    labels_per_cell, labels_index = reduce_max(
        torch.as_tensor(labels, dtype=torch.long, device=labels.device), cell_index
    )

    # Mask for the selected column.
    # column_id_for_cells: shape (batch_size, 64*32), indicating to which column each cell belongs;
    # obtained by projecting labels_index onto the inner (column) components via cell_index
    column_id_for_cells = cell_index.project_inner(labels_index).indices

    # column_mask: shape (batch_size, 64*32), equal to 1 if the cell belongs to the column to be selected
    column_mask = torch.as_tensor(
        torch.eq(column_id_for_cells, torch.unsqueeze(column_label, dim=-1)),
        dtype=torch.float32,
        device=cell_mask.device,
    )

    # Bernoulli distribution over logits_per_cell, shape (batch_size, 64*32)
    cell_dist = torch.distributions.Bernoulli(logits=logits_per_cell)
    # Log-likelihood of each cell, used only for the selected column
    cell_log_prob = cell_dist.log_prob(labels_per_cell.type(torch.float32))  # shape (batch_size, 64*32)

    # Cell loss, masked by the column mask and the cell mask
    cell_loss = -torch.sum(cell_log_prob * column_mask * cell_mask, dim=1)

    # Normalize the loss by the number of cells in the column
    cell_loss /= torch.sum(column_mask * cell_mask, dim=1) + EPSILON_ZERO_DIVISION

    # Initialize the selection loss with the per-example column loss
    selection_loss_per_example = column_loss_per_example
    # Add the cell loss, except when no cell was selected (in which case it is zero)
    selection_loss_per_example += torch.where(
        no_cell_selected.view(selection_loss_per_example.size()),
        torch.zeros_like(selection_loss_per_example),
        cell_loss,
    )

    # The column selected by the model is the argmax over the column logits
    selected_column_id = torch.as_tensor(
        torch.argmax(column_logits, dim=-1), dtype=torch.long, device=column_logits.device
    )  # shape (batch_size,)

    # selected_column_mask: shape (batch_size, 64*32), equal to 1 if the cell belongs to the column selected by the
    # model
    selected_column_mask = torch.as_tensor(
        torch.eq(column_id_for_cells, torch.unsqueeze(selected_column_id, dim=-1)),
        dtype=torch.float32,
        device=selected_column_id.device,
    )

    # Never select cells with the special column id 0
    selected_column_mask = torch.where(
        torch.eq(column_id_for_cells, 0).view(selected_column_mask.size()),
        torch.zeros_like(selected_column_mask),
        selected_column_mask,
    )

    # Push the logits outside the selected column to a very low value (probability 0)
    new_logits_per_cell = logits_per_cell + CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask)
    # Gather the new per-cell logits back to per-token logits using cell_index
    logits = gather(new_logits_per_cell, cell_index)

    # Return the selection loss and the adjusted logits
    return selection_loss_per_example, logits
    """
    Computes logits per token

    Args:
        sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model.
        temperature (`float`):
            Temperature for the Bernoulli distribution.
        output_weights (`torch.FloatTensor` of shape `(hidden_size,)`):
            Weights of the linear layer for cell selection.
        output_bias (`torch.FloatTensor` of shape `()`):
            Bias of the linear layer for cell selection

    Returns:
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Logits per token.
    """
    # 计算每个 token 的 logits
    logits = (torch.einsum("bsj,j->bs", sequence_output, output_weights) + output_bias) / temperature

    return logits
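
A quick sketch (toy shapes, not from the original file) confirming that the einsum above is just a per-token dot product with the weight vector:

```
import torch

sequence_output = torch.randn(2, 5, 8)  # (batch_size, seq_len, hidden_size)
output_weights = torch.randn(8)
output_bias = torch.tensor(0.0)

logits = compute_token_logits(sequence_output, 2.0, output_weights, output_bias)
# "bsj,j->bs" contracts the hidden dimension, i.e. a dot product per token
expected = (sequence_output @ output_weights + output_bias) / 2.0
assert torch.allclose(logits, expected)
```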


def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier):
    """
    Finds examples where the model should select cells with no aggregation.

    Returns a mask that determines for which examples should the model select answers directly from the table, without
    any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only
    apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation
    case. The ambiguous case is when the answer is a number that also appears in the table. In this case we use the
    aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold
    for this is a hyperparameter *cell_selection_preference*

    Args:
        answer (`torch.FloatTensor` of shape `(batch_size, )`):
            Answer for every example in the batch. Nan if there is no scalar answer.
        pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Output of the pooler (BertPooler) on top of the encoder layer.
        cell_selection_preference (`float`):
            Preference for cell selection in ambiguous cases.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Labels per token.
        aggregation_classifier (`torch.nn.Linear`): Aggregation head

    Returns:
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use
        aggregation functions.
    """
    # Initial aggregate mask: 1.0 when the answer is numeric (not NaN), i.e. when aggregation may be needed
    aggregate_mask_init = torch.logical_not(torch.isnan(answer)).type(torch.FloatTensor).to(answer.device)

    # Compute the aggregation logits with the aggregation classifier
    logits_aggregation = aggregation_classifier(pooled_output)

    # Build a categorical distribution over the aggregation operations
    dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation)

    # Total probability mass of all aggregation operations except "no aggregation" (index 0)
    aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1)

    # Cell selection examples according to the current model
    is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference

    # Examples with non-empty cell selection supervision
    is_cell_supervision_available = torch.sum(labels, dim=1) > 0

    # torch.where is not equivalent to tf.where (in TensorFlow 1), hence the .view on the condition
    # to match the shape of the first tensor
    aggregate_mask = torch.where(
        torch.logical_and(is_pred_cell_selection, is_cell_supervision_available).view(aggregate_mask_init.size()),
        torch.zeros_like(aggregate_mask_init, dtype=torch.float32),
        aggregate_mask_init,
    )

    # Detach the tensor so it no longer tracks gradients
    aggregate_mask = aggregate_mask.detach()

    return aggregate_mask


# Aggregation loss when the aggregation type is known
def _calculate_aggregation_loss_known(
    logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
):
    """
    Calculates aggregation loss when its type is known during training.

    In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation"
    should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting
    where the aggregation type is always known, standard cross entropy loss is accumulated for all examples.

    Args:
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
            A mask set to 1 for examples that should use aggregation functions.
        aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`):
            Aggregation function id for every example.
        use_answer_as_supervision (`bool`, *optional*):
            Whether to use the answer as the only supervision for aggregation examples.
        num_aggregation_labels (`int`, *optional*, defaults to 0):
            The number of aggregation operators to predict.

    Returns:
        aggregation_loss_known (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (when its type is
        known during training) per example.
    """
    if use_answer_as_supervision:
        # Prepare "no aggregation" targets for cell selection examples
        target_aggregation = torch.zeros_like(aggregate_mask, dtype=torch.long)
    else:
        # Use the aggregation supervision as the target
        target_aggregation = aggregation_labels

    one_hot_labels = nn.functional.one_hot(target_aggregation, num_classes=num_aggregation_labels).type(torch.float32)
    log_probs = nn.functional.log_softmax(logits_aggregation, dim=-1)

    # torch.FloatTensor[batch_size]
    per_example_aggregation_intermediate = -torch.sum(one_hot_labels * log_probs, dim=-1)
    if use_answer_as_supervision:
        # Accumulate loss only for examples requiring cell selection (no aggregation)
        return per_example_aggregation_intermediate * (1 - aggregate_mask)
    else:
        return per_example_aggregation_intermediate


# Aggregation loss in the case of answer supervision
def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask):
    """
    Calculates aggregation loss in the case of answer supervision.

    Args:
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
            A mask set to 1 for examples that should use aggregation functions.

    Returns:
        aggregation_loss_unknown (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (in the case of
        answer supervision) per example.
    """
    # Build a categorical distribution over the aggregation logits
    dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation)
    # Total probability mass of all aggregation operations except the first ("no aggregation")
    aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1)
    # Predict some aggregation when an answer needs aggregation. This increases the probability of all aggregation
    # functions, in the same way as maximum marginal likelihood (MML), but without considering whether the function
    # gives the correct answer.
    # Return the negative log-likelihood multiplied by the aggregate mask
    return -torch.log(aggregation_ops_total_mass) * aggregate_mask


# Per-example aggregation loss
def _calculate_aggregation_loss(
    logits_aggregation,
    aggregate_mask,
    aggregation_labels,
    use_answer_as_supervision,
    num_aggregation_labels,
    aggregation_loss_weight,
):
    """
    Calculates the aggregation loss per example.

    Args:
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
            A mask set to 1 for examples that should use aggregation functions.
        aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`):
            Aggregation function id for every example.
        use_answer_as_supervision (`bool`, *optional*):
            Whether to use the answer as the only supervision for aggregation examples.
        num_aggregation_labels (`int`, *optional*, defaults to 0):
            The number of aggregation operators to predict.
        aggregation_loss_weight (`float`, *optional*, defaults to 1.0):
            Importance weight for the aggregation loss.

    Returns:
        aggregation_loss (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss per example.
    """
    # Loss for the case where the aggregation type is known
    per_example_aggregation_loss = _calculate_aggregation_loss_known(
        logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
    )

    if use_answer_as_supervision:
        # Add aggregation loss for numeric answers that need aggregation
        per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask)
    return aggregation_loss_weight * per_example_aggregation_loss


# Expected result given the cell and aggregation probabilities
def _calculate_expected_result(
    dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config
):
    """
    Calculates the expected result given the cell and aggregation probabilities.

    Args:
        dist_per_cell (`torch.distributions.Bernoulli`):
            Cell selection distribution for each cell.
        numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Numeric values of every token. NaN for tokens which are not numeric values.
        numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Scale of the numeric values of every token.
        input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Mask for the table, without question tokens and table headers.
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        config ([`TapasConfig`]):
            Model configuration class with all the hyperparameters of the model.

    Returns:
        expected_result (`torch.FloatTensor` of shape `(batch_size,)`): The expected result per example.
    """
    if config.use_gumbel_for_cells:
        # Use a RelaxedBernoulli (Gumbel) distribution for the cells
        gumbel_dist = torch.distributions.RelaxedBernoulli(
            # The token logits were already divided by the temperature and used to compute the cell selection errors,
            # so we need to multiply the temperature back in here
            temperature=config.temperature,
            logits=dist_per_cell.logits * config.temperature,
        )
        # Sample the per-cell probabilities from the Gumbel distribution
        scaled_probability_per_cell = gumbel_dist.sample()
    else:
        # Otherwise, use the per-cell probabilities directly
        scaled_probability_per_cell = dist_per_cell.probs

    # <float32>[batch_size, seq_length] - scale the per-cell probability by the numeric value scale and table mask
    scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float
    # The COUNT result is the sum of the cell probabilities per example
    count_result = torch.sum(scaled_probability_per_cell, dim=1)
    # Mask non-numeric table values to zero
    numeric_values_masked = torch.where(
        torch.isnan(numeric_values), torch.zeros_like(numeric_values), numeric_values
    )
    # The SUM result is the weighted sum of the numeric values with the cell probabilities
    sum_result = torch.sum(scaled_probability_per_cell * numeric_values_masked, dim=1)
    # Pick the configured average approximation function
    avg_approximation = config.average_approximation_function
    if avg_approximation == AverageApproximationFunction.RATIO:
        # Ratio approximation of the average
        average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)
    elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:
        # First-order approximation, based on the X_c terms from appendix D of the TAPAS paper
        ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1
        average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell / ex, dim=1)
    elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:
        # Second-order approximation, also from appendix D of the TAPAS paper
        ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1
        pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell)
        var = torch.sum(pointwise_var, dim=1, keepdim=True) - pointwise_var
        multiplier = (var / torch.square(ex) + 1) / ex
        average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell * multiplier, dim=1)
    else:
        # Invalid configuration for the average approximation function
        raise ValueError(f"Invalid average_approximation_function: {config.average_approximation_function}")

    if config.use_gumbel_for_aggregation:
        # Use a RelaxedOneHotCategorical (Gumbel) distribution for the aggregation operations
        gumbel_dist = torch.distributions.RelaxedOneHotCategorical(
            config.aggregation_temperature, logits=logits_aggregation[:, 1:]
        )
        # <float32>[batch_size, num_aggregation_labels - 1] - sample the aggregation probabilities
        aggregation_op_only_probs = gumbel_dist.sample()
    else:
        # Softmax over the aggregation logits, dropping the first ("no aggregation") column
        aggregation_op_only_probs = nn.functional.softmax(
            logits_aggregation[:, 1:] / config.aggregation_temperature, dim=-1
        )

    # Stack the three candidate results column-wise
    all_results = torch.cat(
        [
            torch.unsqueeze(sum_result, dim=1),
            torch.unsqueeze(average_result, dim=1),
            torch.unsqueeze(count_result, dim=1),
        ],
        dim=1,
    )

    # The expected result is the weighted sum of the candidate results
    expected_result = torch.sum(all_results * aggregation_op_only_probs, dim=1)
    return expected_result


# PyTorch did not support Huber loss with a custom delta when this was written, so define it here
def huber_loss(input, target, delta: float = 1.0):
    # Absolute error between input and target, shape (batch_size,)
    errors = torch.abs(input - target)
    # Quadratic below delta, linear above it
    return torch.where(errors < delta, 0.5 * errors**2, errors * delta - (0.5 * delta**2))
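
Recent PyTorch releases do expose this loss directly as `torch.nn.functional.huber_loss` with a `delta` argument; a small sketch (toy values) checking the local definition against it:

```
import torch
import torch.nn.functional as F

x = torch.tensor([0.2, 1.5, -3.0])
y = torch.zeros(3)

ours = huber_loss(x, y, delta=1.0)                        # tensor([0.0200, 1.0000, 2.5000])
theirs = F.huber_loss(x, y, reduction="none", delta=1.0)
assert torch.allclose(ours, theirs)
```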


def _calculate_regression_loss(
    answer,
    aggregate_mask,
    dist_per_cell,
    numeric_values,
    numeric_values_scale,
    input_mask_float,
    logits_aggregation,
    config,
):
    """
    Calculates the regression loss per example.

    Args:
        answer (`torch.FloatTensor` of shape `(batch_size,)`):
            Answer for every example in the batch. NaN if there is no scalar answer.
        aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`):
            A mask set to 1 for examples that should use aggregation functions.
        dist_per_cell (`torch.distributions.Bernoulli`):
            Cell selection distribution for each cell.
        numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Numeric values of every token. NaN for tokens which are not numeric values.
        numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Scale of the numeric values of every token.
        input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
            Mask for the table, without question tokens and table headers.
        logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        config ([`TapasConfig`]):
            Model configuration class with all the parameters of the model.

    Returns:
        per_example_answer_loss_scaled (`torch.FloatTensor` of shape `(batch_size,)`): Scaled answer loss for every
        example in the batch. large_answer_loss_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask which is
        1 for examples for which their answer loss is larger than the answer_loss_cutoff.
    """
    # float32 (batch_size,)
    # Compute the expected result
    expected_result = _calculate_expected_result(
        dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config
    )

    # float32 (batch_size,)
    # Replace NaN answers with zero
    answer_masked = torch.where(torch.isnan(answer), torch.zeros_like(answer), answer)

    if config.use_normalized_answer_loss:
        # Normalizer that also guards against division by zero
        normalizer = (torch.max(torch.abs(expected_result), torch.abs(answer_masked)) + EPSILON_ZERO_DIVISION).detach()

        # Normalize both the answer and the expected result
        normalized_answer_masked = answer_masked / normalizer
        normalized_expected_result = expected_result / normalizer

        # Huber loss on the normalized values
        per_example_answer_loss = huber_loss(
            normalized_expected_result * aggregate_mask, normalized_answer_masked * aggregate_mask
        )
    else:
        # Huber loss on the raw values, with the delta from the config
        per_example_answer_loss = huber_loss(
            expected_result * aggregate_mask, answer_masked * aggregate_mask, delta=config.huber_loss_delta
        )

    # If no answer loss cutoff is configured, keep every example (a mask of ones)
    if config.answer_loss_cutoff is None:
        large_answer_loss_mask = torch.ones_like(per_example_answer_loss, dtype=torch.float32)

    else:
        # Otherwise, zero out examples whose answer loss exceeds the cutoff
        large_answer_loss_mask = torch.where(
            per_example_answer_loss > config.answer_loss_cutoff,
            torch.zeros_like(per_example_answer_loss, dtype=torch.float32),
            torch.ones_like(per_example_answer_loss, dtype=torch.float32),
        )

    # Scale the per-example answer loss by its importance weight and the aggregate mask
    per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask)

    # Return the scaled answer loss and the large-answer-loss mask
    return per_example_answer_loss_scaled, large_answer_loss_mask