调试 ChatGLM-4 代码：用极小配置构建模型并运行一次前向传播。
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from modeling_chatglm import ChatGLMForConditionalGeneration
from configuration_chatglm import ChatGLMConfig
# Build a tiny random-weight ChatGLM-4 model and run a single forward pass
# so the output structure can be inspected while debugging.
config = ChatGLMConfig(
    num_layers=2,      # only 2 transformer layers: keeps the debug model tiny/fast
    hidden_size=4,     # minimal width — NOTE(review): must be divisible by the
                       # config's default attention-head count; confirm defaults
    rope_ratio=500,
    original_rope=True,
    padded_vocab_size=151552,   # full GLM-4 vocab is kept so token ids below are valid
    post_layer_norm=True,
    rmsnorm=True,
    seq_length=131072,
    use_cache=True,
    torch_dtype='bfloat16',
    tie_word_embeddings=False,
    eos_token_id=[151329, 151336, 151338],
    pad_token_id=151329,
)
model = ChatGLMForConditionalGeneration(config=config)

import numpy as np  # kept from the original script; currently unused

# Eval mode + no_grad: makes the debug forward pass deterministic (disables any
# dropout) and avoids building a useless autograd graph.
model.eval()
with torch.no_grad():
    output = model(torch.tensor([[2, 3, 5]]))  # one batch of three token ids
print(output)
总结：可以看到 ChatGLM-4 的模型代码与第 3 代没有任何区别，只是修改了配置参数。