NLP (3): word2vec + Siamese LSTM

1. Word Embedding

import jieba
import gensim
import numpy as np
import torch

# Pre-trained word2vec vectors (128-dimensional), loaded in binary format.
model = gensim.models.KeyedVectors.load_word2vec_format('model\\word2vec.bin', binary=True)

class WordEmbedding(object):
    def __init__(self):
        pass

    def sentenceTupleToEmbedding(self, data1, data2):
        # Pad both batches to the length of the longest segmented sentence.
        aCutListMaxLen = max([len(list(jieba.cut(sentence_a))) for sentence_a in data1])
        bCutListMaxLen = max([len(list(jieba.cut(sentence_b))) for sentence_b in data2])
        seq_len = max(aCutListMaxLen, bCutListMaxLen)
        a = self.sqence_vec(data1, seq_len)  # (batch_size, seq_len, embedding_dim)
        b = self.sqence_vec(data2, seq_len)
        return torch.FloatTensor(a), torch.FloatTensor(b)

    def sqence_vec(self, data, seq_len):
        data_a_vec = []
        for sequence_a in data:
            sequence_vec = []  # (num_words, 128)
            for word_a in jieba.cut(sequence_a):
                if word_a in model:  # skip out-of-vocabulary words
                    sequence_vec.append(model[word_a])
            # reshape keeps the (0, 128) shape even if every word was out of vocabulary
            sequence_vec = np.array(sequence_vec).reshape(-1, 128)
            # Zero-pad each sentence up to seq_len rows.
            add = np.zeros((seq_len - sequence_vec.shape[0], 128))
            sequence_vec = np.vstack((sequence_vec, add))
            data_a_vec.append(sequence_vec)
        return np.array(data_a_vec)
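
A minimal sketch of how the class above can be exercised, assuming the 128-dimensional vectors have already been loaded; the two address lists below are made-up examples, only there to show the output shapes.

# Hypothetical sentence pairs, just to illustrate the padded tensor shapes.
data1 = ["北京市海淀区中关村大街1号", "上海市浦东新区世纪大道100号"]
data2 = ["北京海淀中关村大街一号", "上海浦东世纪大道100号"]

word = WordEmbedding()
a, b = word.sentenceTupleToEmbedding(data1, data2)
# Both tensors are padded to the same length:
# a.shape == b.shape == (2, seq_len, 128)
print(a.shape, b.shape)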

2. Dataset Setup

import torch.utils.data as data

class DatasetIterater(data.Dataset):
    """Wraps paired sentences and their labels as a PyTorch Dataset."""
    def __init__(self, texta, textb, label):
        self.texta = texta
        self.textb = textb
        self.label = label

    def __getitem__(self, item):
        # Return the raw strings; word segmentation and embedding happen later, per batch.
        texta = self.texta[item]
        textb = self.textb[item]
        label = self.label[item]
        return texta, textb, label

    def __len__(self):
        return len(self.texta)
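
A small usage sketch: wrapping toy lists in DatasetIterater and feeding them to a DataLoader. The sentences and labels here are made up; with real data they come from the CSV columns used in section 4.

from torch.utils.data import DataLoader

# Toy data: two sentence pairs with hypothetical 0/1 similarity labels.
texta = ["中关村大街1号", "世纪大道100号"]
textb = ["中关村大街一号", "人民路5号"]
label = [1, 0]

dataset = DatasetIterater(texta, textb, label)
loader = DataLoader(dataset=dataset, batch_size=2, shuffle=True)
for batch_a, batch_b, batch_label in loader:
    # batch_a and batch_b are batches of raw strings; batch_label is collated into a tensor.
    print(batch_a, batch_b, batch_label)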

3. SiameseLSTM

import torch
from torch import nn

class SiameseLSTM(nn.Module):
    def __init__(self, input_size):
        super(SiameseLSTM, self).__init__()
        # Both branches share the same LSTM weights (Siamese structure).
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=10, num_layers=4, batch_first=True)
        self.fc = nn.Linear(10, 1)

    def forward(self, data1, data2):
        out1, (h1, c1) = self.lstm(data1)
        out2, (h2, c2) = self.lstm(data2)
        # Use the output at the last time step as each sentence's representation.
        pre1 = out1[:, -1, :]
        pre2 = out2[:, -1, :]
        # Element-wise absolute difference, then a linear layer produces a similarity logit.
        dis = torch.abs(pre1 - pre2)
        out = self.fc(dis)
        return out
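
A quick shape check of the model above, using random tensors in place of real word vectors; the batch size and sequence length are arbitrary.

import torch

net = SiameseLSTM(input_size=128)

# Two batches of 4 "sentences", each 7 time steps of 128-dim word vectors.
x1 = torch.randn(4, 7, 128)
x2 = torch.randn(4, 7, 128)

out = net(x1, x2)
# One raw similarity logit per pair; apply a sigmoid to read it as a probability.
print(out.shape)            # torch.Size([4, 1])
print(torch.sigmoid(out))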

4. Main Process

import torch
from torch import nn
from torch.utils.data import DataLoader
import pandas as pd
from datasetIterater import DatasetIterater
from wordEmbedding import WordEmbedding
from siameseLSTM import SiameseLSTM

learning_rate = 0.001
# Sentence pairs and similarity labels from the POI address dataset.
train_df = pd.read_csv("data/POI/negtive.csv")
train_texta = train_df["address_1"]
train_textb = train_df["address_2"]
train_label = train_df["tag"]
train_data = DatasetIterater(train_texta, train_textb, train_label)
train_iter = DataLoader(dataset=train_data, batch_size=32, shuffle=True)

net = SiameseLSTM(128)              # 128-dimensional word vectors as input
criterion = nn.BCEWithLogitsLoss()  # binary cross-entropy on the raw logit
optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
word = WordEmbedding()
train_loss = []
for epoch in range(10):
    for batch_id, (data1, data2, label) in enumerate(train_iter):
        a, b = word.sentenceTupleToEmbedding(data1, data2)  # (batch, seq_len, 128)
        output = net(a, b)                                   # similarity logit, shape (batch, 1)
        loss = criterion(output, label.float().unsqueeze(-1))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss.append(loss.item())
        if batch_id % 10 == 0:
            print(epoch, batch_id, loss.item())
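
After training, the same pipeline can be reused for prediction by applying a sigmoid to the logit and thresholding it. A minimal sketch, assuming hypothetical held-out lists test_texta / test_textb of unseen address strings:

net.eval()
with torch.no_grad():
    # test_texta / test_textb are hypothetical lists of unseen address strings.
    a, b = word.sentenceTupleToEmbedding(test_texta, test_textb)
    logits = net(a, b)
    probs = torch.sigmoid(logits)            # BCEWithLogitsLoss trains a raw logit
    preds = (probs > 0.5).long().squeeze(-1)
    print(preds)                             # 1 = positive pair, 0 = negative pair (per the tag column)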

 
