keras字符编码

https://www.jianshu.com/p/258a21ae0390
https://blog.csdn.net/apengpengpeng/article/details/80866034
#-*-coding:utf-8-*-
# import numpy as np
#
# samples = ['The cat sat on the mat.', 'The dog ate my homework.']
#
# # 10
# # 定义一个集合,得到{'The': 1, 'cat': 2, 'sat': 3, 'on': 4, 'the': 5, 'mat.': 6, 'dog': 7, 'ate': 8, 'my': 9, 'homework.': 10},也就是筛选出这个句子中对应的了哪些词,然后并赋予索引值,其实就是个词库
# token_index = {}
# for sample in samples:
# for word in sample.split():
# if word not in token_index:
# token_index[word] = len(token_index) + 1
#
# # 限制了读取的句子的长度,一句话最长10个词
# print(token_index)
# max_length = 10
# results = np.zeros(shape=(len(samples),
# max_length,
# max(token_index.values()) + 1))
#
# # print(results) 2, 10, 11
# for i, sample in enumerate(samples):
# for j, word in list(enumerate(sample.split()))[:max_length]:
# index = token_index.get(word)
# results[i, j, index] = 1.
# print(results)

import numpy as np
import string

samples = ['The cat sat on the mat.', 'The dog ate my homework.']

# Pre-defined character set; string.printable is
# '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
#  !"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c'  (100 characters)
characters = string.printable
# Vocabulary: 1-based index -> character. Index 0 is deliberately left
# unused so the one-hot dimension is len(characters) + 1.
token_index = dict(zip(range(1, len(characters) + 1), characters))

# Reverse lookup built ONCE: character -> 1-based index. The previous
# version re-scanned token_index.items() for every character of every
# sample (O(charset) per character) and, if a character was ever missing,
# silently reused the stale `index` from the previous iteration.
char_index = {char: idx for idx, char in token_index.items()}

max_length = 50  # encode at most the first 50 characters of each sample

# One-hot tensor of shape (num_samples, max_length, vocab_size + 1).
results = np.zeros((len(samples), max_length, max(token_index.keys()) + 1))
for i, sample in enumerate(samples):
    # Truncate to max_length so long samples cannot index out of bounds.
    for j, character in enumerate(sample[:max_length]):
        index = char_index.get(character)
        if index is not None:  # skip characters outside the vocabulary
            results[i, j, index] = 1.

print(results)
posted @   水木清扬  阅读(215)  评论(0编辑  收藏  举报
编辑推荐:
· 开发者必知的日志记录最佳实践
· SQL Server 2025 AI相关能力初探
· Linux系列:如何用 C#调用 C方法造成内存泄露
· AI与.NET技术实操系列(二):开始使用ML.NET
· 记一次.NET内存居高不下排查解决与启示
阅读排行:
· 阿里最新开源QwQ-32B,效果媲美deepseek-r1满血版,部署成本又又又降低了!
· 开源Multi-agent AI智能体框架aevatar.ai,欢迎大家贡献代码
· Manus重磅发布:全球首款通用AI代理技术深度解析与实战指南
· 被坑几百块钱后,我竟然真的恢复了删除的微信聊天记录!
· 没有Manus邀请码?试试免邀请码的MGX或者开源的OpenManus吧
点击右上角即可分享
微信分享提示