# Scrape danmaku (bullet comments) from a Bilibili video, segment the Chinese text
# with jieba, and draw a word cloud of the most frequent words
import requests
from bs4 import BeautifulSoup
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import re
import jieba
import numpy as np
from wordcloud import WordCloud, ImageColorGenerator
# Fetch the danmaku XML for the video and collect every <d> element
url = "https://comment.bilibili.com/92542241.xml"
r = requests.get(url)
r.encoding = 'utf8'
soup = BeautifulSoup(r.text, 'lxml')
d = soup.find_all('d')
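# Each danmaku entry in the XML looks roughly like
#   <d p="23.826,1,25,16777215,...">弹幕文本</d>
# (the p attribute carries timing/style metadata), so i.text below is the
# visible comment text.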
# Store each comment together with its source URL and the crawl date
dlst = []
for i in d:
    danmuku = {}
    danmuku['弹幕'] = i.text
    danmuku['网址'] = url
    danmuku['时间'] = datetime.date.today()
    dlst.append(danmuku)
df = pd.DataFrame(dlst)
# Keep only the Chinese characters (CJK range 一-龥) and write them to a plain-text file
pattern = re.compile(r'[一-龥]+')
with open('sign.txt', 'w', encoding='utf8') as f:
    for text in df['弹幕'].values:
        filter_data = re.findall(pattern, text)
        f.write("".join(filter_data))
# Read the cleaned text back and segment it into words with jieba
with open('sign.txt', 'r', encoding='utf8') as f:
    data = f.read()
segment = jieba.lcut(data)
# Count word frequencies; the dict-style .agg({'计数': np.size}) used in the
# original has been removed from newer pandas, so use groupby().size() instead
words_df = pd.DataFrame({"segment": segment})
words_stat = words_df.groupby('segment').size().reset_index(name='计数')
words_stat = words_stat.sort_values(by='计数', ascending=False)
# font_path must point to a font that contains CJK glyphs, otherwise the Chinese
# words will not render; replace it with a Chinese-capable font on your system
wordcloud = WordCloud(
    font_path="/Library/Application Support/Apple/Fonts/iLife/BalegaRegular.otf",
    background_color="white",
    max_words=3000,
    max_font_size=200,
    random_state=100,
    width=1000, height=860, margin=2,
)
# Build a {word: count} mapping from the 500 most frequent words and feed it to the cloud
word_frequency = {x[0]: x[1] for x in words_stat.head(500).values}
wordcloud.generate_from_frequencies(word_frequency)
# Save the word cloud to disk and display it
wordcloud.to_file('output.png')
plt.imshow(wordcloud)
plt.axis("off")
plt.show()