爬虫大作业

# Module-level output file for the scraped article text; nba() below
# writes into this handle.
# NOTE(review): opened in 'w+' but never explicitly closed or flushed
# before the same path is re-opened for reading further down — buffered
# writes may not yet be on disk at that point; confirm and close first.
f=open("F:/Pycharm/project/test.txt",'w+',encoding='utf8')
from collections import Counter

import jieba
import requests
from bs4 import BeautifulSoup

def nba(url):
    """Fetch a Sina NBA news article and dump its paragraph text.

    Args:
        url: Address of the article page to scrape.

    Side effects:
        Prints each paragraph's text and appends it to the module-level
        file handle ``f``.
    """
    res = requests.get(url)
    # Force UTF-8 so res.text decodes the page body correctly.
    res.encoding = 'UTF-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    # The article body is the first element with class "article";
    # collect its <p> paragraphs.
    paragraphs = soup.select('.article')[0].select('p')
    for paragraph in paragraphs:
        print(paragraph.text)
        f.write(paragraph.text)


# Scrape the article into test.txt, then close the handle so buffered
# writes are flushed to disk before the file is read back below
# (without the close, the subsequent read may see an empty file).
nba('http://sports.sina.com.cn/basketball/nba/2018-04-28/doc-ifztkpip6804554.shtml')
f.close()

# ---- Word-frequency analysis of the scraped article ----
# Read the article text back (renamed from `str`, which shadowed the builtin).
with open("F:/Pycharm/project/test.txt", 'r', encoding='utf8') as fin:
    text = fin.read()

# Punctuation and function words that should not be counted.
useless = [",", "。", '了', '在', '的']

# Segment the Chinese text with jieba once (the original cut it twice and
# discarded the first result) and drop the stopwords (the original built
# `useless` but never applied it).
words = [w for w in jieba.cut(text) if w not in useless]

# Counter counts in one O(n) pass instead of calling list.count() per
# distinct word (O(n^2)); most_common(60) sorts by descending frequency
# and safely returns fewer entries when there are fewer than 60 distinct
# words, avoiding the original IndexError risk.
top_words = Counter(words).most_common(60)
for pair in top_words:
    print(pair)

# Persist the high-frequency words, one per line, for the word-cloud site.
with open("F:/Pycharm/project/test.txt", 'w', encoding='utf8') as fout:
    fout.writelines(word + '\n' for word, _ in top_words)


 

 将高频词汇放入 test.txt 以后，打开 http://www.picdata.cn/ ，用网上的词云工具生成图片。

 

 

posted @ 2018-04-28 15:19  Hiro-D  阅读(310)  评论(1编辑  收藏  举报