CVPR 论文爬取
编程爬取 CVPR 2019 开放获取(open access)论文列表,提取每篇论文的标题、摘要、关键词与链接,并将爬取结果导入到 MySQL 数据库中。(注:原描述为"疫情爬取",与代码实际功能不符,已更正。)
"""Scrape CVPR 2019 open-access paper metadata and store it in MySQL.

For each paper page the script extracts the title, abstract, a link, and
the top-5 jieba keywords of the abstract, then inserts one row per paper
into table ``CVPR`` of the ``pachong`` database.
"""
# Explicit import instead of the original `from jieba.analyse import *`
# wildcard, which pollutes the module namespace.
from jieba.analyse import extract_tags
from lxml import etree
from pymysql import connect
import requests
import jieba

BASE_URL = "http://openaccess.thecvf.com"


class CVPR:
    """Persists scraped CVPR paper records into the `pachong` MySQL DB."""

    def saveContent_list(self, title, zhaiyao, guanjian, lianjie):
        """Insert one paper row into table CVPR.

        Args:
            title:   paper title.
            zhaiyao: abstract text.
            guanjian: space-separated keywords.
            lianjie: absolute URL of the paper.

        Raises:
            pymysql errors on connection/SQL failure; the caller decides
            how to report them.
        """
        # host / user / password / database
        con = connect("localhost", "root", "123", "pachong")
        try:
            # pymysql cursors are context managers: closed automatically.
            with con.cursor() as cursor:
                # Parameterized query — safe against SQL injection.
                cursor.execute(
                    "insert into CVPR values(%s,%s,%s,%s)",
                    (title, zhaiyao, guanjian, lianjie),
                )
            con.commit()
        finally:
            # Bug fix: originally the connection leaked whenever
            # execute()/commit() raised. Always release it.
            con.close()


def _resolve_link(rel):
    """Turn a relative href like '../../foo.pdf' into an absolute URL.

    Bug fix: the original used rel.strip('../../'), but str.strip removes
    a *character set* ('.' and '/') from both ends, not the literal prefix.
    """
    while rel.startswith("../"):
        rel = rel[len("../"):]
    return BASE_URL + "/" + rel


def main():
    """Fetch the CVPR 2019 index, then scrape and store each paper page."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
                      "(KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36"
    }
    # Fixed: original URLs carried a stray leading space and only worked
    # because requests lstrips URLs before sending.
    url = BASE_URL + "/CVPR2019.py"
    cvpr = CVPR()

    response = requests.get(url, headers=headers)
    index_html = etree.HTML(response.content.decode())
    hrefs = index_html.xpath("//div[@id='content']/dl/dt/a/@href")
    # Kept from the original: skip the first 113 entries and the last one —
    # presumably trims non-paper/navigation links. TODO confirm the offsets.
    hrefs = hrefs[113:-1]

    for href in hrefs:
        page_url = "{0}/{1}".format(BASE_URL, href)
        page = requests.get(page_url, headers=headers)
        detail = etree.HTML(page.content.decode())

        title = detail.xpath(
            "//div[@id='content']/dl/dd//div[@id='papertitle']/text()")
        lianjie = detail.xpath("//div[@id='content']/dl/dd//a/@href")
        zhaiyao = detail.xpath(
            "//div[@id='content']/dl/dd//div[@id='abstract']/text()")

        # Bug fix: the original used `and`, so a page missing only SOME of
        # the fields fell through and crashed on the [0] indexing below.
        # Any missing field means the page cannot be stored — skip it.
        if not title or not lianjie or not zhaiyao:
            continue

        # Top-5 keywords of the abstract, space-separated.
        guanjian = " ".join(
            keyword
            for keyword, _weight in extract_tags(
                zhaiyao[0].strip(), topK=5, withWeight=True)
        )

        try:
            cvpr.saveContent_list(
                title[0].strip(),
                zhaiyao[0].strip(),
                guanjian,
                _resolve_link(lianjie[0]),
            )
            print("存入成功")
        except Exception as exc:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt); the per-paper loop stays best-effort,
            # but the failure reason is now visible.
            print("存入失败", exc)


if __name__ == "__main__":
    main()