python爬虫——利用BeautifulSoup4爬取糗事百科的段子

 1 import requests
 2 from bs4 import BeautifulSoup as bs
 3 
 4 #获取单个页面的源代码网页
 5 def gethtml(pagenum):
 6     url = 'http://www.qiushibaike.com/hot/page/'+str(pagenum)+'/?s=4949992'
 7     req = requests.get(url,headers = Headers)
 8     html = req.text
 9     #print(html)
10     return html
11 
12 #获取单个页面的所有段子
# Extract the text of every joke on a single page.
def getitems(pagenum):
    """Return a list of the text of each joke on page *pagenum*."""
    soup = bs(gethtml(pagenum), "html.parser")
    # Each joke lives in a <div class="content"> element on the hot list.
    return [div.get_text() for div in soup.find_all('div', 'content')]
23 
24 #分别打印单个页面的所有段子        
# Print every joke on one page, numbered from 1.
def getduanzi(pagenum):
    """Print each joke of page *pagenum* with a 1-based index."""
    # enumerate replaces the original hand-maintained counter
    for n, item in enumerate(getitems(pagenum), start=1):
        print('第%d条段子:\n%s' % (n, item))
30 
31 #分别打印所有页面的段子
# Print the jokes of every page in the inclusive range [bginpage, endpage].
def getall(bginpage, endpage):
    """Scrape and print pages *bginpage*..*endpage* (inclusive).

    Inputs are the raw strings typed by the user.  Invalid input
    (non-numeric or non-positive) prints an error message and returns;
    it is validated BEFORE scraping so that a mid-scrape network or
    parse error is no longer swallowed and misreported as bad input
    (the original bare ``except:`` around the whole loop did exactly
    that).
    """
    try:
        start = int(bginpage)
        end = int(endpage)
        # The error message promises "positive integers only" — enforce it.
        if start <= 0 or end <= 0:
            raise ValueError
    except ValueError:
        print('页码输入错误,只接收正整数输入。')
        return
    for pagenum in range(start, end + 1):
        print(('----------华丽丽的分割线【第%d页】----------' % pagenum).center(66))
        getduanzi(pagenum)
40     
if __name__ == '__main__':
    # Browser-like User-Agent so the site serves the normal page.
    # Module-global on purpose: gethtml() reads it by name.
    Headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}

    # Ask the user for the inclusive page range, then scrape it.
    first_page = input('输入起始页:').strip()
    last_page = input('输入终止页:').strip()
    getall(first_page, last_page)
47     
48     

 

posted @ 2017-01-21 21:11  晴空行  阅读(2568)  评论(0编辑  收藏  举报