#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Usage: python crawler0723.py
import os
import re
import sys
import requests
import MySQLdb
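# Note: this is Python 2 code (print statements, "except Exception, e" syntax).
# Assumed install names for the third-party modules: pip install requests MySQL-python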

num = 0

def main():
    global num
    # Connect to the local MySQL database; credentials are hard-coded.
    try:
        conn = MySQLdb.connect(host='localhost', user='root', passwd='123456',
                               db='addressbookdb', charset="utf8")
        conn.query("set names utf8")
    except Exception, e:
        print e
        sys.exit()
    cursor = conn.cursor()
    # Make sure the icon output directory exists before downloading.
    if not os.path.isdir("picture"):
        os.makedirs("picture")
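    # For reference, the INSERT below assumes an "address" table shaped roughly
    # like this (a sketch; the column types are guesses, the original DDL is not shown):
    #   CREATE TABLE address (
    #       name VARCHAR(255), version VARCHAR(64), developer VARCHAR(255),
    #       pubtime VARCHAR(64), filesize VARCHAR(64), support VARCHAR(64),
    #       introduction TEXT
    #   );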
    # Walk the paginated category listing; each page links to app detail pages.
    for k in range(1, 2574):
        url = "http://apk.gfan.com/apps_7_1_" + str(k) + ".html"
        result = requests.get(url).content
        # The href may or may not carry the http://apk.gfan.com prefix, so
        # match it as an optional non-capturing group and keep only the path.
        pattern = re.compile(r'<a href="(?:http://apk\.gfan\.com)?(/Product/App\d{1,8}\.html)"')
        dataresult = list(set(re.findall(pattern, result)))
        for i in dataresult:
            t = "http://apk.gfan.com" + i
            print t
            result = requests.get(t).content
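            # Hedged aside: requests.get() has no timeout by default, so one
            # dead server can stall the whole crawl. A safer fetch might be
            # (the 10-second value is an assumption, not from the original):
            #   try:
            #       result = requests.get(t, timeout=10).content
            #   except requests.RequestException:
            #       continue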
pattern=re.compile('<div class="appdiscrib">[\s\S]*?<h4>(.+?)</h4>')
data0=re.findall(pattern,result)
print data0[0]
pattern=re.compile('版 本 号(.+?)</li>')
data1=re.findall(pattern,result)
pattern=re.compile('开 发 者(.+?)</li>')
data2=re.findall(pattern,result)
pattern=re.compile('发布时间(.+?)</li>')
data3=re.findall(pattern,result)
pattern=re.compile('文件大小(.+?)</li>')
data4=re.findall(pattern,result)
pattern=re.compile('支持固件(.+?)</li>')
data5=re.findall(pattern,result)
pattern=re.compile('应用介绍</h3>[\s\S]*?<div class="intro">([\s\S]*?)</div>')
data6=re.findall(pattern,result)
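            # Hedged aside: these field regexes are tied to the exact page
            # markup; an HTML parser (e.g. the standard-library HTMLParser,
            # or BeautifulSoup if it is available) would survive markup
            # changes better than pattern matching on the raw source.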
sql="insert into address(name,version,developer,pubtime,filesize,support,introduction) values(%s,%s,%s,%s,%s,%s,%s)"
for items in data6:
if(data5):
values=(data0[0],data1[0],data2[0],data3[0],data4[0],data5[0],re.sub('<br />',' ',items))
else:
values=(data0[0],data1[0],data2[0],data3[0],data4[0],'NULL',re.sub('<br />',' ',items))
#print values
#print sql % values
try:
cursor.execute(sql,values)
conn.commit()
except:
pass
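            # Sketch of a batched alternative: collect the value tuples for a
            # page in a list and issue one cursor.executemany(sql, all_values)
            # plus a single conn.commit(), instead of one round trip per row.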
            # App icon: capture the src URL (quotes excluded) from the title
            # block and download it to an incrementing file name.
            pattern = re.compile(r'<div class="appTitle clearfix">[\s\S]*?<img src="(.+?)"')
            data = re.findall(pattern, result)
            for j in data:
                print j
                # urllib.urlretrieve(url, filename) would also work here:
                # first argument is the address, second the local file name.
                temp = requests.get(j)
                f = open("picture/" + str(num), "wb")
                num = num + 1
                print num
                f.write(temp.content)
                f.close()
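                # Hedged aside: for larger files requests can stream the body,
                # e.g. r = requests.get(j, stream=True) and then f.write()
                # each chunk from r.iter_content(8192), so the whole image
                # never sits in memory at once.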
#sql="select * from address"
#cursor.execute(sql)
#conn.commit()
#finalresult=cursor.fetchall()
#if finalresult:
#for x in finalresult:
#pass #print x[0:]
    cursor.close()
    conn.close()

if __name__ == "__main__":
    main()