# Preliminary Baidu search-result crawler
# Baidu search-result crawler (ported from Python 2 to Python 3).
#
# Prompts for a search term and a page count, fetches each Baidu result page
# (www.baidu.com/s?wd=<query>&pn=<page*10>), extracts the outbound links from
# the result titles (elements with class "t"), follows each redirect to its
# real URL, and appends the target domains to url.txt, one per line.
from bs4 import BeautifulSoup
import re
import urllib.error
import urllib.parse
import urllib.request

# Matches the href attribute inside an anchor tag rendered as raw HTML text.
# Hoisted to module level so it is compiled once, not once per result link.
HREF_RE = re.compile(r'href="(.+?)"')


def fetch_result_links(query, page_index):
    """Return the title-link hrefs found on one Baidu result page.

    query: already percent-quoted search term.
    page_index: zero-based page number; Baidu paginates with pn = page * 10.
    Returns [] when the page cannot be fetched (best-effort, like the
    original's `continue` on URLError).
    """
    url = 'http://www.baidu.com/s?&wd=' + query + '&pn=' + str(page_index * 10)
    try:
        resp = urllib.request.urlopen(url)
    except urllib.error.URLError:
        return []
    soup = BeautifulSoup(resp.read(), 'html.parser')
    hrefs = []
    for tag in soup.find_all(class_='t'):
        found = HREF_RE.findall(str(tag))
        # Bug fix: the original used `break` here, silently discarding every
        # remaining result as soon as one title lacked an href. Skip instead.
        if found:
            hrefs.append(found[0])
    return hrefs


def resolve_domain(href):
    """Follow a result link's redirect and return the final URL's netloc,
    or None when the link cannot be opened."""
    try:
        resp = urllib.request.urlopen(href)
    except urllib.error.URLError:
        return None
    # geturl() reflects the URL after redirects, i.e. the real target site.
    return urllib.parse.urlparse(resp.geturl()).netloc


def main():
    # Bug fix: percent-quote the raw query so spaces and non-ASCII input
    # survive URL concatenation (plain ASCII queries are unaffected).
    query = urllib.parse.quote(input('Please input what you want to search'))
    pages = int(input('Please input page number 1-10'))
    # Open the output file once under `with` instead of reopening and
    # re-closing it for every single domain written.
    with open('url.txt', 'a+') as fp:
        for page in range(pages):
            for href in fetch_result_links(query, page):
                domain = resolve_domain(href)
                if domain is not None:
                    fp.write(domain + '\n')


if __name__ == '__main__':
    main()