Scrapy basics: saving item data to MySQL and MongoDB through a pipeline
pipelines.py (MySQL):
import pymysql

class xxPipeline(object):
    def process_item(self, item, spider):
        # Open a connection for each item (simple, but see the reuse sketch below)
        con = pymysql.connect(host='localhost', user='', passwd='',
                              db='', charset='utf8')
        cur = con.cursor()
        # Parameterized INSERT; table_name/column_name stand for your own table and column
        sql = "insert into table_name(column_name) values(%s)"
        lis = (item['field'],)  # trailing comma makes this a one-element tuple
        cur.execute(sql, lis)
        con.commit()
        cur.close()
        con.close()
        return item
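
Opening a fresh connection for every item works but is wasteful on large crawls. A common refinement is to open the connection once when the spider starts and close it when it finishes, using Scrapy's open_spider/close_spider hooks. A minimal sketch, assuming the same placeholder credentials, table, and column names as above:

import pymysql

class MysqlPipeline(object):
    def open_spider(self, spider):
        # One connection for the whole crawl instead of one per item
        self.con = pymysql.connect(host='localhost', user='', passwd='',
                                   db='', charset='utf8')
        self.cur = self.con.cursor()

    def process_item(self, item, spider):
        self.cur.execute("insert into table_name(column_name) values(%s)",
                         (item['field'],))
        self.con.commit()
        return item

    def close_spider(self, spider):
        self.cur.close()
        self.con.close()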
Configure the pipeline in settings.py:
ITEM_PIPELINES = {
    'MySpider.pipelines.xxPipeline': 300,
}
The database, table, and columns must already exist in MySQL before the pipeline runs.
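
If the schema does not exist yet, you can create it once up front. A minimal sketch using pymysql itself; the database name mydb, table name table_name, and column column_name are placeholders matching the examples above:

import pymysql

# Connect without selecting a database so the database can be created first
con = pymysql.connect(host='localhost', user='', passwd='', charset='utf8')
cur = con.cursor()
cur.execute("CREATE DATABASE IF NOT EXISTS mydb DEFAULT CHARACTER SET utf8")
cur.execute("CREATE TABLE IF NOT EXISTS mydb.table_name ("
            "id INT AUTO_INCREMENT PRIMARY KEY, "
            "column_name VARCHAR(255))")
con.commit()
cur.close()
con.close()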
pipelines.py (MongoDB):
import pymongo

class MyspiderPipeline(object):
    def __init__(self):
        # pymongo.MongoClient(host, port) creates the MongoDB connection
        client = pymongo.MongoClient(host="localhost", port=27017)
        # Select the target database
        db_name = client["dbname"]
        # Select the collection inside that database that will hold the data
        table_name = db_name["tablename"]
        self.post = table_name

    def process_item(self, item, spider):
        data = dict(item)
        # insert_one() is the current PyMongo API; the old insert() was removed in PyMongo 4
        self.post.insert_one(data)
        return item
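
As written, the MongoClient is created in __init__ and never closed. A tidier variant mirrors the MySQL sketch above and uses Scrapy's open_spider/close_spider hooks (the database and collection names are still placeholders):

import pymongo

class MyspiderPipeline(object):
    def open_spider(self, spider):
        self.client = pymongo.MongoClient(host="localhost", port=27017)
        self.post = self.client["dbname"]["tablename"]

    def process_item(self, item, spider):
        self.post.insert_one(dict(item))
        return item

    def close_spider(self, spider):
        # Release the connection when the crawl finishes
        self.client.close()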
Configure settings.py:
DEFAULT_REQUEST_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0',
}