import json
import re
import requests
from requests.exceptions import RequestException
import time
# Download the HTML of one board page.
def get_one_page(url, timeout=10):
    """Return the HTML text of *url*, or None on any failure.

    Args:
        url: Page URL to fetch.
        timeout: Seconds to wait for the server (new parameter; defaults
            to 10 so existing callers are unaffected). Without it,
            requests.get can block forever on an unresponsive host.

    Returns:
        The response body as a string on HTTP 200, otherwise None
        (including on any network error).
    """
    # A browser-like User-Agent; Maoyan rejects the default requests UA.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36'
    }
    try:
        # Keep the try body minimal: only the call that can raise.
        response = requests.get(url, headers=headers, timeout=timeout)
    except RequestException:
        return None
    if response.status_code == 200:
        return response.text
    return None
# Extract movie records from the page HTML with a regular expression.
def parse_one_page(html):
    """Yield one dict per movie found in *html*.

    Each dict has the keys: index, image, title, actor, time, score.
    Yields nothing when *html* is falsy — e.g. when get_one_page returned
    None after a failed download (previously this crashed with a
    TypeError inside re.findall).

    Args:
        html: Raw HTML of a Maoyan board page, or None.
    """
    if not html:
        return
    # re.S lets '.' match newlines so one pattern spans a whole <dd> block.
    # Groups: index, image URL, title, actors, release time, integer part
    # of the score, fractional part of the score.
    pattern = re.compile(
        r'<dd>.*?board-index.*?>(.*?)</i>.*?data-src="(.*?)".*?name.*?a.*?>'
        r'(.*?)</a>.*?star">(.*?)</p>.*?releasetime.*?>(.*?)</p>.*?'
        r'integer.*?>(.*?)</i>.*?fraction.*?>(.*?)</i>.*?</dd>',
        re.S)
    for index, image, title, actor, release, integer, fraction in pattern.findall(html):
        yield {
            'index': index,
            'image': image,
            'title': title,
            'actor': actor.strip(),
            # Drop the 5-character release-time label prefix from the date.
            'time': release.strip()[5:],
            # Score is split across two tags, e.g. '9.' + '5' -> '9.5'.
            'score': integer + fraction,
        }
def write_to_file(content, path='result.txt'):
    """Append *content* as one JSON line to *path*.

    The file is opened in append mode because main() runs once per page,
    and each run must add to — not overwrite — earlier results.
    ensure_ascii=False keeps Chinese titles human-readable in the file.

    Args:
        content: A JSON-serializable object (one movie dict).
        path: Output file (new parameter; defaults to the original
            'result.txt' so existing callers are unchanged).
    """
    with open(path, 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
def main(offset):
    """Crawl one board page at *offset* and append its movies to result.txt.

    Does nothing when the download fails: previously the None returned by
    get_one_page was passed straight into parse_one_page, which crashed
    with a TypeError inside re.findall.

    Args:
        offset: Pagination offset (0, 10, 20, ...) for the board URL.
    """
    url = "https://maoyan.com/board/4?offset=" + str(offset)
    html = get_one_page(url)
    if html is None:
        return
    for item in parse_one_page(html):
        write_to_file(item)
if __name__ == '__main__':
    # Pages are addressed by offsets 0, 10, ..., 90 (ten pages total).
    for offset in range(0, 100, 10):
        main(offset=offset)
        time.sleep(1)  # pause between requests to avoid hammering the server