# Advanced in-house AI-assisted web crawler example

import requests
from bs4 import BeautifulSoup
import pandas as pd
import random
import time

# Pool of User-Agent strings, rotated per request

# Desktop (Windows/macOS) and mobile (Android) browser identities; fetch_data
# picks one at random per request to reduce trivial bot detection.
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.1 Safari/605.1.15",
    "Mozilla/5.0 (Linux; Android 10; Pixel 3 XL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Mobile Safari/537.36",
]

# Proxy settings

# NOTE(review): placeholder proxy credentials — replace username/password/
# proxy_ip/port with a real proxy before running; as written, requests.get
# in fetch_data will fail to resolve the literal host "proxy_ip".
PROXIES = {
"http": "http://username:password@proxy_ip:port",
"https": "http://username:password@proxy_ip:port"
}

# Fetch helper

def fetch_data(url):
    """Fetch *url* and return its HTML text, or None on any request failure.

    A random User-Agent from USER_AGENTS is sent with each request, and the
    module-level PROXIES are used. This is best-effort: network and HTTP
    errors are printed and swallowed rather than propagated.
    """
    headers = {
        "User-Agent": random.choice(USER_AGENTS),
    }
    try:
        response = requests.get(url, headers=headers, proxies=PROXIES, timeout=10)
        response.raise_for_status()  # raise on 4xx/5xx so we never parse error pages
        return response.text
    except requests.exceptions.RequestException as e:
        print(f"Error fetching {url}: {e}")
        return None

# Parse data

def parse_data(html):
    """Extract product records from an HTML page.

    Returns a list of dicts with 'Title', 'Price' and 'Link' keys — one per
    '.product-item' element found in *html*. Items missing a title, price or
    link are skipped; the original code raised AttributeError/KeyError on the
    first incomplete item.
    """
    soup = BeautifulSoup(html, 'html.parser')
    products = []

    for item in soup.select('.product-item'):
        title_el = item.select_one('.product-title')
        price_el = item.select_one('.product-price')
        link_el = item.select_one('a')

        # select_one returns None when the selector matches nothing; guard
        # before dereferencing so one malformed item can't abort the crawl.
        if title_el is None or price_el is None or link_el is None:
            continue
        link = link_el.get('href')
        if link is None:
            continue

        products.append({
            'Title': title_el.get_text(strip=True),
            'Price': price_el.get_text(strip=True),
            'Link': link,
        })

    return products

# Main entry point

def main():
    """Crawl the target URL, parse its products and write products.csv."""
    url = 'https://example.com/products'  # replace with the target site
    html = fetch_data(url)

    if html:
        products = parse_data(html)
        df = pd.DataFrame(products)
        # utf-8-sig prepends a BOM so Excel opens the CSV with the right encoding
        df.to_csv('products.csv', index=False, encoding='utf-8-sig')
        print("Data saved to products.csv")

        # Show a sample of the scraped data
        print(df.head())

# Original read `if name == "main":` (dunders stripped by the blog paste),
# which raises NameError; the trailing backticks were a syntax error.
if __name__ == "__main__":
    main()

# posted @ 2024-12-11 17:36  kshuo  阅读(52)  评论(1)  收藏  举报