import subprocess
import urllib.parse

import requests
from bs4 import BeautifulSoup
def get_trending_repos(lang, since="weekly"):
    # Build the target URL; encode the language segment so "c#" becomes "c%23"
    # (a literal "#" would be treated as a URL fragment), while keeping "+" for "c++".
    encoded_lang = urllib.parse.quote(lang, safe="+")
    url = f"https://github.com/trending/{encoded_lang}?since={since}"
    # Send the HTTP request with a browser-like User-Agent
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
    }
    response = requests.get(url, headers=headers, timeout=30)
    # Check whether the request succeeded
    if response.status_code == 200:
        # Parse the HTML with BeautifulSoup
        soup = BeautifulSoup(response.text, "html.parser")
        # Collect all repository links from the trending list
        repo_links = []
        article_elements = soup.find_all("article", class_="Box-row")
        for article in article_elements:
            h2_element = article.find("h2")
            if h2_element:
                a_element = h2_element.find("a")
                if a_element:
                    href = a_element.get("href")
                    if href:
                        full_url = f"https://github.com{href.strip()}"
                        repo_links.append(full_url)
        # Bookmark every repository link with buku
        for link in repo_links:
            print(f"buku -a {link}")
            subprocess.run(["buku", "-a", link])
    else:
        print(f"Request failed, status code: {response.status_code}")
languages = ["go", "rust", "python", "kotlin", "java", "typescript", "javascript", "c", "c++", "c#"]

if __name__ == "__main__":
    for lang in languages:
        get_trending_repos(lang)
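
# Usage sketch (assumes buku is installed and its database initialized; the
# script filename "trending_to_buku.py" is illustrative):
#
#   python trending_to_buku.py
#
# The newly added bookmarks can then be listed with buku's print option:
#
#   buku -p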