import threading
from queue import Queue
from random import choice

# Menu of dishes the chef (producer) threads pick from at random.
dealList=["红烧猪蹄", "卤鸡爪", "酸菜鱼", "糖醋里脊", "九转大肠", "阳春面", "烤鸭", "烧鸡", "剁椒鱼头", "酸汤肥牛", "炖羊肉"]

# Shared bounded queue (capacity 5): put() blocks when full, so producers
# pause until consumers catch up.
queue=Queue(maxsize=5)

# Producer: a chef cooks four random dishes and places them on the queue.
def func_one(name):
    """Produce four randomly chosen dishes onto the shared bounded queue.

    Blocks on put() whenever the queue already holds 5 items.
    """
    for _ in range(4):
        dish = choice(dealList)
        queue.put(dish, block=True)
        print(f"厨师{name}给大家做了一道:{dish}")

# Consumer: a guest takes three dishes off the queue and eats them.
def func_two(name:str):
    """Consume three dishes from the shared queue, acknowledging each one."""
    for _ in range(3):
        dish = queue.get()
        print(f"客人{name}吃掉了:{dish}")
        queue.task_done()  # lets queue.join() account for this item

if __name__=="__main__":
    # Three producer (chef) threads: 3 chefs x 4 dishes = 12 items produced.
    for name in ["张三", "李四", "王五"]:
        threading.Thread(target=func_one, args=(name,)).start()
    # Four consumer (guest) threads: 4 guests x 3 dishes = 12 items consumed,
    # so production and consumption balance exactly.
    for name in ["客人甲", "客人乙", "坤哥", "凡哥"]:
        threading.Thread(target=func_two, args=(name,)).start()
    # Block until every produced item has been marked done via task_done().
    queue.join()
# Demo: a two-element tuple and how unpacking binds each element to a name.
data = ("example title", "http://example.com/image.jpg")

# Tuple unpacking: left-hand names receive the elements in order.
title, img_url = data

print(title)  # -> example title
print(img_url)  # -> http://example.com/image.jpg

import os
import re
from queue import Queue, Empty
from threading import Thread

import requests
from bs4 import BeautifulSoup

"""
1、检查生产者队列当中是否有数据
2、打印生产者线程生产的数据
3、将生产者线程产生的数据添加到消费者队列当中
4、在消费者线程当中,打印消费者队列当中的数据
"""

# Producer class: fetch listing pages and extract image info.
class GetData(Thread):
    """Producer thread: download listing pages taken from ``get_queue`` and
    push ``(title, img_url)`` tuples onto ``save_queue`` for the consumers."""

    def __init__(self, get_queue, save_queue):
        super().__init__()
        self.get_queue = get_queue    # URLs of listing pages still to scrape
        self.save_queue = save_queue  # results handed to SaveData threads
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36 Edg/123.0.0.0"
        }

    def run(self):
        # Drain the URL queue and stop once it is exhausted.
        # NOTE(review): empty()/get() is not atomic across threads, but the
        # queue is fully seeded in main() before any worker starts, so in the
        # worst case a thread exits one item early rather than blocking.
        while True:
            if self.get_queue.empty():
                break
            url = self.get_queue.get()
            self.get_data_index(url)

    def get_data_index(self, url):
        """Fetch one listing page and enqueue every lazy-loaded image on it."""
        # Fix: add a timeout so a stalled request cannot hang the thread forever.
        response = requests.get(url, headers=self.headers, timeout=10)
        soup = BeautifulSoup(response.text, "lxml")
        for tag in soup.find_all("img", class_="lazy"):
            title = tag.get("alt")
            src = tag.get("data-original")
            # Fix: skip malformed tags — the original crashed with a
            # TypeError ("https:" + None) when data-original was missing.
            if not title or not src:
                continue
            self.save_queue.put((title, "https:" + src))



# Consumer class: save page (image) data to disk.
class SaveData(Thread):
    """Consumer thread: pop ``(title, img_url)`` pairs from ``save_queue``,
    download each image, and write it under ``./image``.

    Terminates when the queue stays empty for 3 seconds (i.e. the producers
    have finished and everything has been consumed).
    """

    def __init__(self, get_queue, save_queue):
        super().__init__()
        self.save_queue = save_queue  # incoming (title, img_url) pairs
        self.get_queue = get_queue    # kept for signature parity with GetData

    def run(self):
        # Fix: create the output directory up front — the original raised
        # FileNotFoundError on the very first open() if ./image was missing.
        os.makedirs("./image", exist_ok=True)
        while True:
            try:
                # The 3-second timeout doubles as the shutdown signal: once
                # producers are done and the queue stays empty, Empty is
                # raised and the thread exits cleanly.
                title, img_url = self.save_queue.get(timeout=3)
            except Empty:
                print("代码结束执行")
                break
            try:
                # Fix: download BEFORE opening the file, so a failed request
                # no longer leaves a truncated/empty .png behind; also add a
                # request timeout.
                content = requests.get(img_url, timeout=10).content
                # Fix: sanitize the title — path separators or other illegal
                # filename characters previously crashed the open() call.
                safe_title = re.sub(r'[\\/:*?"<>|]', "_", title)
                with open("./image/" + safe_title + ".png", "wb") as file:
                    file.write(content)
                print(f"{title}---保存成功")
            except Exception as e:
                # Fix: log and keep consuming — one bad URL used to kill the
                # whole consumer thread via the shared break-on-any-exception.
                print(e)



def main():
    """Build the work queues, seed the listing-page URLs, and launch the
    producer/consumer threads."""
    get_queue = Queue()
    save_queue = Queue()

    # Seed pages 1-5 of the listing before any worker starts.
    for page in range(1, 6):
        get_queue.put(f"https://sc.chinaz.com/psd/list_xinmeiti_{page}.html")

    # Three producer threads and three consumer threads.
    for _ in range(3):
        GetData(get_queue, save_queue).start()
        SaveData(get_queue, save_queue).start()


# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()

# posted on 2024-05-12 20:02  下雨天的眼睛  阅读(6)  评论(0编辑  收藏  举报