【Python】微博自动抢红包

 

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
# -*- coding: utf-8 -*-
import requests
import js2xml
from lxml import etree
# Shared HTTP headers for every request in this script: a logged-in Weibo
# cookie plus a mobile Chrome user-agent so the server returns the mobile
# (h5) versions of the red-packet pages.
headers = {
# Replace this with your own Weibo cookie
'Cookie':'9b',
'User-Agent': 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Mobile Safari/535.19',
}
# Fetch the list of red packets
 
 
def getuid():
    """Fetch the red-packet landing page and return the list of owner uids.

    Scrapes the ``action-data`` attribute of every ``div.m-auto-box``
    element, which carries the ouid of each red-packet owner.

    Returns:
        list: the scraped action-data values; empty when the request does
        not return HTTP 200.
    """
    url = 'http://chunjie.hongbao.weibo.com/hongbao2017/h5index'
    # Bug fix: `alluid` used to be assigned only inside the 200 branch, so
    # any other status code raised NameError at the return statement.
    alluid = []
    # Send the shared cookie/UA headers so Weibo serves the logged-in page.
    z = requests.get(url, headers=headers)
    if z.status_code == 200:
        # Collect every ouid embedded in the page markup.
        alluid = etree.HTML(z.content).xpath(
            '//div[@class="m-auto-box"]/@action-data')
    return alluid
 
def getname(url):
    """Fetch a red-packet page and print its owner's nickname.

    Args:
        url: red-packet detail page, e.g.
            ``http://hongbao.weibo.com/h5/aboutyou?groupid=...&ouid=...``.

    Returns:
        list: the matched ``span.nickname`` elements; empty when the
        request fails or no nickname is present on the page.
    """
    # Bug fix: `name` used to be assigned only on HTTP 200, raising
    # NameError at the return statement for any other status; `name[0]`
    # also raised IndexError when the xpath matched nothing.
    name = []
    z = requests.get(url, headers=headers)
    if z.status_code == 200:
        name = etree.HTML(z.content).xpath('//span[@class="nickname"]')
        if name:
            print(name[0].text)
    return name
 
# Extract the value of the `st` token
 
 
def getst(url):
    """Scrape the anti-forgery ``st`` token from a red-packet page.

    The token lives inside an inline <script> block; the script text is
    parsed with js2xml so the token can be located with an xpath query.

    Args:
        url: red-packet detail page URL.

    Returns:
        str: the ``st`` token value.
    """
    # Send the shared cookie/UA headers.
    z = requests.get(url, headers=headers)
    # Grab the first JavaScript block mentioning 'weibo' and strip the
    # leading HTML comment marker, which would otherwise break parsing
    # (the comment contains non-ASCII text).
    jscode = etree.HTML(z.content).xpath(
        "//script[contains(., 'weibo')]/text()")[0].replace(u'<!--拆包页-->', '')
    # Convert the JavaScript source into an xpath-queryable XML tree.
    parsed_js = js2xml.parse(jscode)
    # Debug aid: dump the XML to see where the token sits.
    # print js2xml.pretty_print(parsed_js)

    # The token is stored as an object property named "st".
    st = parsed_js.xpath('//property[@name="st"]/string/text()')[0]
    return st
# Grab a red packet
 
 
def tj(url, uid, st, tjheaders):
    """POST a grab request for one red packet and print the outcome.

    Args:
        url: the ``aj_h5/lottery`` endpoint URL for this uid.
        uid: owner uid of the red packet being grabbed.
        st: anti-forgery token scraped from the page (see ``getst``).
        tjheaders: request headers; must carry a matching ``Referer``,
            otherwise the endpoint rejects the request.
    """
    # Form payload expected by the lottery endpoint.
    data = {
        'groupid': '1000110',
        'uid': uid,
        'share': '1',
        'st': st,
    }
    z = requests.post(url, data=data, headers=tjheaders)
    # Parse the JSON response.
    _ = z.json()
    # Bug fix: the old code unconditionally indexed
    # _['data']['result']['name'], which raised KeyError on error
    # responses (the caller then silently swallowed it). Check the 'ok'
    # flag first, as the commented-out original code intended.
    if _.get('ok') == 1:
        print(_['data']['result']['name'])
    else:
        # Surface the server-side failure instead of crashing.
        print(_)
if __name__ == '__main__':
    # Collect the uid of every red-packet owner from the landing page.
    uids = getuid()
    url = 'http://hongbao.weibo.com/h5/aboutyou?groupid=1000110'
    getname(url)
    # The `st` token embedded in the page's inline JavaScript is required
    # by the grab endpoint.
    st = getst(url)
    for uid in uids:
        # Red-packet detail page for this owner.
        url = 'http://hongbao.weibo.com/h5/aboutyou?groupid=1000110&ouid=%s' %uid

        # AJAX endpoint that actually grabs the red packet.
        tjurl = 'http://hongbao.weibo.com/aj_h5/lottery?uid=%s&groupid=1000110&wm=' %uid
        getname(url)
        # The grab endpoint rejects requests whose Referer does not match.
        headers['Referer'] = url
        tjheaders = headers
        try:
            # Attempt the grab for this uid.
            tj(tjurl, uid, st, tjheaders)
        except Exception as e:
            # Bug fix: the old bare `except: pass` also swallowed
            # KeyboardInterrupt/SystemExit and hid real bugs. Report the
            # failure and continue with the next uid.
            print('grab failed for uid %s: %s' % (uid, e))

  

posted @   陈泽泽  阅读(2504)  评论(0编辑  收藏  举报
编辑推荐:
· Java 中堆内存和栈内存上的数据分布和特点
· 开发中对象命名的一点思考
· .NET Core内存结构体系(Windows环境)底层原理浅谈
· C# 深度学习:对抗生成网络(GAN)训练头像生成模型
· .NET 适配 HarmonyOS 进展
阅读排行:
· 如何给本地部署的DeepSeek投喂数据,让他更懂你
· 超详细,DeepSeek 接入PyCharm实现AI编程!(支持本地部署DeepSeek及官方Dee
· 用 DeepSeek 给对象做个网站,她一定感动坏了
· .NET 8.0 + Linux 香橙派,实现高效的 IoT 数据采集与控制解决方案
· .NET中 泛型 + 依赖注入 的实现与应用
点击右上角即可分享
微信分享提示