apscheduler: setting up scheduled tasks for a Python script


Background concepts: https://zhuanlan.zhihu.com/p/95563033

Difference between BlockingScheduler and BackgroundScheduler: https://www.jianshu.com/p/b829a920bd33 (a minimal sketch of the background variant follows)
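
In short: BlockingScheduler.start() takes over the calling thread and never returns (fine when the scheduler is the whole program, as in the script below), while BackgroundScheduler.start() returns immediately and runs jobs on a daemon thread. A minimal sketch of the background variant (the tick job is a hypothetical placeholder, not part of the original script):

# BackgroundScheduler sketch: start() returns immediately and jobs run in a
# daemon thread, so the main thread must be kept alive.
import time
from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print('tick')

scheduler = BackgroundScheduler()
scheduler.add_job(tick, trigger='interval', seconds=5)
scheduler.start()  # unlike BlockingScheduler.start(), this does not block
try:
    while True:
        time.sleep(1)  # keep the process alive so the daemon thread can fire
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()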


Setting up a recurring task with APScheduler: submit monitoring data every 5 seconds. Full script:

#!/usr/bin/env python
# -*- coding:utf-8 -*- 
import os
import sys
import time  # used for timestamps and the interval between submissions
import datetime
import logging

# Install these dependencies first:
# pip install requests
# pip install requests-aws4auth
import requests
from requests_aws4auth import AWS4Auth
BASE_DIR = os.path.dirname(__file__)
print(BASE_DIR)
sys.path.append(BASE_DIR)
print(sys.path)
from monitor.monitor import Monitor  # local helper module (not shown in this post); a sketch of its assumed interface follows the script
import json


logger = logging.getLogger("mylogger")
logger.setLevel("DEBUG")
ch = logging.StreamHandler()
ch.setLevel("DEBUG")
logger.addHandler(ch)

logger.debug("Pushing monitoring data -----")
region = 'cn-beijing-6'
service = 'monitor'
host = 'http://%s.%s.api.ksyun.com' % (service, region)
headers = {
    'Accept': 'Application/json'
}
# Your own AK/SK (access key / secret key)
ak = "XXXXXXXXXXXXXXX"
sk = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# Debug: log the request context (avoid logging the SK like this in production)
logger.debug("region:" + region + ",service:" + service + ",host:" + host + ",ak:" + ak + ",sk:" + sk)
credentials = {
    'ak': ak,
    'sk': sk
}
def auth():
    # The endpoint accepts AWS Signature Version 4 signing, hence requests-aws4auth
    return AWS4Auth(credentials['ak'], credentials['sk'], region, service)

query = {
    'Action': 'PutMetricData',
    'Version': '2017-07-01'
}


def getUtcTimeStampStr():
    # Current UTC time, formatted as ISO-8601 with a 'Z' suffix
    utctime = time.gmtime()
    utc_str = time.strftime("%Y-%m-%dT%H:%M:%SZ", utctime)
    # utc_str = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    return utc_str

def get_data():
    m = Monitor()
    cpu_info, mem_info, swap_info = m.cpu(), m.mem(), m.swap()
    utc_time = getUtcTimeStampStr()
    json_data = [
        {
            "namespace": "ZMD_Host_Monitor",
            "metricName": "cpu_percent",
            "timestamp": utc_time,
            "value": cpu_info.get('percent_avg'),
            "dimensions": [
                "product=Zmd_Host_Monitor",
                "apiname=zmd_cpu_test"
            ],
            "unit": "Percent"
        },
        {
            "namespace": "ZMD_Host_Monitor",
            "metricName": "mem_percent",
            "timestamp": utc_time,
            "value": mem_info.get('percent'),
            "dimensions": [
                "product=Zmd_Mem_Monitor",
                "apiname=zmd_mem_test"
            ],
            "unit": "Percent"
        },
        {
            "namespace": "ZMD_Host_Monitor",
            "metricName": 'mem_total',
            "timestamp": utc_time,
            "value": mem_info.get('total'),
            "dimensions": [
                "product=Zmd_Mem_Monitor",
                "apiname=zmd_mem_test"
            ],
            "unit": "Gigabytes"
        },
        {
            "namespace": "ZMD_Host_Monitor",
            "metricName": 'mem_used',
            "timestamp": utc_time,
            "value": mem_info.get('used'),
            "dimensions": [
                "product=Zmd_Mem_Monitor",
                "apiname=zmd_mem_test"
            ],
            "unit": "Gigabytes"
        },
        {
            "namespace": "ZMD_Host_Monitor",
            "metricName": "mem_free",
            "timestamp": utc_time,
            "value": mem_info.get('free'),
            "dimensions": [
                "product=Zmd_Mem_Monitor",
                "apiname=zmd_mem_test"
            ],
            "unit": "Gigabytes"
        }
    ]
    logger.debug(json_data)
    return json_data





# Entry point
if __name__ == "__main__":
    from apscheduler.schedulers.blocking import BlockingScheduler
    # from apscheduler.jobstores.mongodb import MongoDBJobStore
    # from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
    from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
    from apscheduler.jobstores.memory import MemoryJobStore
    from apscheduler.jobstores.redis import RedisJobStore
    import redis
    import pickle


    def my_job(id='my_job'):
        # Push one batch of metrics to the API and log the response body
        response = requests.post(host, params=query, headers=headers, auth=auth(), json=get_data())
        logger.debug(response.text)
        print(id, '-->', datetime.datetime.now())

    connect_args = {
        'host': '192.168.1.8',
        'port': 6379,
        'password': ''
    }
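    # RedisJobStore persists job definitions in Redis (db 13 here), so scheduled
    # jobs survive scheduler restarts.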
    jobstores = {
        'default': RedisJobStore(db=13,
                                 jobs_key='apscheduler.jobs',
                                 run_times_key='apscheduler.run_times',
                                 pickle_protocol=pickle.HIGHEST_PROTOCOL,
                                 **connect_args)
    }
    # executors = {
    #     'default': ThreadPoolExecutor(10),
    #     'processpool': ProcessPoolExecutor(5)
    # }
    # job_defaults = {
    #     'coalesce': False,
    #     'max_instances': 3
    # }
    # scheduler = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults,
    #                               timezone="Asia/Shanghai")
    # Alternatively, use an in-memory jobstore (jobs are lost on restart):
    # jobstores = {
    #     'default': MemoryJobStore()
    # }
    executors = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(10)
    }
    job_defaults = {
        'coalesce': True,  # if runs pile up (e.g., missed while the scheduler was down), collapse them into a single run
        'max_instances': 3
    }
    scheduler = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
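    # replace_existing=True matters with a persistent jobstore: without it, a
    # restarted process would raise ConflictingIdError because the 'job_interval'
    # id is already stored in Redis.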
    scheduler.add_job(my_job, args=['job_interval', ], id='job_interval', trigger='interval', seconds=5,
                      replace_existing=True)
    # scheduler.add_job(my_job, args=['job_cron', ], id='job_cron', trigger='cron', month='4-8,11-12', hour='20-23', second='*/10', \
    #                   end_date='2020-6-16')
    # scheduler.add_job(my_job, args=['job_once_now', ], id='job_once_now')
    # scheduler.add_job(my_job, args=['job_date_once', ], id='job_date_once', trigger='date',
    #                   run_date='2020-6-15 08:34:00')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        # Ctrl+C raises KeyboardInterrupt, so catch it in addition to SystemExit
        print('exit')
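
The monitor.monitor.Monitor helper imported at the top is not included in the post. As a rough idea of the interface get_data() assumes, here is a hypothetical psutil-based sketch; the key names ('percent_avg', 'percent', 'total', 'used', 'free') are inferred from how the script reads them, and the real module may differ:

# monitor/monitor.py -- hypothetical sketch, not the author's actual module
import psutil  # pip install psutil

GB = 1024 ** 3  # bytes per gigabyte

class Monitor(object):
    def cpu(self):
        # Average CPU utilisation across all cores, in percent
        return {'percent_avg': psutil.cpu_percent(interval=1)}

    def mem(self):
        vm = psutil.virtual_memory()
        return {
            'percent': vm.percent,
            'total': round(vm.total / GB, 2),  # gigabytes, matching the "Gigabytes" unit above
            'used': round(vm.used / GB, 2),
            'free': round(vm.free / GB, 2),
        }

    def swap(self):
        sw = psutil.swap_memory()
        return {'percent': sw.percent, 'total': round(sw.total / GB, 2)}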
