Contents

Using lists
Using the trailing number of the hostname as the zookeeper id
Check whether a file is the default and, if so, empty it
Using blockinfile + lookup
Using replace + lookup
Calling the Ansible 2.4 API

Using lists

We have a list like the following (the hosts are server7 through server13):

(py3) [root@jumpserver app]# cat server_elk.yml
- hosts:  ~server[7-9]$:~server1[0-3]$
  vars:
    topics_list:
      - test
      - test1
    cluster_list:
      - server7
      - server8
      - server9
      - server10
      - server11
      - server12
      - server13
  roles:
   # - common
    - elk
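
As a side note, if these hosts already form an inventory group, cluster_list does not have to be hard-coded; it could be taken from the group instead (the group name es_cluster below is an assumption, not something defined in the playbook above):

  vars:
    cluster_list: "{{ groups['es_cluster'] }}"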

For example, we want to use logstash to write into ES; below is the effect we are after:

input {
  kafka {
    bootstrap_servers => "server7:9096,server8:9096,server9:9096,server10:9096,server11:9096,server12:9096,server13:9096"
    topics => ["test","test1"]
  }
}
filter {
  json {
    source => "message"
  }
  mutate {
    convert => { "upstream_response_time" => "float" }
    convert => { "request_time" => "float" }
    convert => { "status" => "integer" }
    remove_field => "message"
  }
  geoip {
      source => "remote_addr"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}"  ]
  }
  mutate {
    convert => [ "[geoip][coordinates]", "float"]
  }
}

output {
  elasticsearch {
    hosts => ["server7:9200","server8:9200","server9:9200","server10:9200","server11:9200","server12:9200","server13:9200"]
    index => "logstash-%{[type]}-%{+YYYY.MM.dd}"
  }
}

The Ansible template can then be written like this:

input {
  kafka {
    bootstrap_servers => "{{ cluster_list|map('regex_replace', '^(.*)$', '\\1:9096')|join(',') }}"
    topics => [{{ topics_list|map('regex_replace', '^(.*)$', '"\\1"')|join(',') }}]
  }
}
filter {
  json {
    source => "message"
  }
  mutate {
    convert => { "upstream_response_time" => "float" }
    convert => { "request_time" => "float" }
    convert => { "status" => "integer" }
    remove_field => "message"
  }
  geoip {
      source => "remote_addr"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}"  ]
  }
  mutate {
    convert => [ "[geoip][coordinates]", "float"]
  }
}

output {
  elasticsearch {
    hosts => [{{ cluster_list|map('regex_replace', '^(.*)$', '"\\1:9200"')|join(',') }}]
    index => "logstash-%{[type]}-%{+YYYY.MM.dd}"
  }
}
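
Rendering the template is then just an ordinary template task; a minimal sketch (the src/dest paths and the restart logstash handler name are assumptions, not part of the role above):

- name: render logstash pipeline config
  template:
    src: logstash.conf.j2
    dest: /etc/logstash/conf.d/logstash.conf
  notify:
    - restart logstash

With the vars from the playbook above, bootstrap_servers and hosts render to the server7..server13 strings and topics to ["test","test1"], i.e. exactly the desired config shown earlier.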

Using the trailing number of the hostname as the zookeeper id

For example, server7's myid would be 7:

[root@server11 ~]# cat /opt/zookeeper/data/myid
11

In the template we need:

# cat  roles/common/templates/myid.j2
{{ ansible_hostname|replace('-','/') |basename}}
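
Note that replace('-','/') | basename only keeps whatever follows the last '-' in the hostname, so it assumes the hostname has a dash in front of the trailing number. If the hostnames are plain serverN as in the example, a regex that keeps just the trailing digits is an alternative (a sketch, not the original role's template):

# cat roles/common/templates/myid.j2
{{ ansible_hostname | regex_replace('^.*?([0-9]+)$', '\\1') }}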

Check whether a file is the default and, if so, empty it

For example, after installing filebeat, if /etc/filebeat/filebeat.yml is still the packaged default we want to empty it; if it is not, we fill it in with blockinfile. Because blocks get appended several times and the result has to stay idempotent, we ended up needing this check:

- name: reset the config if /etc/filebeat/filebeat.yml is still the default file
  shell: "[[ $(md5sum /etc/filebeat/filebeat.yml) == 582eb1601bde332db2d09d2ee2d8286b* ]] && > /etc/filebeat/filebeat.yml"
  args:
    executable: /bin/bash   # the [[ ]] test needs bash, not a plain POSIX sh
  ignore_errors: True
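
A pure-Ansible alternative is to compare the file's md5 with the stat module and truncate it with copy, which avoids depending on bash for the [[ ]] test (a sketch; the hash is the same default-config md5 as above):

- name: get checksum of filebeat.yml
  stat:
    path: /etc/filebeat/filebeat.yml
    checksum_algorithm: md5
  register: filebeat_conf

- name: truncate filebeat.yml if it is still the packaged default
  copy:
    content: ""
    dest: /etc/filebeat/filebeat.yml
  when: filebeat_conf.stat.checksum | default('') == "582eb1601bde332db2d09d2ee2d8286b"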

Using blockinfile + lookup

lookup can look up many things, such as dns, file and so on; see the [official documentation](http://docs.ansible.com/ansible/latest/playbooks_lookups.html) for details.

- name: copy filebeat header config
  blockinfile:
    path: /etc/filebeat/filebeat.yml
    block: "{{ lookup('template', 'header.yml') }}"
    marker: "# {mark} filebeat header"
    insertbefore: BOF
  notify:
    - restart filebeat


- name: copy filebeat.prospectors config
  blockinfile:
    path: /etc/filebeat/filebeat.yml
    block: "{{ lookup('template', 'input_type.yml') }}"
    marker: "# {mark} filebeat {{ document_type }}"
    insertafter: "# END filebeat header"
  notify:
    - restart filebeat

- name: copy filebeat output config
  blockinfile:
    path: /etc/filebeat/filebeat.yml
    block: "{{ lookup('template', 'output.yml') }}"
    marker: "# {mark} filebeat output"
    insertafter: EOF
  notify:
    - restart filebeat
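
After these tasks run, the markers keep filebeat.yml laid out roughly as below; the actual block contents come from the header.yml, input_type.yml and output.yml templates (not shown here), and nginx as a document_type is only an illustrative value:

# BEGIN filebeat header
...rendered header.yml...
# END filebeat header
# BEGIN filebeat nginx
...rendered input_type.yml for document_type=nginx...
# END filebeat nginx
# BEGIN filebeat output
...rendered output.yml...
# END filebeat output

Because every prospector block is inserted after "# END filebeat header" and carries its own marker, re-running the role with another document_type adds a new block right after the header without touching the existing ones.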

Using replace + lookup

The access_log_json file is placed under roles/nginx_conf/files (its content is shown at the end of this section):

- name: add access_log_json
  blockinfile:
    path: "{{ nginx_file }}"
    marker: "# {mark} access_log_json "
    insertafter: 'http_x_forwarded_for"'
    content: "{{ lookup('file', './access_log_json') }}"
    backup: yes
  notify:
    - reload nginx

- name: change file to access_log_json
  replace:
    path: "{{ nginx_file }}"
    regexp: 'access_log  {{ logfile }} main;'
    replace: 'access_log  {{ logfile }} access_log_json;'
    backup: yes
  notify:
    - reload nginx

# cat roles/nginx_conf/files/access_log_json
    log_format  access_log_json      '{"remote_addr":"$remote_addr","host":"$host","time_iso8601":"$time_iso8601","request":"$request","status":"$status","body_bytes_sent":"$body_bytes_sent","http_referer":"$http_referer","http_user_agent":"$http_user_agent","http_x_forwarded_for":"$http_x_forwarded_for","upstream_response_time":"$upstream_response_time","uri":"$uri","request_time":"$request_time"}'
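
The notify entries above assume handlers named reload nginx (and, in the filebeat tasks earlier, restart filebeat) defined in the respective roles; a minimal sketch of the nginx one, with the service module as an assumed implementation:

# cat roles/nginx_conf/handlers/main.yml
- name: reload nginx
  service:
    name: nginx
    state: reloaded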

Calling the Ansible 2.4 API

The only part that needs changing is the inventory: point sources at your own inventory file and set hosts to the hosts you want to operate on. Many of the parameters here correspond to what ansible --help shows. The script below is adapted from the example on the official site, stripped down to just running the shell module.

#!/usr/bin/env python

import json
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.plugins.callback import CallbackBase

class ResultCallback(CallbackBase):
    """A sample callback plugin used for performing an action as results come in

    If you want to collect all results into a single object for processing at
    the end of the execution, look into utilizing the ``json`` callback plugin
    or writing your own custom callback plugin
    """
    def v2_runner_on_failed(self, result, **kwargs):
        print(result._host)

    def v2_runner_on_unreachable(self, result):
        print("*"*10, "v2_runner_on_unreachable")
        print(result._host)

    def v2_runner_on_ok(self, result, **kwargs):
        """Print a json representation of the result

        This method could store the result in an instance attribute for retrieval later
        """
        host = result._host
        print(json.dumps({host.name: result._result}, indent=4))


Options = namedtuple('Options', ['connection', 'module_path', 'forks', 'become', 'become_method', 'become_user', 'check', 'diff'])
# initialize needed objects
loader = DataLoader()
options = Options(connection='smart', module_path='/path/to/mymodules', forks=100, become=None, become_method=None, become_user=None, check=False,
                  diff=False)
passwords = dict(vault_pass='secret')

# Instantiate our ResultCallback for handling results as they come in
results_callback = ResultCallback()

# create inventory and pass to var manager
#inventory = InventoryManager(loader=loader, sources=['localhost'])
inventory = InventoryManager(loader=loader, sources=['/etc/ansible/d_inv.py'])
variable_manager = VariableManager(loader=loader, inventory=inventory)

# create play with tasks
play_source =  dict(
        name = "Ansible Play",
        hosts = 'node1.com',
        gather_facts = 'no',
        tasks = [
            #dict(action=dict(module='shell', args='ls'), register='shell_out'),
            dict(action=dict(module='shell', args='route -n'), ),
            #dict(action=dict(module='debug', args=dict(msg='{{shell_out.stdout}}')))
         ]
    )
play = Play().load(play_source, variable_manager=variable_manager, loader=loader)

# actually run it
tqm = None
try:
    tqm = TaskQueueManager(
              inventory=inventory,
              variable_manager=variable_manager,
              loader=loader,
              options=options,
              passwords=passwords,
              stdout_callback=results_callback,  # Use our custom callback instead of the ``default`` callback plugin
          )
    result = tqm.run(play)
finally:
    if tqm is not None:
        tqm.cleanup()