索引

saltstack入门
salt state sls 描述文件
saltstack配置管理高级功能

saltstack入门

192.168.86.3 salt
修改
[root@Zabbix-sever salt]# salt-key -L
Accepted Keys:
Denied Keys:
Unaccepted Keys:
members.sunpie.com
node2.com
Zabbix-sever
minion名称
/bin/rm /etc/salt/minion_id
systemctl restart salt-minion.service 


sudo yum install https://repo.saltstack.com/yum/redhat/salt-repo-latest-1.el7.noarch.rpm

/etc/salt/master State file location
file_roots:
  base:
    - /srv/salt

zeromq
4505 send message
4506 receive message

 yum install -y python-setproctitle #显示进程名
root     44524     1 27 08:27 ?        00:00:00 /usr/bin/python /usr/bin/salt-master ProcessManager
root     44536 44524  0 08:27 ?        00:00:00 /usr/bin/python /usr/bin/salt-master MultiprocessingLoggingQueue

让grains生效两种方法
systemctl restart salt-minion
salt '*' saltutil.sync_grains

  [ "$PS1" = "\\s-\\v\\\$ " ] && PS1="[\u@\h \w]\\$ "
  [ "$PS1" = "\\s-\\v\\\$ " ] && PS1="[\u@\h \W]\\$ "
\w 显示当前工作目录的完整路径(绝对路径), \W 只显示当前目录名(相对路径)

[root@Zabbix-server /srv/salt/_grains]# cat my_grains.py 
#!/usr/bin/env python
#-*- coding:utf-8 -*-
def my_grains():
        #初始化字典
        grains={}
        grains['iaas']='openstack'
        grains['edu']='oldboy'
        return grains
分发py文件
[root@Zabbix-server ~]# salt '*' saltutil.sync_grains 
Zabbix-sever:
    - grains.my_grains
node2.com:
    - grains.my_grains

[root@Zabbix-server /var/cache/salt]# salt '*' grains.item iaas 
node2.com:
    ----------
    iaas:
        openstack
Zabbix-sever:
    ----------
    iaas:
        openstack
[root@Zabbix-server /var/cache/salt]# 


Grians优先级:
     1.系统自带,
     2.grains文件写的
     3.minion配置文件写的
     4.自己写的。

salt '*' state.apply 这样就会执行top.sls中定义的行为
top.sls
#####      State System settings     #####
##########################################
# The state system uses a "top" file to tell the minions what environment to
# use and what modules to use. The state_top file is defined relative to the
# root of the base environment as defined in "File Server settings" below.
#state_top: top.sls

# The master_tops option replaces the external_nodes option by creating
# a plugable system for the generation of external top data. The external_nodes
# option is deprecated by the master_tops option.
#
# To gain the capabilities of the classic external_nodes system, use the
# following configuration:
# master_tops:
#   ext_nodes: <Shell command which returns yaml>
#
#master_tops: {}


[root@Zabbix-server /srv/pillar/web]# salt '*' pillar.items apache
Zabbix-sever:
    ----------
    apache:
node2.com:
    ----------
    apache:
        httpd
[root@Zabbix-server /srv/pillar/web]# salt '*' saltutil.refresh_pillar
node2.com:
    True
Zabbix-sever:
    True
[root@Zabbix-server /srv/pillar/web]# salt '*' pillar.items apache    
Zabbix-sever:
    ----------
    apache:
node2.com:
    ----------
    apache:
        httpd

[root@Zabbix-server /srv]# tree .
.
├── pillar
│   ├── top.sls
│   └── web
│       └── apache.sls
└── salt
    ├── _grains
    │   └── my_grains.py
    ├── top.sls
    └── web
        └── apache.sls

[root@Zabbix-server /srv/pillar/web]# salt -I 'apache:httpd' cmd.run 'w'      
node2.com:
     23:45:26 up  2:05,  2 users,  load average: 0.00, 0.01, 0.05
    USER     TTY      FROM             LOGIN@   IDLE   JCPU   PCPU WHAT
    root     pts/0    192.168.86.1     21:40    1:44m  0.15s  0.15s -bash
[root@Zabbix-server /srv/pillar/web]# 

1.目标选择

Grains VS Pillar

         类型     数据采集方式      应用场景                      定义位置

Grains   静态    minion启动时收集  数据查询  目标选择  配置管理   minion
Pillar   动态     master自定义     目标选择  配置管理  敏感数据   master

目标选择
1,通配符,正则表达式
salt -E '(node1|node2)' test.ping
2,

[root@node1 ~]# salt -S 192.168.86.0/24 test.ping
node2.com:

https://www.unixhot.com/docs/saltstack/topics/targeting/index.html


#####         Node Groups           #####
##########################################
# Node groups allow for logical groupings of minion nodes. A group consists of a group
# name and a compound target.
#nodegroups:
#  group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com'
#  group2: 'G@os:Debian and foo.domain.com'
nodegroups:
  web: 'L@node2.com,node1.com'
  group1: 'L@node1.com,node2.com'
  group2: 'L@node2.com and node2.com'
[root@node1 /etc/salt]# systemctl restart salt-master
[root@node1 /etc/salt]# salt -N web cmd.run "w"



https://www.unixhot.com/docs/saltstack/topics/execution/index.html
远程执行
salt的模块都在
/usr/lib/python2.7/site-packages/salt/modules

service
network
salt '*' network.active_tcp
salt '*' service.available sshd
salt '*' service.restart sshd
salt '*' state.single pkg.installed name=lsof


自定义模块
1,建目录
2,写py文件
3,刷新
[root@node1 /srv/salt/_modules]# pwd
/srv/salt/_modules
[root@node1 /srv/salt/_modules]# ll
total 4
-rw-r--r-- 1 root root 58 Jan  1 05:33 my_disk.py
[root@node1 /srv/salt/_modules]# cat my_disk.py 
def list():
        cmd='df -h'
        return __salt__['cmd.run'](cmd)
[root@node1 /srv/salt/_modules]# 
salt '*' saltutil.sync_modules 
salt '*' my_disk.list

[root@node2 salt]# tree /var/cache/salt/
/var/cache/salt/
└── minion
    ├── accumulator
    ├── extmods
    │   ├── grains
    │   │   ├── my_grains.py
    │   │   └── my_grains.pyc
    │   └── modules
    │       └── my_disk.py
    ├── files
    │   └── base
    │       ├── _grains
    │       │   └── my_grains.py
    │       ├── _modules
    │       │   └── my_disk.py
    │       ├── top.sls
    │       └── web
    │           └── apache.sls
    ├── highstate.cache.p
    ├── module_refresh
    ├── pkg_refresh
    ├── proc
    └── sls.p


[root@node1 ~]# salt '*' saltutil.refresh_modules
node2.com:
    True
node1.com:
    True

salt state sls 描述文件

名称ID声明, 默认是name声明
[root@node1 /srv/salt/web]# cat apache.sls 
apache-install: #	id声明
  pkg.installed:
    - names:
      - httpd
      - httpd-devel

apache-service:   #id声明 高级状态,id必须唯一(可以不唯一但是容易出问题)
  service.running:  #State声明 状态声明
    - name: httpd  #选项声明
    - enable: True

lamp安装
pkg常用模块,
pkg.installed
pkg.group_installed
pkg.latest  ensure the latest version
pkg.purge uninstall


1,安装软件 pkg
2,配置文件 file
3,启动 service

一个状态模块不能重复使用

file_roots:
  base:
    - /srv/salt
这时下面的文件就是在/srv/salt/lamp/files/my.cnf
    - source: salt://lamp/files/my.cnf

salt 'node2.com' state.sls lamp.lamp

[root@node1 /srv/salt/lamp]# cat lamp.sls 
lamp-pkg:
  pkg.installed:
    - pkgs:
      - mariadb
      - mariadb-server
      - php
      - httpd
      - php-mysql
      - php-cli
      - php-mbstring

apache-config:
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://lamp/files/httpd.conf
    - user: root
    - group: root
    - mode: 644

php-config:
  file.managed:
    - name: /etc/php.ini
    - source: salt://lamp/files/php.ini
    - user: root
    - group: root
    - mode: 644

mysql-config:
  file.managed:
    - name: /etc/my.cnf
    - source: salt://lamp/files/my.cnf
    - user: root
    - group: root
    - mode: 644

apache-service:
  service.running:
    - name: httpd
    - enable: True
    - reload: True

mysql-service:
  service.running:
    - name: mariadb
    - enable: True
    - reload: True

也可以这么写
apache-server:
  pkg.installed:
    - pkgs:
      - httpd
      - php
  file.managed: 
    - name: /etc/php.ini
    - source: salt://lamp/files/php.ini
    - user: root
    - group: root
    - mode: 644
  service.running:
    - name: httpd
    - enable: True
    - reload: True


1。我依赖谁: require
apache-service:
  service.running:
    - name: httpd
    - enable: True
    - reload: True
    - require: 
      - pkg: lamp-pkg
2。我被谁依赖: require-in
3。我监控谁:watch,watch本身包含require
apache-service:
  service.running:
    - name: httpd
    - enable: True
    - reload: True
    - require: 
      - pkg: lamp-pkg
    - watch:
      - file: apache-config
1,如果apache-config这个id状态发生变化就reload
2,如果不加reload: True,那么就restart

4。我被谁监控:watch-in
5。我引用谁
新建lamp.pkg,然后在apache.sls中加入下面的行,
[root@node1 /srv/salt/lamp]# cat init.sls 
include:
  - lamp.pkg
  - lamp.config
  - lamp.service
安装,配置,启动,这样分,也可以单独写,可以让别的模块include,-lamp.apache,- lamp.mysql
6。我扩展谁


如何编写sls技巧:
1,按照状态分类 如果单独使用,很清晰
2,按照服务分类 可以被其他的SLS include。如LNMP include mysql



yaml-jinja2
两种分隔符:{%  %}表达式, {{    }}变量

1,要使用template
    - template: jinja
2,列出参数列表
    - defaults:
      PORT: 88
3,在模板引用
Listen {{ PORT }}
实例如下:
[root@node1 /srv/salt/lamp]# cat config.sls 
apache-config:
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://lamp/files/httpd.conf
    - user: root
    - group: root
    - mode: 644
    - template: jinja
    - defaults:
      PORT: 88

jinjia模板3个地方可以使用,pillar,grains,salt
1,写在模板
[root@node1 /srv/salt/lamp]# vim files/httpd.conf
Listen {{ grains['fqdn_ip4'][0] }}:{{ PORT }}

[root@node1 /srv/salt/lamp]# salt '*' network.hw_addr eno16777736
node2.com:
    00:0c:29:48:1b:64
node1.com:
    00:0c:29:77:60:c0

Listen {{ grains['fqdn_ip4'][0] }}:{{ PORT }}
# hardware address {{ salt['network.hw_addr']('eno16777736') }}


# pillar {{ pillar['apache'] }}
2,在sls里面写,比较清晰,能看到所有的变量
apache-config:
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://lamp/files/httpd.conf
    - user: root
    - group: root
    - mode: 644
    - template: jinja
    - defaults:
      ADDR:  {{ grains['fqdn_ip4'][0] }}
      PORT: 88


salt 'node2.com' state.sls lamp.init

https://github.com/saltstack-formulas

Compound matchers allow very granular minion targeting using any of Salt's matchers. 
[root@node1 ~]# salt -C '* not G@fqdn_ip4:192.168.86.4' test.ping                  
node1.com:
    True
[root@node1 ~]# salt -C '* not G@fqdn_ip4:192.168.86.4' test.ping
node1.com:
    True
[root@node1 ~]# salt -C '* not I@apache:httpd' test.ping                          
node1.com:
    True
[root@node1 ~]# salt -C '* not I@apache:httpd' test.ping^C
[root@node1 ~]# 

生产环境要先执行test等于True
root salt '*' state.highstate
root salt '*' state.highstate test=True


建议所有file模块都加上backup
  file.managed:
    - name: /etc/zabbix/zabbix_agentd.conf
    - source: salt://init/files/zabbix_agentd.conf
    - backup: minion


haproxy下载
https://github.com/haproxy/haproxy/archive/master.zip

切换base环境

salt '*' state.sls haproxy.install saltenv=prod
继续学习状态间关系

条件判断:
unless: 条件为真时不执行(条件为假才执行)
onlyif: 条件为真时才执行


haproxy 配置
global
maxconn 100000
chroot /usr/local/haproxy
uid 99  
gid 99 
daemon
nbproc 1 
pidfile /usr/local/haproxy/logs/haproxy.pid 
log 127.0.0.1 local3 info

defaults
option http-keep-alive
maxconn 100000
mode http
timeout connect 5000ms
timeout client  50000ms
timeout server 50000ms

listen stats
mode http
bind 0.0.0.0:8888
stats enable
stats uri     /haproxy-status 
stats auth    haproxy:saltstack

frontend frontend_www_example_com
bind 192.168.3.21:80
mode http
option httplog
log global
    default_backend backend_www_example_com

backend backend_www_example_com
option forwardfor header X-REAL-IP
option httpchk HEAD / HTTP/1.0
balance source
server web-node1  192.168.3.21:8080 check inter 2000 rise 30 fall 15
server web-node2  192.168.3.22:8080 check inter 2000 rise 30 fall 15

saltstack配置管理高级功能

git clone https://github.com/unixhot/saltbook-code

修改install.sls中目录添加modules
salt '*' state.sls modules.keepalived.install saltenv=prod

可以这样设置参数
[root@node1 /srv/salt/prod/modules/keepalived]# cat install.sls
{% set keepalived_tar =  'keepalived-1.2.17.tar.gz'  %}
keepalived-install:
  file.managed:
    - name: /usr/local/src/{{ keepalived_tar }}
    - source: salt://modules/keepalived/files/{{ keepalived_tar }}
    - mode: 755
    - user: root
    - group: root
  cmd.run:
    - name: cd /usr/local/src && tar zxf {{ keepalived_tar }} && cd keepalived-1.2.17 && ./configure --prefix=/usr/local/keepalived --disable-fwmark && make && make install
    - unless: test -d /usr/local/keepalived
    - require:
      - file: keepalived-install


under /root/saltbook-code/salt/prod/cluster
copy haproxy-outside-keepalived.sls

ll files/haproxy-outside-keepalived.conf

这两个文件到/srv/salt/prod/cluster 还有files对应的目录
然后修改这两个文件



listen stats
mode http
bind 0.0.0.0:8888
stats enable
stats uri     /haproxy-status
stats auth    haproxy:saltstack

frontend frontend_www_example_com
bind 192.168.3.21:80
mode http
option httplog
log global
    default_backend backend_www_example_com

backend backend_www_example_com
option forwardfor header X-REAL-IP
option httpchk HEAD / HTTP/1.0
balance source
server web-node1  192.168.3.3:8080 check inter 2000 rise 30 fall 15
server web-node2  192.168.3.4:8080 check inter 2000 rise 30 fall 15


php session share
http://php.net/manual/en/memcached.sessions.php


CDN  一般都有302总调度器


job管理

saltutl



[root@node1 /usr/lib/systemd/system]# cat /usr/lib/systemd/system/nginx.service
[Unit]
Description=dengshen
After=network.target

[Service]
Type=forking
PIDFile=/usr/local/nginx/logs/nginx.pid
ExecStart=/usr/local/nginx/sbin/nginx
ExecReload=/usr/local/nginx/sbin/nginx -s reload
ExecStop=/usr/local/nginx/sbin/nginx -s stop


[Install]
WantedBy=multi-user.target
[root@node1 /usr/lib/systemd/system]#

安装完成的service文件
[root@node2 ~]# cat /usr/lib/systemd/system/nginx.service
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network.target remote-fs.target nss-lookup.target

[Service]
Type=forking
PIDFile=/run/nginx.pid
# Nginx will fail to start if /run/nginx.pid already exists but has the wrong
# SELinux context. This might happen when running `nginx -t` from the cmdline.
# https://bugzilla.redhat.com/show_bug.cgi?id=1268621
ExecStartPre=/usr/bin/rm -f /run/nginx.pid
ExecStartPre=/usr/sbin/nginx -t
ExecStart=/usr/sbin/nginx
ExecReload=/bin/kill -s HUP $MAINPID
KillSignal=SIGQUIT
TimeoutStopSec=5
KillMode=process
PrivateTmp=true

[Install]
WantedBy=multi-user.target


配置文件放在业务层面,



return write in mysql
[root@node1 /var/cache/salt/master]# tail /etc/salt/master
#####      Returner settings          ######
############################################
# Which returner(s) will be used for minion's result:
#return: mysql
return: mysql
mysql.host: '192.168.3.3'
mysql.port: '3306'
mysql.user: 'salt'
mysql.pass: 'salt@pw'
mysql.db: 'salt'


[root@node1 /var/cache/salt/master]# salt '*' saltutil.running
node2.com:
node1.com:



 salt-run jobs.list_jobs|head -50
 salt-run jobs.lookup_jid 20170213020211363725

 1102  2017-02-13 05:48:30 root salt-run jobs.list_jobs
 1103  2017-02-13 05:48:58 root salt-run jobs.list_jobs|head
 1104  2017-02-13 05:49:29 root salt-run jobs.list_jobs|head -50
 1105  2017-02-13 05:50:08 root salt-run jobs.lookup_jid 20170213020211363725
 1106  2017-02-13 05:57:32 root salt-run manage.statue
 1107  2017-02-13 05:57:35 root salt-run manage.status
 1108  2017-02-13 05:57:47 root salt-run manage.versions
 1109  2017-02-13 05:57:56 root salt-run manage.up


master-less
salt-call
masterless模式下可以停止minion服务,本地执行: salt-call --local state.highstate


multi master:
/etc/salt/pki/master/minions   公钥文件
file_roots
pillar_roots


minion配置
master:
  - 192.168.3.4
  - 192.168.3.3

==== sls使用git或者svn管理

salt syndic
 1, 必须运行在一个master上
 salt-master->  saltmaster+salt syndic -> salt minion