Binary deployment of Kafka

Deploying a Kafka cluster

Machines:

eks-zk-kafka001 10.0.0.41
eks-zk-kafka002 10.0.0.42
eks-zk-kafka003 10.0.0.43
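
The server.properties below points at ZooKeeper via the short names zk-kafka001..003. If DNS does not already resolve them, one option (an assumption, not part of the original setup) is to map both name forms to the same IPs in /etc/hosts on every node:

cat >> /etc/hosts <<'EOF'
10.0.0.41 eks-zk-kafka001 zk-kafka001
10.0.0.42 eks-zk-kafka002 zk-kafka002
10.0.0.43 eks-zk-kafka003 zk-kafka003
EOF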

Initialize each node

# Disable the firewall (and dnsmasq / NetworkManager)
systemctl disable --now firewalld 
systemctl disable --now dnsmasq
systemctl disable --now NetworkManager

# Disable SELinux
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config

# Disable swap and comment out the swap entry in /etc/fstab
swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
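
A quick sanity check that swap is really off:

free -h | grep -i swap   # the Swap line should read 0B everywhere
swapon --show            # prints nothing when no swap is active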

# Synchronize system time
Install ntpdate:
rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm
yum install ntpdate -y

Synchronize the time on all nodes. Time sync configuration:
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com
# Add to crontab
*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com
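
The cron entry can be installed non-interactively, for example:

(crontab -l 2>/dev/null; echo '*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com') | crontab -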

ulimit -SHn 65535

vim /etc/security/limits.conf
# Append the following at the end
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited 
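
limits.conf only affects new sessions; log in again and verify:

ulimit -n   # soft nofile, expect 65536
ulimit -u   # soft nproc, expect 65535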

Deploy the Kafka cluster

Prerequisite: a ZooKeeper cluster has already been deployed.

Download: https://kafka.apache.org/downloads

# Create the working directory
mkdir -p /home/work
# Extract the tarball into the data directory
tar -xvf kafka_2.13-3.7.0.tgz -C /home/work
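
The configuration below assumes Kafka lives at /home/work/kafka, while the tarball extracts to a versioned directory; a symlink (my assumption, not spelled out in the original) bridges the two and pre-creates the data directory:

ln -s /home/work/kafka_2.13-3.7.0 /home/work/kafka
mkdir -p /home/work/kafka/logs/kafka-logs
cd /home/work/kafka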
# Edit the configuration file (non-comment, non-empty lines shown)
cat config/server.properties | egrep -v '^#|^$'
broker.id=0
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/home/work/kafka/logs/kafka-logs  # adjust to your environment
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=zk-kafka001:2181,zk-kafka002:2181,zk-kafka003:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
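
broker.id must be unique within the cluster. If the same server.properties is copied to all three nodes, one sketch is to patch it per node (paths assume the symlink above):

# on eks-zk-kafka002
sed -i 's/^broker.id=0/broker.id=1/' /home/work/kafka/config/server.properties
# on eks-zk-kafka003
sed -i 's/^broker.id=0/broker.id=2/' /home/work/kafka/config/server.properties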

Start

# run from the bin/ directory on each node
./kafka-server-start.sh -daemon ../config/server.properties
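
To confirm each broker came up (the log path assumes the /home/work/kafka symlink above):

ss -lntp | grep 9092                                            # broker listening on the default port
grep 'started (kafka.server' /home/work/kafka/logs/server.log   # startup banner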

Common commands:

# List all topics
./bin/kafka-topics.sh --list --bootstrap-server 10.175.234.41:9092,10.176.19.12:9092,10.176.23.40:9092
# Create a topic
./bin/kafka-topics.sh --create --bootstrap-server 10.175.234.41:9092,10.176.19.12:9092,10.176.23.40:9092 --replication-factor 1 --partitions 1 --topic testTopic
# Describe a specific topic
./bin/kafka-topics.sh --describe --topic sunlixin-event-log --bootstrap-server 10.175.234.41:9092,10.176.19.12:9092,10.176.23.40:9092
# Increase the partition count (partitions can only be increased, never decreased)
./bin/kafka-topics.sh --bootstrap-server 10.175.234.41:9092,10.176.19.12:9092,10.176.23.40:9092 --alter --topic ops_coffee --partitions 5
# Delete a topic
./bin/kafka-topics.sh  --delete --topic your_topic_name  --bootstrap-server 10.175.234.41:9092,10.176.19.12:9092,10.176.23.40:9092
# Start a console consumer and read the given topic from the beginning
./bin/kafka-console-consumer.sh --bootstrap-server 10.175.234.41:9092,10.176.19.12:9092,10.176.23.40:9092 --topic sunlixin-event-log --from-beginning
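
For an end-to-end smoke test, pair the consumer with a console producer; each line typed on the producer side should appear on the consumer:

./bin/kafka-console-producer.sh --bootstrap-server 10.175.234.41:9092,10.176.19.12:9092,10.176.23.40:9092 --topic testTopic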

# List consumer groups
./bin/kafka-consumer-groups.sh --bootstrap-server 10.175.234.41:9092,10.176.19.12:9092,10.176.23.40:9092 --list
# Describe a consumer group's offsets and lag
./bin/kafka-consumer-groups.sh --bootstrap-server 10.175.234.41:9092,10.176.19.12:9092,10.176.23.40:9092 --group logstash-group-sunlixin --describe
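
If a group needs to re-read a topic, its offsets can be rewound (the group must have no active consumers; dropping --execute turns this into a dry run that only prints the new offsets):

./bin/kafka-consumer-groups.sh --bootstrap-server 10.175.234.41:9092,10.176.19.12:9092,10.176.23.40:9092 --group logstash-group-sunlixin --topic sunlixin-event-log --reset-offsets --to-earliest --execute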

Deploy the kubernetes-event-exporter component

kubernetes-event-exporter exports the often-missed Kubernetes events to various outputs for observability and alerting; here they are routed to the Kafka cluster deployed above.

1. Create the Namespace

apiVersion: v1
kind: Namespace
metadata:
  name: sun-metrics

2. Create the ServiceAccount

apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: sun-metrics
  name: event-exporter

3. Create the ClusterRole and ClusterRoleBinding

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: event-exporter-role
rules:
- apiGroups: [""]
  resources: ["events"]
  verbs: ["get", "list", "watch"]  

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: event-exporter-binding
subjects:
- kind: ServiceAccount
  name: event-exporter
  namespace: sun-metrics
roleRef:
  kind: ClusterRole
  name: event-exporter-role
  apiGroup: rbac.authorization.k8s.io

4. Create the ConfigMap

apiVersion: v1
kind: ConfigMap
metadata:
  name: event-exporter-cfg
  namespace: sun-metrics
data:
  config.yaml: |
    logLevel: error
    logFormat: json
    route:
      routes:
      - match:
        - receiver: "kafka"
    receivers:
      - name: "kafka"
        kafka:
          clientId: "sunlixin-test"
          topic: "sunlixin-event-log"
          brokers:
          - "10.175.234.41:9092"
          - "10.176.19.12:9092"
          - "10.176.23.40:9092"
          compressionCodec: "gzip"

5. Deploy the Deployment

apiVersion: apps/v1
kind: Deployment
metadata:
  name: sun-exporter
  namespace: sun-metrics
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: sun-exporter
        version: v1
    spec:
      serviceAccountName: event-exporter  # must match the ServiceAccount created in step 2
      containers:
      - name: event-exporter
        image: opsgenie/kubernetes-event-exporter:0.9
        imagePullPolicy: IfNotPresent
        args:
        - -conf=/data/config.yaml
        volumeMounts:
        - mountPath: /data
          name: cfg
      volumes:
        - name: cfg
          configMap:
            name: event-exporter-cfg
  selector:
    matchLabels:  # must match the pod template labels above
      app: sun-exporter
      version: v1
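
Assuming each manifest above is saved to its own file (file names here are illustrative), apply them and verify that cluster events reach the topic:

kubectl apply -f namespace.yaml -f serviceaccount.yaml -f rbac.yaml -f configmap.yaml -f deployment.yaml
kubectl -n sun-metrics get pods
# events should start flowing into the topic:
./bin/kafka-console-consumer.sh --bootstrap-server 10.175.234.41:9092,10.176.19.12:9092,10.176.23.40:9092 --topic sunlixin-event-log --from-beginning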

 
