TiDB installation
Deploying the TiDB (MySQL-compatible) database on Kubernetes
#modprobe br_netfilter
#echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
#echo 1 > /proc/sys/net/ipv4/ip_forward
#cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
#sysctl --system
#cat > /etc/modules-load.d/ip_vs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
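The modules-load.d file only takes effect at boot; to load the IPVS modules immediately and confirm they are present (a quick check, assuming the stock CentOS 7 kernel where the conntrack module is still named nf_conntrack_ipv4):
#for mod in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do modprobe $mod; done
#lsmod | grep -e ip_vs -e nf_conntrack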
#yum -y install yum-utils device-mapper-persistent-data lvm2
#yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#yum list docker-ce --showduplicates | sort -r
#yum -y install docker-ce-20.10.10-3.el7 docker-ce-cli-20.10.10
#mkdir /etc/docker
#cat > /etc/docker/daemon.json << \EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "registry-mirrors": ["http://hub-mirror.c.163.com"]
}
EOF
#mkdir -p /etc/systemd/system/docker.service.d
#mkdir -p /data/docker
#ln -s /data/docker /var/lib/docker
#systemctl daemon-reload
#systemctl start docker
#systemctl enable docker
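kubeadm expects the systemd cgroup driver configured above; a quick sanity check that Docker picked up daemon.json:
#docker info | grep -i -e "cgroup driver" -e "storage driver"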
#cat >/etc/yum.repos.d/kubernetes.repo << \EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#yum -y install kubelet-1.18.5-0 kubeadm-1.18.5-0 kubectl-1.18.5-0
#systemctl enable kubelet
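kubeadm init refuses to run with swap enabled, and SELinux in enforcing mode commonly interferes with kubelet on CentOS 7; a minimal preparation sketch (assuming default swap and SELinux settings):
#swapoff -a
#sed -i '/swap/s/^/#/' /etc/fstab
#setenforce 0
#sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config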
#mkdir /data/kubernetes && cd /data/kubernetes
#cat >./kubeadm-config.yaml << \EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.5
imageRepository: k8s.gcr.io
# control-plane (master) endpoint
controlPlaneEndpoint: "192.168.0.81:6443"
networking:
  serviceSubnet: "10.96.0.0/16"
  # CIDR for the Kubernetes pod network (must match the Calico pool below)
  podSubnet: "10.20.0.0/16"
  dnsDomain: "cluster.local"
EOF
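kubeadm can list exactly which images this configuration requires, which is useful to cross-check against the pull script below:
#kubeadm config images list --config kubeadm-config.yaml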
#cat > pull_k8s_images.sh << \EOF
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
## Versions to pull
KUBE_VERSION=v1.18.5
KUBE_PAUSE_VERSION=3.2
ETCD_VERSION=3.4.3-0
DNS_VERSION=1.6.7
GCR_URL=k8s.gcr.io
## Mirror repository to pull from (gotok8s mirrors k8s.gcr.io on Docker Hub)
DOCKERHUB_URL=gotok8s
## Image list
images=(
kube-proxy:${KUBE_VERSION}
kube-scheduler:${KUBE_VERSION}
kube-controller-manager:${KUBE_VERSION}
kube-apiserver:${KUBE_VERSION}
pause:${KUBE_PAUSE_VERSION}
etcd:${ETCD_VERSION}
coredns:${DNS_VERSION}
)
## Pull each image from the mirror, retag it as k8s.gcr.io, then drop the mirror tag
for imageName in "${images[@]}" ; do
docker pull $DOCKERHUB_URL/$imageName
docker tag $DOCKERHUB_URL/$imageName $GCR_URL/$imageName
docker rmi $DOCKERHUB_URL/$imageName
done
EOF
#chmod +x ./pull_k8s_images.sh
#./pull_k8s_images.sh
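Verify that every image was pulled and retagged under k8s.gcr.io before saving them:
#docker images | grep k8s.gcr.io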
# Export all images as tar archives so they can be copied to the other (offline) nodes
#docker save -o kubectl-latest.tar bitnami/kubectl:latest
docker save -o tidb-backup-manager-v1.2.4.tar pingcap/tidb-backup-manager:v1.2.4
docker save -o tidb-operator-v1.2.4.tar pingcap/tidb-operator:v1.2.4
docker save -o grafana-7.5.11.tar grafana/grafana:7.5.11
docker save -o ticdc-v5.2.1.tar pingcap/ticdc:v5.2.1
docker save -o tidb-monitor-initializer-v5.2.1.tar pingcap/tidb-monitor-initializer:v5.2.1
docker save -o tikv-v5.2.1.tar pingcap/tikv:v5.2.1
docker save -o tidb-v5.2.1.tar pingcap/tidb:v5.2.1
docker save -o tidb-binlog-v5.2.1.tar pingcap/tidb-binlog:v5.2.1
docker save -o pd-v5.2.1.tar pingcap/pd:v5.2.1
docker save -o tiflash-v5.2.1.tar pingcap/tiflash:v5.2.1
docker save -o prometheus-v2.27.1.tar prom/prometheus:v2.27.1
docker save -o kube-proxy-v1.18.5.tar k8s.gcr.io/kube-proxy:v1.18.5
docker save -o kube-controller-manager-v1.18.5.tar k8s.gcr.io/kube-controller-manager:v1.18.5
docker save -o kube-apiserver-v1.18.5.tar k8s.gcr.io/kube-apiserver:v1.18.5
docker save -o kube-scheduler-v1.18.5.tar k8s.gcr.io/kube-scheduler:v1.18.5
docker save -o node-v3.8.9.tar calico/node:v3.8.9
docker save -o pod2daemon-flexvol-v3.8.9.tar calico/pod2daemon-flexvol:v3.8.9
docker save -o cni-v3.8.9.tar calico/cni:v3.8.9
docker save -o kube-controllers-v3.8.9.tar calico/kube-controllers:v3.8.9
docker save -o advanced-statefulset-v0.3.3.tar pingcap/advanced-statefulset:v0.3.3
docker save -o prometheus-v2.18.1.tar prom/prometheus:v2.18.1
docker save -o local-volume-provisioner-v2.3.4.tar quay.io/external_storage/local-volume-provisioner:v2.3.4
docker save -o pause-3.2.tar k8s.gcr.io/pause:3.2
docker save -o coredns-1.6.7.tar k8s.gcr.io/coredns:1.6.7
docker save -o etcd-3.4.3-0.tar k8s.gcr.io/etcd:3.4.3-0
docker save -o tidb-monitor-reloader-v1.0.1.tar pingcap/tidb-monitor-reloader:v1.0.1
docker save -o grafana-6.0.1.tar grafana/grafana:6.0.1
docker save -o busybox-1.26.2.tar busybox:1.26.2
# On every other node, load the transferred archives
#docker load -i kubectl-latest.tar
docker load -i tidb-backup-manager-v1.2.4.tar
docker load -i tidb-operator-v1.2.4.tar
docker load -i grafana-7.5.11.tar
docker load -i ticdc-v5.2.1.tar
docker load -i tidb-monitor-initializer-v5.2.1.tar
docker load -i tikv-v5.2.1.tar
docker load -i tidb-v5.2.1.tar
docker load -i tidb-binlog-v5.2.1.tar
docker load -i pd-v5.2.1.tar
docker load -i tiflash-v5.2.1.tar
docker load -i prometheus-v2.27.1.tar
docker load -i kube-proxy-v1.18.5.tar
docker load -i kube-controller-manager-v1.18.5.tar
docker load -i kube-apiserver-v1.18.5.tar
docker load -i kube-scheduler-v1.18.5.tar
docker load -i node-v3.8.9.tar
docker load -i pod2daemon-flexvol-v3.8.9.tar
docker load -i cni-v3.8.9.tar
docker load -i kube-controllers-v3.8.9.tar
docker load -i advanced-statefulset-v0.3.3.tar
docker load -i prometheus-v2.18.1.tar
docker load -i local-volume-provisioner-v2.3.4.tar
docker load -i pause-3.2.tar
docker load -i coredns-1.6.7.tar
docker load -i etcd-3.4.3-0.tar
docker load -i tidb-monitor-reloader-v1.0.1.tar
docker load -i grafana-6.0.1.tar
docker load -i busybox-1.26.2.tar
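If all the archives sit in one directory on the target node, a loop is less error-prone than listing each file (equivalent to the commands above):
#for t in *.tar; do docker load -i "$t"; done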
# Initialize Kubernetes on the master (control-plane) node and join the worker nodes
#kubeadm init --config=kubeadm-config.yaml
#mkdir -p /root/.kube
#cp -i /etc/kubernetes/admin.conf /root/.kube/config
#wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml --no-check-certificate
# Edit CALICO_IPV4POOL_CIDR in calico.yaml to match podSubnet (10.20.0.0/16) before applying
#kubectl apply -f calico.yaml
#kubectl get nodes
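Nodes only report Ready once the Calico pods are up; watch progress with:
#kubectl get pods -n kube-system -w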
#mkdir -p /mnt/disks
#for i in `seq 10`; do
  mkdir -p /data/pv/pv0$i && mkdir -p /mnt/disks/pv0$i
  mount --bind /data/pv/pv0$i /mnt/disks/pv0$i
done
# Persist the bind mounts so they survive a reboot
#for i in `seq 10`; do
  echo /data/pv/pv0${i} /mnt/disks/pv0${i} none bind 0 0 | sudo tee -a /etc/fstab
done
# To tear the bind mounts down later (cleanup only):
#for i in `seq 10`; do
  umount /mnt/disks/pv0$i
done
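After the mount and fstab steps (the umount loop above is only for later teardown), confirm all ten bind mounts are active:
#mount | grep /mnt/disks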
#wget https://raw.githubusercontent.com/pingcap/tidb-operator/v1.2.4/manifests/crd.yaml
#wget http://charts.pingcap.org/tidb-operator-v1.2.4.tgz
#wget https://raw.githubusercontent.com/pingcap/tidb-operator/master/examples/basic/tidb-cluster.yaml
#wget https://get.helm.sh/helm-v3.4.1-linux-amd64.tar.gz
#tar -zxvf helm-v3.4.1-linux-amd64.tar.gz -C /data/kubernetes/
#cp /data/kubernetes/linux-amd64/helm /usr/local/bin/
#helm repo add pingcap https://charts.pingcap.org/
#kubectl apply -f crd.yaml
#tar zxvf tidb-operator-v1.2.4.tgz
#vim tidb-operator/values.yaml
# In the scheduler section, pin the kube-scheduler image tag to the cluster version
# (this will default to matching your kubernetes version):
kubeSchedulerImageTag: v1.18.5
#kubectl create namespace tidb-admin
#helm install tidb-operator ./tidb-operator --namespace=tidb-admin
#helm upgrade tidb-operator ./tidb-operator --namespace=tidb-admin
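Confirm the operator's controller-manager and scheduler pods are running before creating a cluster:
#kubectl get pods --namespace tidb-admin -l app.kubernetes.io/instance=tidb-operator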
#kubectl apply -f local-volume-provisioner.yaml
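local-volume-provisioner.yaml is assumed to come from the tidb-operator repository (manifests/local-dind/local-volume-provisioner.yaml in the v1.2.4 tree); once it is applied, each /mnt/disks/pv0* directory should surface as a local-storage PV:
#kubectl get pv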
#vim tidb-cluster.yaml
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: tidb-cluster
  namespace: tidb-cluster
spec:
  version: v5.2.1   # image tag; matches the v5.2.1 images loaded above
  pd:
    baseImage: pingcap/pd
    config: |
      lease = 3
      enable-prevote = true
    replicas: 3
    requests:
      storage: 5Gi
    mountClusterClientSecret: true
    storageClassName: local-storage
  tidb:
    baseImage: pingcap/tidb
    config: |
      level = "info"
      enable-timestamp = true
    replicas: 3
    service:
      type: NodePort
      externalTrafficPolicy: Local
      mysqlNodePort: 30020
      exposeStatus: true
      statusNodePort: 30040
    storageClassName: local-storage
  tikv:
    baseImage: pingcap/tikv
    config: |
      prevote = true
    replicas: 3
    requests:
      storage: 1Gi
    mountClusterClientSecret: true
    separateRocksDBLog: true
    separateRaftLog: true
    storageClassName: local-storage
#kubectl create namespace tidb-cluster
#kubectl apply -f tidb-cluster.yaml
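PD members start first, then TiKV, then TiDB; watch until all pods are Running:
#kubectl get pods -n tidb-cluster -w
#kubectl get tidbcluster -n tidb-cluster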
#kubectl create secret generic tidb-secret --from-literal=root='123456' --namespace=tidb-cluster
#wget https://raw.githubusercontent.com/pingcap/tidb-operator/master/manifests/initializer/tidb-initializer.yaml
# Edit tidb-initializer.yaml to reference this cluster and the tidb-secret created above, then apply
#kubectl apply -f tidb-initializer.yaml --namespace=tidb-cluster
# Remove the master taint so pods can also schedule on the control-plane node (small/test clusters)
#kubectl taint node master81 node-role.kubernetes.io/master-
#mysql -h 127.0.0.1 -P 30020 -u root
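A quick smoke test; note that once the initializer has run, root authenticates with the password from tidb-secret, so add -p:
#mysql -h 127.0.0.1 -P 30020 -u root -p -e "select tidb_version();"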
# On the master, print the join command (token + CA cert hash):
#kubeadm token create --print-join-command
# Run the printed command on each worker node:
#kubeadm join 192.168.0.81:6443 --token