
Deploying a Cloud-Native Environment on Huawei Kunpeng ARM

Component layout

Single node

  • docker
  • docker-compose
  • harbor
  • rancher (run as a docker container)
  • k3s (installed on its own, then imported into Rancher for management)
  • redis / mysql
  • container-cloud deployment

rancher v2.6.5

docker run -d --privileged --restart=unless-stopped \
           -p 4480:80 -p 4443:443 \
           -v /home/rancher/rancher:/var/lib/rancher \
           -v /home/rancher/auditlog:/var/log/auditlog \
           -v /home/rancher/kubelet:/var/lib/kubelet \
           -v /home/rancher/cni:/var/lib/cni \
           --name rancher \
           rancher/rancher:v2.6.5

Firewall:

sudo firewall-cmd --zone=public --add-port=4480/tcp --permanent
sudo firewall-cmd --zone=public --add-port=4443/tcp --permanent
sudo firewall-cmd --reload
sudo firewall-cmd --zone=public --list-ports

Bootstrap password:

docker logs 29bddf277f9d  2>&1 | grep "Bootstrap Password:"
nxg5tcnj4jkh26475w4wmhskmccbd2f88pqdsk5m897f96vc88hkpx

K3S v1.23.6

# Get the arm64 binary from the GitHub releases page
# https://github.com/k3s-io/k3s/releases/tag/v1.23.6%2Bk3s1
# Prepare your own install-k3s.sh (a sketch follows this block)
mv k3s-arm64 k3s
./install-k3s.sh 192.168.xxx.xxx
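The post does not include install-k3s.sh itself; below is a minimal offline-install sketch. It assumes the official install.sh from get.k3s.io has been downloaded next to the k3s binary and that the single argument is the node's external IP; adjust to your environment.

#!/bin/sh
# install-k3s.sh -- minimal offline k3s install sketch (assumed layout, not the author's exact script)
# Usage: ./install-k3s.sh <node-ip>
set -e
NODE_IP="$1"

# Put the arm64 binary where the installer expects it
cp ./k3s /usr/local/bin/k3s
chmod +x /usr/local/bin/k3s

# Run the upstream installer without downloading anything
INSTALL_K3S_SKIP_DOWNLOAD=true \
INSTALL_K3S_EXEC="server --node-external-ip ${NODE_IP}" \
  sh ./install.sh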


# Import the cluster into Rancher for management
[root@dc3-80-254 k3s-v1.23.6]# kubectl apply -f https://172.xxx.xxx.13:4443/v3/import/twgkn2srrhd7bflft9w8cft2v58bwwzx2jrnnz7lwpdgwsj45pp8gd_c-m-kpthxclq.yaml
Unable to connect to the server: x509: certificate signed by unknown authority
# Rancher serves a self-signed certificate, so fetch the manifest with --insecure and pipe it to kubectl instead:
[root@dc3-80-254 k3s-v1.23.6]# curl --insecure -sfL https://172.xxx.xxx.13:4443/v3/import/twgkn2srrhd7bflft9w8cft2v58bwwzx2jrnnz7lwpdgwsj45pp8gd_c-m-kpthxclq.yaml | kubectl apply -f -
clusterrole.rbac.authorization.k8s.io/proxy-clusterrole-kubeapiserver created
clusterrolebinding.rbac.authorization.k8s.io/proxy-role-binding-kubernetes-master created
namespace/cattle-system created
serviceaccount/cattle created
clusterrolebinding.rbac.authorization.k8s.io/cattle-admin-binding created
secret/cattle-credentials-c413452 created
clusterrole.rbac.authorization.k8s.io/cattle-admin created
Warning: spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key: beta.kubernetes.io/os is deprecated since v1.14; use "kubernetes.io/os" instead
deployment.apps/cattle-cluster-agent created
service/cattle-cluster-agent created

Get the k3s kubeconfig

$ kubectl config view --minify --raw > ~/.kube/config
$ chmod g-rw ~/.kube/config
$ chmod o-r ~/.kube/config

harbor v2.7.0

Image source: https://github.com/octohelm/harbor

# Pull the images
docker pull ghcr.io/octohelm/harbor/prepare:v2.7.0@sha256:30f00f9f4e342bcbc9004360d3aa7647e662694019d509bd14776f6229571bc0 
docker pull ghcr.io/octohelm/harbor/harbor-db:v2.7.0@sha256:8bd418bef596c4942ac7cfe2e39321b0e6285600f11c07d9c02eb818f52321b4  
docker pull ghcr.io/octohelm/harbor/harbor-core:v2.7.0@sha256:dd7f3898f32caf8e03cee046596f03034f4297231458d4de39775dd58709b55a 
docker pull ghcr.io/octohelm/harbor/harbor-log:v2.7.0@sha256:7c15f2549eb3d8b7947561e5e276a93f2559a379d75f9af817aa91338f45f1cd  
docker pull ghcr.io/octohelm/harbor/harbor-exporter:v2.7.0@sha256:c204a20b3ad934e59afcdfe5c66c1d02ab059d5606ecc485ff03ceb450b04528 
docker pull ghcr.io/octohelm/harbor/nginx-photon:v2.7.0@sha256:5dc6549c4ebe15bde4493a811fe1fad6c72bbecb308901adaf6c6810ee5fb13c 
docker pull ghcr.io/octohelm/harbor/chartmuseum-photon:v2.7.0@sha256:0815066d46474b9403b2d2e5f6f9e2ae44d067d8d2f8523b95ea3d3f20f3d058 
docker pull ghcr.io/octohelm/harbor/harbor-portal:v2.7.0@sha256:b3f4e0e990500362b554338579497ad89af5473e024564731563704ceab9305b 
docker pull ghcr.io/octohelm/harbor/harbor-jobservice:v2.7.0@sha256:7abd6694f546172ffec4a87e389e8ba425fa6ee82479782693c120a89a291435 
docker pull ghcr.io/octohelm/harbor/harbor-registryctl:v2.7.0@sha256:a13617e86374a55d40afe336433011e42745da49b882559efc3dedc49b7129f1 
docker pull ghcr.io/octohelm/harbor/registry-photon:v2.7.0@sha256:d5f23b2bc4271b2eb1ec002eb0c0c51e708015944316e5bd17c61de73ea54415 
docker pull ghcr.io/octohelm/harbor/redis-photon:v2.7.0@sha256:b2e02d7c81ca9ceb1b352c590f2693b7b2ce196b8cd42ef22064634215670861 
docker pull ghcr.io/octohelm/harbor/notary-signer-photon:v2.7.0@sha256:22167d6c33f749127335160eb106b5b7475ed73ccd7d925bbeef0a59649a98e1 
docker pull ghcr.io/octohelm/harbor/trivy-adapter-photon:v2.7.0@sha256:b36562dd74cd9a7ccee0b6350a6d8ff791055c0e16f5035d101a29a2e77854e1

# Re-tag to the goharbor/ names the installer expects
docker tag 2361f5ef122e goharbor/harbor-core:v2.7.0 \
&& docker tag 459092dbe977 goharbor/harbor-jobservice:v2.7.0 \
&& docker tag 1c30f3fea336 goharbor/prepare:v2.7.0 \
&& docker tag 2453bb211ff5 goharbor/chartmuseum-photon:v2.7.0 \
&& docker tag 3d4d217622cc goharbor/harbor-portal:v2.7.0 \
&& docker tag 572e8040ebe9 goharbor/harbor-registryctl:v2.7.0 \
&& docker tag eaf51a9e8c8e goharbor/notary-signer-photon:v2.7.0 \
&& docker tag d92a3ab0f824 goharbor/registry-photon:v2.7.0 \
&& docker tag 9377b6ec8ce0 goharbor/harbor-db:v2.7.0 \
&& docker tag 3df249438174 goharbor/harbor-exporter:v2.7.0 \
&& docker tag ec718572f3bd goharbor/trivy-adapter-photon:v2.7.0 \
&& docker tag 15937335a06c goharbor/redis-photon:v2.7.0 \
&& docker tag de78e559f67e goharbor/nginx-photon:v2.7.0 \
&& docker tag a0154089d067 goharbor/harbor-log:v2.7.0

# Export the images
docker save $(docker images | grep v2.7.0 | awk 'BEGIN{OFS=":";ORS=" "}{print $1,$2}') -o harbor.v2.7.0.tar.gz

# Import the images (on the target machine)
docker load -i harbor.v2.7.0.tar.gz

Other commands:

docker rmi $(docker images | grep v2.7.0 | awk '{print $3}')

helm

https://github.com/helm/helm/releases/tag/v3.6.3
mv ./helm /usr/local/bin/helm
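
Only the final move is shown above; a sketch of the full arm64 download-and-install steps is below (the tarball name follows the upstream release layout, so verify it against the release page):

wget https://get.helm.sh/helm-v3.6.3-linux-arm64.tar.gz
tar -zxvf helm-v3.6.3-linux-arm64.tar.gz
mv linux-arm64/helm /usr/local/bin/helm
helm version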

redis

helm install redis-ha redis-17.11.3.tgz \
--set global.storageClass=local-path \
--set global.redis.password=23ddsd2dx \
--set image.registry=harbor84.xxx.cn:20000 \
--set image.repository=test/redis \
--set image.tag=6.0.12 \
--set architecture=replication  \
--set master.service.type=NodePort \
--version 17.11.3
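
A quick connectivity check after the install; the service name below assumes the Bitnami chart's <release>-master naming and the NodePort type set above, and the node IP is a placeholder:

# Find the NodePort assigned to the redis master service
kubectl get svc redis-ha-master -o jsonpath='{.spec.ports[0].nodePort}'

# Ping it from any machine with redis-cli, using the password set via global.redis.password
redis-cli -h 192.168.xxx.xxx -p <nodePort> -a 23ddsd2dx ping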

MySQL

## mysql image version
## ref: https://hub.docker.com/r/library/mysql/tags/
##
image: "ibex/debian-mysql-server-5.7"
imageTag: "5.7.26"

strategy:
  type: Recreate

busybox:
  image: "busybox"
  tag: "1.32"

testFramework:
  enabled: true
  image: "bats/bats"
  tag: "1.2.1"
  imagePullPolicy: IfNotPresent
  securityContext: {}

## Specify password for root user
##
## Default: random 10 character string
# mysqlRootPassword: testing

## Create a database user
##
# mysqlUser:
## Default: random 10 character string
# mysqlPassword:

## Allow unauthenticated access, uncomment to enable
##
# mysqlAllowEmptyPassword: true

## Create a database
##
# mysqlDatabase:

## Specify an imagePullPolicy (Required)
## It's recommended to change this to 'Always' if the image tag is 'latest'
## ref: http://kubernetes.io/docs/user-guide/images/#updating-images
##
imagePullPolicy: IfNotPresent

## Additional arguments that are passed to the MySQL container.
## For example use --default-authentication-plugin=mysql_native_password if older clients need to
## connect to a MySQL 8 instance.
args: []

extraVolumes: |
  # - name: extras
  #   emptyDir: {}

extraVolumeMounts: |
  # - name: extras
  #   mountPath: /usr/share/extras
  #   readOnly: true

extraInitContainers: |
  # - name: do-something
  #   image: busybox
  #   command: ['do', 'something']

## A string to add extra environment variables
# extraEnvVars: |
#   - name: EXTRA_VAR
#     value: "extra"

# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
  # - name: myRegistryKeySecretName

## Node selector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}

## Affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}

## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []

livenessProbe:
  initialDelaySeconds: 30
  periodSeconds: 10
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 3

readinessProbe:
  initialDelaySeconds: 5
  periodSeconds: 10
  timeoutSeconds: 1
  successThreshold: 1
  failureThreshold: 3

## Persist data to a persistent volume
persistence:
  enabled: true
  ## database data Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
  ##   GKE, AWS & OpenStack)
  ##
  storageClass: "local-path"
  accessMode: ReadWriteOnce
  size: 20Gi
  annotations: {}

## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:

## Security context
securityContext:
  enabled: false
  runAsUser: 999
  fsGroup: 999

## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
  requests:
    memory: 256Mi
    cpu: 100m

# Custom mysql configuration files path
configurationFilesPath: /etc/mysql/conf.d/

# Custom mysql configuration files used to override default mysql settings
configurationFiles: 
  mysql.cnf: |-
    [mysqld]
    pid-file	= /var/run/mysqld/mysqld.pid
    socket		= /var/run/mysqld/mysqld.sock
    datadir		= /var/lib/mysql
    log-error	= /var/log/mysql/error.log
    # By default we only accept connections from localhost
    bind-address	= 127.0.0.1
    # Disabling symbolic-links is recommended to prevent assorted security risks
    symbolic-links=0
    sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION

# Custom mysql init SQL files used to initialize the database
initializationFiles: {}
#  first-db.sql: |-
#    CREATE DATABASE IF NOT EXISTS first DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
#  second-db.sql: |-
#    CREATE DATABASE IF NOT EXISTS second DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;

# To enable the mysql X Protocol's port
# .. will expose the port 33060
# .. Note the X Plugin needs installation
# ref: https://dev.mysql.com/doc/refman/8.0/en/x-plugin-checking-installation.html
mysqlx:
  port:
    enabled: false

metrics:
  enabled: false
  image: prom/mysqld-exporter
  imageTag: v0.10.0
  imagePullPolicy: IfNotPresent
  resources: {}
  annotations: {}
    # prometheus.io/scrape: "true"
    # prometheus.io/port: "9104"
  livenessProbe:
    initialDelaySeconds: 15
    timeoutSeconds: 5
  readinessProbe:
    initialDelaySeconds: 5
    timeoutSeconds: 1
  flags: []
  serviceMonitor:
    enabled: false
    additionalLabels: {}

## Configure the service
## ref: http://kubernetes.io/docs/user-guide/services/
service:
  annotations: {}
  ## Specify a service type
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
  type: NodePort
  port: 3306
  # nodePort: 32000
  # loadBalancerIP:

## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
  ## Specifies whether a ServiceAccount should be created
  ##
  create: false
  ## The name of the ServiceAccount to use.
  ## If not set and create is true, a name is generated using the mariadb.fullname template
  # name:

ssl:
  enabled: false
  secret: mysql-ssl-certs
  certificates:
#  - name: mysql-ssl-certs
#    ca: |-
#      -----BEGIN CERTIFICATE-----
#      ...
#      -----END CERTIFICATE-----
#    cert: |-
#      -----BEGIN CERTIFICATE-----
#      ...
#      -----END CERTIFICATE-----
#    key: |-
#      -----BEGIN RSA PRIVATE KEY-----
#      ...
#      -----END RSA PRIVATE KEY-----

## Populates the 'TZ' system timezone environment variable
## ref: https://dev.mysql.com/doc/refman/5.7/en/time-zone-support.html
##
## Default: nil (mysql will use image's default timezone, normally UTC)
## Example: 'Australia/Sydney'
# timezone:

# Deployment Annotations
deploymentAnnotations: {}

# To be added to the database server pod(s)
podAnnotations: {}
podLabels: {}

## Set pod priorityClassName
# priorityClassName: {}

## Init container resources defaults
initContainer:
  resources:
    requests:
      memory: 10Mi
      cpu: 10m

$ helm install mysql57 -f values.yaml ./mysql-1.6.9.tgz --dry-run --debug
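
After a real install (drop --dry-run), the generated root password and a connection test look roughly like the following; the secret and service names assume the stable/mysql chart's default naming for a release called mysql57:

# Read the generated root password (skip this if mysqlRootPassword was set in values.yaml)
kubectl get secret mysql57 -o jsonpath='{.data.mysql-root-password}' | base64 -d; echo

# The service is exposed as NodePort above; find the port and connect via any node IP
kubectl get svc mysql57 -o jsonpath='{.spec.ports[0].nodePort}'
mysql -h 192.168.xxx.xxx -P <nodePort> -uroot -p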

RKE v1.3.11


# Run as root
$ useradd rancher
$ usermod -aG docker rancher
$ echo "adcsd232;@sd" | passwd --stdin rancher
# Switch to the rancher user
$ su rancher

# Run as the rancher user
$ ssh-keygen
$ ssh-copy-id -i ~/.ssh/id_rsa.pub rancher@<node-IP>
# Back as root, test passwordless login
ssh rancher@173.xxx.xxx.5 -i /home/rancher/.ssh/id_rsa

Afterwards, switch back to the root user and check the directory; seeing id_rsa there means the setup succeeded:
[root@DC3-20-007 .ssh]# ls /home/rancher/.ssh/
authorized_keys  id_rsa  id_rsa.pub  known_hosts

$ chmod +x rke_linux-arm64-v1.4.6
$ mv rke_linux-arm64-v1.4.6 /usr/local/bin/rke
$  ll /opt/cni/bin
ls: cannot access '/opt/cni/bin': No such file or directory

# Download the CNI plugins: https://github.com/containernetworking/plugins/releases/tag/v1.3.0
$ mkdir -p /opt/cni/bin
$ wget https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-arm64-v1.3.0.tgz
$ tar -zxvf cni-plugins-linux-arm64-v1.3.0.tgz -C /opt/cni/bin

[rancher@DC3-20-007 ~]$ rke config --name cluster.yml
[+] Cluster Level SSH Private Key Path [~/.ssh/id_rsa]: 
[+] Number of Hosts [1]: 
[+] SSH Address of host (1) [none]: 
[+] SSH Port of host (1) [22]: 
[+] SSH Private Key Path of host () [none]: 
[-] You have entered empty SSH key path, trying fetch from SSH key parameter
[+] SSH Private Key of host () [none]: 
[-] You have entered empty SSH key, defaulting to cluster level SSH key: ~/.ssh/id_rsa
[+] SSH User of host () [ubuntu]: rancher
[+] Is host () a Control Plane host (y/n)? [y]: 
[+] Is host () a Worker host (y/n)? [n]: y
[+] Is host () an etcd host (y/n)? [n]: y
[+] Override Hostname of host () [none]: 
[+] Internal IP of host () [none]: 
[+] Docker socket path on host () [/var/run/docker.sock]: 
[+] Network Plugin Type (flannel, calico, weave, canal, aci) [canal]: flannel
[+] Authentication Strategy [x509]: 
[+] Authorization Mode (rbac, none) [rbac]: 
[+] Kubernetes Docker image [rancher/hyperkube:v1.23.6-rancher1]: 
[+] Cluster domain [cluster.local]: 
[+] Service Cluster IP Range [10.43.0.0/16]: 
[+] Enable PodSecurityPolicy [n]: 
[+] Cluster Network CIDR [10.42.0.0/16]: 
[+] Cluster DNS Service IP [10.43.0.10]: 
[+] Add addon manifest URLs or YAML files [no]:
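
With cluster.yml generated, bringing the cluster up and pointing kubectl at it looks roughly like this; RKE writes the kubeconfig as kube_config_cluster.yml next to cluster.yml:

# Run as the rancher user that owns the SSH key
rke up --config cluster.yml

# Use the kubeconfig RKE generated
mkdir -p ~/.kube
cp kube_config_cluster.yml ~/.kube/config
kubectl get nodes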

Testing RKE on the local test001 machine

# Environment notes:
# Docker on this machine was started as root

# Create the rke001 user
[root@~]# useradd rke001
# The official docs assume docker runs under the docker group; on this machine it runs under root
# [root@~]# usermod -aG docker rke001
[root@~]# usermod -aG root rke001
# Set rke001's password
[root@~]# echo "adcsd232;@sd" | passwd --stdin rke001
[root@~]# su rke001
# Check that rke001 can access docker
[rke001@~]# docker ps 
[rke001@~]# exit

# Grant rke001 the same sudo rights as root (optional)
[root@~]# vi /etc/sudoers
# Add the following:
## Allow root to run any commands anywhere 
root	ALL=(ALL) 	ALL
rke001 ALL=(ALL) 	ALL

# The following steps set up passwordless ssh rke001@ip from the root user
[root@~]# cat ~/.ssh/id_rsa.pub 
# If the file does not exist, run ssh-keygen -t rsa first
[root@~]# ssh-keygen -t rsa 
[root@~]# ssh-copy-id rke001@192.168.xxx.xxx 
# Enter the password, after which you should see:
rke001@192.168.xxx.xxx's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'rke001@192.168.xxx.xxx'"
and check to make sure that only the key(s) you wanted were added.

[root@~]# ssh rke001@192.168.xxx.xxx  
# No password prompt this time; logging straight in as rke001 confirms that passwordless login from root works
Last login: Wed Jul 19 10:59:16 2023
[rke001@~]# exit 
[root@~]# rke up -config cluster.yml

Add a yum repository and install kubectl

Reference: https://www.bilibili.com/read/cv18866047/

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-aarch64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install kubectl:

yum install -y kubectl-1.24.14
# yum install -y kubelet-1.24.14 kubeadm-1.24.14
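
A quick sanity check of the client; on the k3s server node, kubectl can also read the kubeconfig that k3s generates at its standard path (adjust if your install differs):

kubectl version --client
# On the k3s server node, point kubectl at the k3s-generated kubeconfig
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
kubectl get nodes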

Issues

[root@dc3-80-254 harbor]# docker-compose ps
      Name                     Command                       State                                                  Ports                                        
-----------------------------------------------------------------------------------------------------------------------------------------------------------------
chartmuseum         ./docker-entrypoint.sh           Up (healthy)                                                                                                
harbor-core         /harbor/entrypoint.sh            Up (health: starting)                                                                                       
harbor-db           /docker-entrypoint.sh  13        Up (healthy)                                                                                                
harbor-jobservice   /harbor/entrypoint.sh            Restarting                                                                                                  
harbor-log          /bin/sh -c /usr/local/bin/ ...   Up (healthy)            127.0.0.1:1514->10514/tcp                                                           
harbor-portal       nginx -g daemon off;             Up (healthy)                                                                                                
nginx               nginx -g daemon off;             Up (healthy)            0.0.0.0:1803->8080/tcp,:::1803->8080/tcp, 0.0.0.0:20000->8443/tcp,:::20000->8443/tcp
redis               redis-server /etc/redis.conf     Restarting                                                                                                  
registry            /home/harbor/entrypoint.sh       Up (healthy)                                                                                                
registryctl         /home/harbor/start.sh            Up (healthy)                                                                                                
trivy-adapter       /home/scanner/entrypoint.sh      Up (healthy)

Check the redis container logs:

[root@dc3-80-254 harbor]# docker logs 7ccda9a34831
<jemalloc>: Unsupported system page size
<jemalloc>: Unsupported system page size
<jemalloc>: Unsupported system page size
<jemalloc>: Unsupported system page size
<jemalloc>: Unsupported system page size
<jemalloc>: Unsupported system page size

Fix (reference: https://blog.csdn.net/yinjl123456/article/details/118179611): build a redis image yourself on the arm64 server, then re-tag it as the image name Harbor expects.
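
The jemalloc error typically means the redis image was built against 4K memory pages while the Kunpeng kernel uses 64K pages; this can be confirmed on the host before rebuilding:

# Check the kernel page size on the host
getconf PAGESIZE
# 65536 here means 64K pages; a redis/jemalloc built for 4K pages will refuse to start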

git clone https://github.com/docker-library/redis.git
cd redis/7.0
# Build a new arm64 image
docker build -t arm64v8/cetos_redis:7.0 .

# Re-tag it as the Harbor redis image
docker tag arm64v8/cetos_redis:7.0 goharbor/redis-photon:v2.7.0 

# Then restart Harbor and it comes up cleanly
cd /data/harbor
# Stop and remove the containers
docker-compose down
# Start them again
docker-compose up -d  

Check the harbor-jobservice logs:

2023-07-13T09:19:46Z [ERROR] [/pkg/config/rest/rest.go:50]: Failed on load rest config err:Get "http://core:8080/api/v2.0/internalconfig": dial tcp 172.23.0.3:8080: connect: connection refused, url:http://core:8080/api/v2.0/internalconfig
panic: failed to load configuration, error: failed to load rest config

goroutine 1 [running]:
main.main()
	/harbor/src/jobservice/main.go:42 +0x478
Appending internal tls trust CA to ca-bundle ...
find: '/etc/harbor/ssl': No such file or directory
Internal tls trust CA appending is Done.
2023-07-13T09:20:47Z [ERROR] [/pkg/registry/client.go:82]: Failed to parse REGISTRY_HTTP_CLIENT_TIMEOUT: strconv.ParseInt: parsing "": invalid syntax, use default value: 30m0s
2023-07-13T09:20:47Z [INFO] [/controller/artifact/annotation/parser.go:71]: the annotation parser to parser artifact annotation version v1alpha1 registered
2023-07-13T09:20:47Z [INFO] [/controller/artifact/processor/processor.go:59]: the processor to process media type application/vnd.wasm.config.v1+json registered

The failure above is the container health check command returning an error:

"curl --fail -s http://localhost:8080/api/v1/stats || curl -sk --fail --key /etc/harbor/ssl/job_service.key --cert /etc/harbor/ssl/job_service.crt https://localhost:8443/api/v1/stats || exit 1"


RKE install: sshd_config configuration issue

Original file:

[root@DC3-20-007 rke]# cat /etc/ssh/sshd_config
#	$OpenBSD: sshd_config,v 1.103 2018/04/09 20:41:22 tj Exp $

# This is the sshd server system-wide configuration file.  See
# sshd_config(5) for more information.

# This sshd was compiled with PATH=/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin

# The strategy used for options in the default sshd_config shipped with
# OpenSSH is to specify options with their default value where
# possible, but leave them commented.  Uncommented options override the
# default value.

# If you want to change the port on a SELinux system, you have to tell
# SELinux about this change.
# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER
#
#Port 22
#AddressFamily any
#ListenAddress 0.0.0.0
#ListenAddress ::

HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
HostKey /etc/ssh/ssh_host_ed25519_key

# Ciphers and keying
#RekeyLimit default none

# Logging
#SyslogFacility AUTH
SyslogFacility AUTH
#LogLevel INFO

# Authentication:

#LoginGraceTime 2m
PermitRootLogin yes
#StrictModes yes
#MaxAuthTries 6
#MaxSessions 10

#PubkeyAuthentication yes

# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2
# but this is overridden so installations will only check .ssh/authorized_keys
AuthorizedKeysFile	.ssh/authorized_keys

#AuthorizedPrincipalsFile none

#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody

# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
#IgnoreRhosts yes

# To disable tunneled clear text passwords, change to no here!
#PasswordAuthentication yes
#PermitEmptyPasswords no
PasswordAuthentication yes

# Change to no to disable s/key passwords
#ChallengeResponseAuthentication yes
ChallengeResponseAuthentication no

# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
#KerberosUseKuserok yes

# GSSAPI options
GSSAPIAuthentication yes
GSSAPICleanupCredentials no
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
#GSSAPIEnablek5users no

# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication.  Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
# WARNING: 'UsePAM no' is not supported in UnionTech and may cause several
# problems.
UsePAM yes

#AllowAgentForwarding yes
#AllowTcpForwarding yes
#GatewayPorts no
X11Forwarding no
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
#PrintMotd no
#PrintLastLog yes
#TCPKeepAlive yes
#PermitUserEnvironment no
#Compression delayed
#ClientAliveInterval 0
#ClientAliveCountMax 3
#UseDNS no
#PidFile /var/run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none

# no default banner path
#Banner none

AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
AcceptEnv XMODIFIERS

# override default of no subsystems
Subsystem sftp /usr/libexec/openssh/sftp-server -l INFO -f AUTH

# Example of overriding settings on a per-user basis
#Match User anoncvs
#	X11Forwarding no
#	AllowTcpForwarding no
#	PermitTTY no
#	ForceCommand cvs server

#CheckUserSplash yes

# To modify the system-wide ssh configuration, create a  *.conf  file under
#  /etc/ssh/sshd_config.d/  which will be automatically included below
#Include /etc/ssh/sshd_config.d/*.conf
Protocol 2
LogLevel VERBOSE
PubkeyAuthentication yes
RSAAuthentication yes
IgnoreRhosts yes
RhostsRSAAuthentication no
HostbasedAuthentication no
PermitEmptyPasswords no
PermitUserEnvironment no
Ciphers aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com
ClientAliveCountMax 0
Banner /etc/issue.net
MACs hmac-sha2-512,hmac-sha2-512-etm@openssh.com,hmac-sha2-256,hmac-sha2-256-etm@openssh.com
StrictModes yes
AllowTcpForwarding no
AllowAgentForwarding no
GatewayPorts no
PermitTunnel no
KexAlgorithms curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256

Problem encountered:

[root@DC3-20-007 rke]# cat cluster.yml 
nodes:
    - address: 127.0.0.1
      ssh_key_path: "/root/.ssh/id_rsa"
      user: rke002
      role:
        - controlplane
        - etcd
        - worker

cluster_name: rke002

addon_job_timeout: 30

network:
  plugin: flannel

[root@DC3-20-007 rke]# rke up -config cluster.yml 
INFO[0000] Running RKE version: v1.3.11                 
INFO[0000] Initiating Kubernetes cluster                
INFO[0000] [dialer] Setup tunnel for host [127.0.0.1]   
WARN[0000] Failed to set up SSH tunneling for host [127.0.0.1]: Can't retrieve Docker Info: error during connect: Get "http://%2Fvar%2Frun%2Fdocker.sock/v1.24/info": Unable to access the service on /var/run/docker.sock. The service might be still starting up. Error: ssh: rejected: connect failed (open failed) 
WARN[0000] Removing host [127.0.0.1] from node lists    
WARN[0000] [state] can't fetch legacy cluster state from Kubernetes: Cluster must have at least one etcd plane host: failed to connect to the following etcd host(s) [127.0.0.1] 
INFO[0000] [certificates] Generating CA kubernetes certificates 
INFO[0000] [certificates] Generating Kubernetes API server aggregation layer requestheader client CA certificates 
INFO[0000] [certificates] GenerateServingCertificate is disabled, checking if there are unused kubelet certificates 
INFO[0000] [certificates] Generating Kubernetes API server certificates 
INFO[0001] [certificates] Generating Service account token key 
INFO[0001] [certificates] Generating Kube Controller certificates 
INFO[0001] [certificates] Generating Kube Scheduler certificates 
INFO[0001] [certificates] Generating Kube Proxy certificates 
INFO[0002] [certificates] Generating Node certificate   
INFO[0002] [certificates] Generating admin certificates and kubeconfig 
INFO[0002] [certificates] Generating Kubernetes API server proxy client certificates 
INFO[0002] Successfully Deployed state file at [./cluster.rkestate] 
INFO[0002] Building Kubernetes cluster                  
FATA[0002] Cluster must have at least one etcd plane host: please specify one or more etcd in cluster config

Fix: comment out the hardening lines that were appended at the end of sshd_config (in particular AllowTcpForwarding no, which blocks the SSH tunnel RKE uses to reach the Docker socket), then restart sshd as shown after this block:

# To modify the system-wide ssh configuration, create a  *.conf  file under
#  /etc/ssh/sshd_config.d/  which will be automatically included below
#Include /etc/ssh/sshd_config.d/*.conf
#Protocol 2
#LogLevel VERBOSE
#PubkeyAuthentication yes
#RSAAuthentication yes
#IgnoreRhosts yes
#RhostsRSAAuthentication no
#HostbasedAuthentication no
#PermitEmptyPasswords no
#PermitUserEnvironment no
#Ciphers aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com
#ClientAliveCountMax 0
#Banner /etc/issue.net
#MACs hmac-sha2-512,hmac-sha2-512-etm@openssh.com,hmac-sha2-256,hmac-sha2-256-etm@openssh.com
#StrictModes yes
#AllowTcpForwarding no
#AllowAgentForwarding no
#GatewayPorts no
#PermitTunnel no
#KexAlgorithms curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256
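
The change only takes effect after sshd reloads its configuration; a typical sequence on an EL-style system (service name assumed to be sshd):

# Validate the edited config, restart sshd, then retry the RKE run
sshd -t
systemctl restart sshd
rke up --config cluster.yml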