ubuntu手动安装k8s(简易版)
集群环境:C1(master01),C2(node01)
1、安装docker并启动
sudo apt-get install docker.io
sudo service docker start
2、关闭firewalld和selinux
查看selinux状态:/usr/sbin/sestatus -v
临时关闭:setenforce 0
永久关闭:修改/etc/selinux/config文件中设置SELINUX=disabled,然后重启服务器。
显示防火墙和端口的侦听状态:ufw status
关闭防火墙:ufw disable
开启防火墙:ufw enable
3、配置hosts并同步时钟
hostnamectl set-hostname master01
hostnamectl set-hostname node01
echo -e "192.168.25.30 master01\n192.168.25.31 node01" >> /etc/hosts
4、待docker安装完成,配置docker.service(每个节点)
vi /lib/systemd/system/docker.service
#找到ExecStart=xxx,在这行上面加入一行,内容如下:(k8s的网络需要)
ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT
5、重读systemctl并启动docker
systemctl daemon-reload
systemctl start docker
6、设置系统参数 - 允许路由转发,不对bridge的数据进行处理
#写入配置文件
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
#生效配置文件
sysctl -p /etc/sysctl.d/k8s.conf
7、这里k8s采用二进制安装方式
#安装cfssl(所有节点) wget -q --timestamping \ https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \ https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 mv cfssl_linux-amd64 /usr/local/bin/cfssl mv cfssljson_linux-amd64 /usr/local/bin/cfssljson cfssl version #生成根证书(主节点) mkdir -p /etc/kubernetes/ca vim /etc/kubernetes/ca/ca-config.json #输入一下内容 根据官网模板修改 { "signing": { "default": { "expiry": "87600h" }, "profiles": { "kubernetes": { "usages": [ "signing", "key encipherment", "server auth", "client auth" ], "expiry": "87600h" } } } vim /etc/kubernetes/ca/ca-csr.json #输入一下内容 根据官网模板修改 { "CN": "kubernetes", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "k8s", "OU": "System" } ] } #生成证书和秘钥 cd /etc/kubernetes/ca cfssl gencert -initca ca-csr.json | cfssljson -bare ca ls ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem #部署etcd #生成etcd证书 mkdir -p /var/lib/etcd #工作目录 mkdir -p /etc/kubernetes/ca/etcd vim /etc/kubernetes/ca/etcd/etcd-csr.json #输入一下内容 根据官网模板修改 { "CN": "etcd", "hosts": [ "127.0.0.1", "192.168.66.135" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "k8s", "OU": "System" } ] } cd /etc/kubernetes/ca/etcd/ cfssl gencert \ -ca=/etc/kubernetes/ca/ca.pem \ -ca-key=/etc/kubernetes/ca/ca-key.pem \ -config=/etc/kubernetes/ca/ca-config.json \ -profile=kubernetes etcd-csr.json | cfssljson -bare etcd ls etcd.csr etcd-csr.json etcd-key.pem etcd.pem #配置etcd为系统服务 vim /lib/systemd/system/etcd.service #输入如下内容,注意修改主机ip和etcd路径以及ca证书位置 [Unit] Description=Etcd Server After=network.target After=network-online.target Wants=network-online.target Documentation=https://github.com/coreos [Service] Type=notify WorkingDirectory=/var/lib/etcd/ ExecStart=/root/bin/etcd \ --name=192.168.66.135 \ --listen-client-urls=https://192.168.66.135:2379,http://127.0.0.1:2379 \ --advertise-client-urls=https://192.168.66.135:2379 \ --data-dir=/var/lib/etcd \ 
--listen-peer-urls=https://192.168.66.135:2380 \ --initial-advertise-peer-urls=https://192.168.66.135:2380 \ --cert-file=/etc/kubernetes/ca/etcd/etcd.pem \ --key-file=/etc/kubernetes/ca/etcd/etcd-key.pem \ --peer-cert-file=/etc/kubernetes/ca/etcd/etcd.pem \ --peer-key-file=/etc/kubernetes/ca/etcd/etcd-key.pem \ --trusted-ca-file=/etc/kubernetes/ca/ca.pem \ --peer-trusted-ca-file=/etc/kubernetes/ca/ca.pem Restart=on-failure RestartSec=5 LimitNOFILE=65536 [Install] WantedBy=multi-user.target #启动服务 systemctl daemon-reload systemctl start etcd #验证etcd ETCDCTL_API=3 etcdctl \ --endpoints=https://192.168.66.135:2379 \ --cacert=/etc/kubernetes/ca/ca.pem \ --cert=/etc/kubernetes/ca/etcd/etcd.pem \ --key=/etc/kubernetes/ca/etcd/etcd-key.pem \ endpoint health #出现类似如下结果,说明配置成功 https://192.168.66.135:2379 is healthy: successfully committed proposal: took = 5.194485ms #如果有异常可以查看日志 journalctl -f -u etcd.service # 部署APIServer(主节点) #生成证书 mkdir -p /etc/kubernetes/ca/kubernetes #准备csr,类似etcd cat kubernetes-csr.json { "CN": "kubernetes", "hosts": [ "127.0.0.1", "192.168.66.135", "10.68.0.1", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "k8s", "OU": "System" } ] } cd /etc/kubernetes/ca/kubernetes/ #使用根证书(ca.pem)签发kubernetes证书 cfssl gencert \ -ca=/etc/kubernetes/ca/ca.pem \ -ca-key=/etc/kubernetes/ca/ca-key.pem \ -config=/etc/kubernetes/ca/ca-config.json \ -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes ls kubernetes.csr kubernetes-csr.json kubernetes-key.pem kubernetes.pem #生成随机token head -c 16 /dev/urandom | od -An -t x | tr -d ' ' 11f4d7eafcf06965e413a409d35c3893 #按照固定格式写入token.csv,注意替换token内容 echo "11f4d7eafcf06965e413a409d35c3893,kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > /etc/kubernetes/ca/kubernetes/token.csv #配置apiserver为系统服务 vim 
/lib/systemd/system/kube-apiserver.service #输入如下内容,注意修改ip和相关路径 [Unit] Description=Kubernetes API Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=network.target [Service] ExecStart=/root/bin/kube-apiserver \ --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \ --insecure-bind-address=127.0.0.1 \ --kubelet-https=true \ --bind-address=192.168.66.135 \ --authorization-mode=Node,RBAC \ --runtime-config=rbac.authorization.k8s.io/v1 \ --enable-bootstrap-token-auth \ --token-auth-file=/etc/kubernetes/ca/kubernetes/token.csv \ --tls-cert-file=/etc/kubernetes/ca/kubernetes/kubernetes.pem \ --tls-private-key-file=/etc/kubernetes/ca/kubernetes/kubernetes-key.pem \ --client-ca-file=/etc/kubernetes/ca/ca.pem \ --service-account-key-file=/etc/kubernetes/ca/ca-key.pem \ --etcd-cafile=/etc/kubernetes/ca/ca.pem \ --etcd-certfile=/etc/kubernetes/ca/kubernetes/kubernetes.pem \ --etcd-keyfile=/etc/kubernetes/ca/kubernetes/kubernetes-key.pem \ --service-cluster-ip-range=10.68.0.0/16 \ --service-node-port-range=20000-40000 \ --etcd-servers=https://192.168.66.135:2379 \ --enable-swagger-ui=true \ --allow-privileged=true \ --audit-log-maxage=30 \ --audit-log-maxbackup=3 \ --audit-log-maxsize=100 \ --audit-log-path=/var/lib/audit.log \ --event-ttl=1h \ --v=2 Restart=on-failure RestartSec=5 Type=notify LimitNOFILE=65536 [Install] WantedBy=multi-user.target #启动服务 systemctl daemon-reload systemctl start kube-apiserver #部署CalicoNode(所有节点) #准备证书 vim /etc/kubernetes/ca/calico/calico-csr.json { "CN": "calico", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "k8s", "OU": "System" } ] cd /etc/kubernetes/ca/calico/ cfssl gencert \ -ca=/etc/kubernetes/ca/ca.pem \ -ca-key=/etc/kubernetes/ca/ca-key.pem \ -config=/etc/kubernetes/ca/ca-config.json \ -profile=kubernetes calico-csr.json | cfssljson -bare calico #配置为系统服务 vim 
/lib/systemd/system/kube-calico.service [Unit] Description=calico node After=docker.service Requires=docker.service [Service] User=root PermissionsStartOnly=true ExecStart=/usr/bin/docker run --net=host --privileged --name=calico-node \ -e ETCD_ENDPOINTS=https://192.168.66.135:2379 \ -e ETCD_CA_CERT_FILE=/etc/kubernetes/ca/ca.pem \ -e ETCD_CERT_FILE=/etc/kubernetes/ca/calico/calico.pem \ -e ETCD_KEY_FILE=/etc/kubernetes/ca/calico/calico-key.pem \ -e CALICO_LIBNETWORK_ENABLED=true \ -e CALICO_NETWORKING_BACKEND=bird \ -e CALICO_DISABLE_FILE_LOGGING=true \ -e CALICO_IPV4POOL_CIDR=172.20.0.0/16 \ -e CALICO_IPV4POOL_IPIP=off \ -e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \ -e FELIX_IPV6SUPPORT=false \ -e FELIX_LOGSEVERITYSCREEN=info \ -e FELIX_IPINIPMTU=1440 \ -e FELIX_HEALTHENABLED=true \ -e IP= \ -v /etc/kubernetes/ca:/etc/kubernetes/ca \ -v /var/run/calico:/var/run/calico \ -v /lib/modules:/lib/modules \ -v /run/docker/plugins:/run/docker/plugins \ -v /var/run/docker.sock:/var/run/docker.sock \ -v /var/log/calico:/var/log/calico \ registry.cn-hangzhou.aliyuncs.com/imooc/calico-node:v2.6.2 ExecStop=/usr/bin/docker rm -f calico-node Restart=always RestartSec=10 [Install] WantedBy=multi-user.target #启动服务 systemctl enable kube-calico.service systemctl start kube-calico.service #部署ControllerManager(主节点) #配置ControllerManager为系统服务 vim /lib/systemd/system/kube-controller-manager.service #输入如下内容,注意修改主机ip和相关路径 [Unit] Description=Kubernetes Controller Manager Documentation=https://github.com/GoogleCloudPlatform/kubernetes [Service] ExecStart=/root/bin/kube-controller-manager \ --address=127.0.0.1 \ --master=http://127.0.0.1:8080 \ --allocate-node-cidrs=true \ --service-cluster-ip-range=10.68.0.0/16 \ --cluster-cidr=172.20.0.0/16 \ --cluster-name=kubernetes \ --leader-elect=true \ --cluster-signing-cert-file=/etc/kubernetes/ca/ca.pem \ --cluster-signing-key-file=/etc/kubernetes/ca/ca-key.pem \ --service-account-private-key-file=/etc/kubernetes/ca/ca-key.pem \ 
--root-ca-file=/etc/kubernetes/ca/ca.pem \ --v=2 Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target #启动服务 systemctl daemon-reload systemctl start kube-controller-manager # 部署Scheduler(主节点) #配置Scheduler为系统服务 vim /lib/systemd/system/kube-scheduler.service #输入如下内容,注意修改主机ip和相关路径 [Unit] Description=Kubernetes Scheduler Documentation=https://github.com/GoogleCloudPlatform/kubernetes [Service] ExecStart=/root/bin/kube-scheduler \ --address=127.0.0.1 \ --master=http://127.0.0.1:8080 \ --leader-elect=true \ --v=2 Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target #启动服务 systemctl enable kube-scheduler.service systemctl start kube-scheduler.service #配置kubectl mkdir -p /etc/kubernetes/ca/admin #准备csr.json,内容如下 { "CN": "admin", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "system:masters", "OU": "System" } ] } cd /etc/kubernetes/ca/admin/ cfssl gencert \ -ca=/etc/kubernetes/ca/ca.pem \ -ca-key=/etc/kubernetes/ca/ca-key.pem \ -config=/etc/kubernetes/ca/ca-config.json \ -profile=kubernetes admin-csr.json | cfssljson -bare admin #指定apiserver的地址和证书位置 kubectl config set-cluster kubernetes \ --certificate-authority=/etc/kubernetes/ca/ca.pem \ --embed-certs=true \ --server=https://192.168.66.135:6443 #设置客户端认证参数,指定admin证书和秘钥 kubectl config set-credentials admin \ --client-certificate=/etc/kubernetes/ca/admin/admin.pem \ --embed-certs=true \ --client-key=/etc/kubernetes/ca/admin/admin-key.pem #关联用户和集群 kubectl config set-context kubernetes \ --cluster=kubernetes --user=admin #设置当前上下文 kubectl config use-context kubernetes #部署kubelet #创建角色绑定(主节点) kubectl -n kube-system get clusterrole kubectl create clusterrolebinding kubelet-bootstrap \ --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap #创建bootstrap.kubeconfig(工作节点) #设置集群参数(注意替换ip) kubectl config set-cluster kubernetes \ --certificate-authority=/etc/kubernetes/ca/ca.pem \ --embed-certs=true \ 
--server=https://192.168.66.135:6443 \ --kubeconfig=bootstrap.kubeconfig #设置客户端认证参数(注意替换token) kubectl config set-credentials kubelet-bootstrap \ --token=11f4d7eafcf06965e413a409d35c3893\ --kubeconfig=bootstrap.kubeconfig #设置上下文 kubectl config set-context default \ --cluster=kubernetes \ --user=kubelet-bootstrap \ --kubeconfig=bootstrap.kubeconfig #选择上下文 kubectl config use-context default --kubeconfig=bootstrap.kubeconfig mv bootstrap.kubeconfig /etc/kubernetes/ #准备cni配置 [root@C2 ~]# cat /etc/cni/net.d/10-calico.conf { "name": "calico-k8s-network", "cniVersion": "0.1.0", "type": "calico", "etcd_endpoints": "https://192.168.66.135:2379", "etcd_key_file": "/etc/kubernetes/ca/calico/calico-key.pem", "etcd_cert_file": "/etc/kubernetes/ca/calico/calico.pem", "etcd_ca_cert_file": "/etc/kubernetes/ca/ca.pem", "log_level": "info", "ipam": { "type": "calico-ipam" }, "kubernetes": { "kubeconfig": "/etc/kubernetes/kubelet.kubeconfig" } } #配置为系统服务并启动 [root@C2 ~]# cat /lib/systemd/system/kubelet.service [Unit] Description=Kubernetes Kubelet Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=docker.service Requires=docker.service [Service] WorkingDirectory=/var/lib/kubelet ExecStart=/root/bin/kubelet \ --address=192.168.66.136 \ --hostname-override=192.168.66.136 \ --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/imooc/pause-amd64:3.0 \ --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \ --cert-dir=/etc/kubernetes/ca \ --hairpin-mode hairpin-veth \ --network-plugin=cni \ --cni-conf-dir=/etc/cni/net.d \ --cni-bin-dir=/root/bin \ --cluster-dns=10.68.0.2 \ --cluster-domain=cluster.local. 
\ --allow-privileged=true \ --fail-swap-on=false \ --logtostderr=true \ --v=2 #kubelet cAdvisor 默认在所有接口监听 4194 端口的请求, 以下iptables限制内网访问 ExecStartPost=/sbin/iptables -A INPUT -s 10.0.0.0/8 -p tcp --dport 4194 -j ACCEPT ExecStartPost=/sbin/iptables -A INPUT -s 172.16.0.0/12 -p tcp --dport 4194 -j ACCEPT ExecStartPost=/sbin/iptables -A INPUT -s 192.168.0.0/16 -p tcp --dport 4194 -j ACCEPT ExecStartPost=/sbin/iptables -A INPUT -p tcp --dport 4194 -j DROP Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target #启动kubelet之后到master节点允许worker加入(批准worker的tls证书请求) #--------*在主节点执行*--------- $ kubectl get csr|grep 'Pending' | awk '{print $1}'| xargs kubectl certificate approve #----------------------------- #部署kube-proxy(工作节点) #生成proxy证书 mkdir -p /etc/kubernetes/ca/kube-proxy #准备proxy证书配置 - proxy只需客户端证书,因此证书请求中 hosts 字段可以为空。 #CN 指定该证书的 User 为 system:kube-proxy,预定义的 ClusterRoleBinding system:node-proxy 将User system:kube-proxy 与 Role system:node-proxier 绑定,授予了调用 kube-api-server proxy的相关 API 的权限 cat /etc/kubernetes/ca/kube-proxy/kube-proxy-csr.json [root@C2 ~]# cat /etc/kubernetes/ca/kube-proxy/kube-proxy-csr.json { "CN": "system:kube-proxy", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "k8s", "OU": "System" } ] } cd /etc/kubernetes/ca/kube-proxy/ cfssl gencert \ -ca=/etc/kubernetes/ca/ca.pem \ -ca-key=/etc/kubernetes/ca/ca-key.pem \ -config=/etc/kubernetes/ca/ca-config.json \ -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy #同上配置系统服务并启动 [root@C2 ~]# cat /lib/systemd/system/kube kube-calico.service kubelet.service kube-proxy.service [root@C2 ~]# cat /lib/systemd/system/kube-proxy.service [Unit] Description=Kubernetes Kube-Proxy Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=network.target [Service] WorkingDirectory=/var/lib/kube-proxy ExecStart=/root/bin/kube-proxy \ --bind-address=192.168.66.136 \ --hostname-override=192.168.66.136 \ 
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \ --logtostderr=true \ --v=2 Restart=on-failure RestartSec=5 LimitNOFILE=65536 [Install] WantedBy=multi-user.target ps:因为需要依赖conntrack-tools,所以在启动之前,执行yum install conntrack-tools -y #部署kube-dns(k8s app) #在主节点准备如下文件 cat kube-dns.yaml --- apiVersion: v1 kind: ConfigMap metadata: name: kube-dns namespace: kube-system labels: addonmanager.kubernetes.io/mode: EnsureExists --- apiVersion: v1 kind: ServiceAccount metadata: name: kube-dns namespace: kube-system labels: addonmanager.kubernetes.io/mode: Reconcile --- apiVersion: v1 kind: Service metadata: name: kube-dns namespace: kube-system labels: k8s-app: kube-dns addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "KubeDNS" spec: selector: k8s-app: kube-dns clusterIP: 10.68.0.2 ports: - name: dns port: 53 protocol: UDP - name: dns-tcp port: 53 protocol: TCP --- apiVersion: apps/v1 kind: Deployment metadata: name: kube-dns namespace: kube-system labels: k8s-app: kube-dns addonmanager.kubernetes.io/mode: Reconcile spec: strategy: rollingUpdate: maxSurge: 10% maxUnavailable: 0 selector: matchLabels: k8s-app: kube-dns template: metadata: labels: k8s-app: kube-dns annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: tolerations: - key: "CriticalAddonsOnly" operator: "Exists" volumes: - name: kube-dns-config configMap: name: kube-dns optional: true containers: - name: kubedns image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-kube-dns-amd64:1.14.5 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in # guaranteed class. Currently, this container falls into the # "burstable" category so the kubelet doesn't backoff from restarting it. 
limits: memory: 170Mi requests: cpu: 100m memory: 70Mi livenessProbe: httpGet: path: /healthcheck/kubedns port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: httpGet: path: /readiness port: 8081 scheme: HTTP # we poll on pod startup for the Kubernetes master service and # only setup the /readiness HTTP server once that's available. initialDelaySeconds: 3 timeoutSeconds: 5 args: - --domain=cluster.local. - --dns-port=10053 - --config-dir=/kube-dns-config - --v=2 env: - name: PROMETHEUS_PORT value: "10055" ports: - containerPort: 10053 name: dns-local protocol: UDP - containerPort: 10053 name: dns-tcp-local protocol: TCP - containerPort: 10055 name: metrics protocol: TCP volumeMounts: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-dnsmasq-nanny-amd64:1.14.5 livenessProbe: httpGet: path: /healthcheck/dnsmasq port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 args: - -v=2 - -logtostderr - -configDir=/etc/k8s/dns/dnsmasq-nanny - -restartDnsmasq=true - -- - -k - --cache-size=1000 - --log-facility=- - --server=/cluster.local./127.0.0.1#10053 - --server=/in-addr.arpa/127.0.0.1#10053 - --server=/ip6.arpa/127.0.0.1#10053 ports: - containerPort: 53 name: dns protocol: UDP - containerPort: 53 name: dns-tcp protocol: TCP # see: https://github.com/kubernetes/kubernetes/issues/29055 for details resources: requests: cpu: 150m memory: 20Mi volumeMounts: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-sidecar-amd64:1.14.5 livenessProbe: httpGet: path: /metrics port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 args: - --v=2 - --logtostderr - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,5,A - 
--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local.,5,A ports: - containerPort: 10054 name: metrics protocol: TCP resources: requests: memory: 20Mi cpu: 10m dnsPolicy: Default # Don't use cluster DNS. serviceAccountName: kube-dns kubectl create -f ~/kube-dns.yaml
为天地立心,为生民立命,为往圣继绝学,为万世开太平。