k8s ingress

https://www.cnblogs.com/hanxing/p/16062860.html

192.168.1.222 k8s-master11 m1
192.168.1.223 k8s-master12 m2
192.168.1.224 k8s-master13 m3
192.168.1.225 k8s-node01 n1
192.168.1.226 k8s-node02 n2
keepalived and haproxy are installed on n1 and n2; the VIP is 192.168.1.123.
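For reference, a minimal keepalived sketch for the VIP; the interface name, virtual_router_id, priority, and password here are assumptions, adjust them to the environment:

# /etc/keepalived/keepalived.conf on n1
vrrp_instance VI_1 {
    state MASTER                 # use BACKUP on n2
    interface eth0               # assumed NIC name
    virtual_router_id 51         # must match on n1 and n2
    priority 100                 # e.g. 90 on n2
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s-vip        # assumed shared secret
    }
    virtual_ipaddress {
        192.168.1.123
    }
}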
 
Install the ingress-controller
docker  pull quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.0
Each node can pull the image in advance. Include the :0.25.0 tag so it matches the Deployment below; an untagged pull would fetch :latest instead:
[root@k8s-node02 ~]#  docker  pull quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.0
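To pre-pull on all five nodes in one go, a sketch assuming passwordless ssh to the short hostnames above:

for h in m1 m2 m3 n1 n2; do
  ssh "$h" docker pull quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.0
done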


[root@k8s-master11 ~]# kubectl apply -f ingress-nginx-install.yml 
namespace/ingress-nginx created
configmap/nginx-configuration created
configmap/tcp-services created
configmap/udp-services created
serviceaccount/nginx-ingress-serviceaccount created
clusterrole.rbac.authorization.k8s.io/nginx-ingress-clusterrole created
role.rbac.authorization.k8s.io/nginx-ingress-role created
rolebinding.rbac.authorization.k8s.io/nginx-ingress-role-nisa-binding created
clusterrolebinding.rbac.authorization.k8s.io/nginx-ingress-clusterrole-nisa-binding created
deployment.apps/nginx-ingress-controller created
ingress-nginx-install.yml is as follows:
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
      - "networking.k8s.io"
    resources:
      - ingresses/status
    verbs:
      - update

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/part-of: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    spec:
      serviceAccountName: nginx-ingress-serviceaccount
      containers:
        - name: nginx-ingress-controller
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.25.0
          args:
            - /nginx-ingress-controller
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx
            - --annotations-prefix=nginx.ingress.kubernetes.io
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            # www-data -> 33
            runAsUser: 33
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
            - name: https
              containerPort: 443
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10

---

Check the ingress-controller (under the hood it is just an nginx)

[root@k8s-master11 ~]# kubectl get po -A -o wide|grep ingre       
ingress-nginx   nginx-ingress-controller-7995bd9c47-qn277   1/1     Running   0          111m    10.244.3.3      k8s-node01     <none>           <none>

[root@k8s-master11 ~]# kubectl get deploy -A|grep  ingress
ingress-nginx   nginx-ingress-controller   1/1     1            1           110m

Inspect the ingress-controller's nginx config

[root@k8s-master11 ~]# kubectl exec -it po/nginx-ingress-controller-7995bd9c47-qn277 -n ingress-nginx sh
$ ps -ef |grep nginx
www-data      1      0  0 07:38 ?        00:00:00 /usr/bin/dumb-init -- /nginx-ingress-controller --configmap=ingress-nginx/nginx-configuration --tcp-services-configmap=ingress-nginx/tcp-services --udp-services-configmap=ingress-nginx/udp-services --publish-service=ingress-nginx/ingress-nginx --annotations-prefix=nginx.ingress.kubernetes.io
www-data      8      1  3 07:38 ?        00:06:22 /nginx-ingress-controller --configmap=ingress-nginx/nginx-configuration --tcp-services-configmap=ingress-nginx/tcp-services --udp-services-configmap=ingress-nginx/udp-services --publish-service=ingress-nginx/ingress-nginx --annotations-prefix=nginx.ingress.kubernetes.io
www-data     34      8  0 07:38 ?        00:00:02 nginx: master process /usr/local/openresty/nginx/sbin/nginx -c /etc/nginx/nginx.conf
www-data    466     34  0 08:13 ?        00:00:11 nginx: worker process
www-data    467     34  0 08:13 ?        00:00:11 nginx: worker process
www-data    468     34  0 08:13 ?        00:00:10 nginx: worker process
www-data    469     34  0 08:13 ?        00:00:11 nginx: worker process
www-data    606    598  0 11:05 pts/0    00:00:00 grep nginx
$ cat /etc/nginx/nginx.conf

# Configuration checksum: 825640207497700627

# setup custom paths that do not require root access
pid /tmp/nginx.pid;

daemon off;

worker_processes 4;

worker_rlimit_nofile 15360;

worker_shutdown_timeout 10s ;

events {
        multi_accept        on;
        worker_connections  16384;
        use                 epoll;
}

http {
        lua_package_path        "/usr/local/openresty/site/lualib/?.ljbc;/usr/local/openresty/site/lualib/?/init.ljbc;/usr/local/openresty/lualib/?.ljbc;/usr/local/openresty/lualib/?/init.ljbc;/usr/local/openresty/site/lualib/?.lua;/usr/local/openresty/site/lualib/?/init.lua;/usr/local/openresty/lualib/?.lua;/usr/local/openresty/lualib/?/init.lua;./?.lua;/usr/local/openresty/luajit/share/luajit-2.1.0-beta3/?.lua;/usr/local/share/lua/5.1/?.lua;/usr/local/share/lua/5.1/?/init.lua;/usr/local/openresty/luajit/share/lua/5.1/?.lua;/usr/local/openresty/luajit/share/lua/5.1/?/init.lua;/usr/local/lib/lua/?.lua;;";
        lua_package_cpath       "/usr/local/openresty/site/lualib/?.so;/usr/local/openresty/lualib/?.so;./?.so;/usr/local/lib/lua/5.1/?.so;/usr/local/openresty/luajit/lib/lua/5.1/?.so;/usr/local/lib/lua/5.1/loadall.so;/usr/local/openresty/luajit/lib/lua/5.1/?.so;;";

        lua_shared_dict configuration_data 15M;
        lua_shared_dict certificate_data 16M;

        init_by_lua_block {
                collectgarbage("collect")

                local lua_resty_waf = require("resty.waf")
                lua_resty_waf.init()

                -- init modules
                local ok, res

                ok, res = pcall(require, "lua_ingress")
                if not ok then
                error("require failed: " .. tostring(res))
                else
                lua_ingress = res
                lua_ingress.set_config({
                        use_forwarded_headers = false,
                        is_ssl_passthrough_enabled = false,
                        http_redirect_code = 308,
                listen_ports = { ssl_proxy = "442", https = "443" },
                })
                end

                ok, res = pcall(require, "configuration")
                if not ok then
                error("require failed: " .. tostring(res))
                else
                configuration = res
        configuration.nameservers = { "10.96.0.10" }
                end

                ok, res = pcall(require, "balancer")
                if not ok then
                error("require failed: " .. tostring(res))
                else
                balancer = res
                end

                ok, res = pcall(require, "monitor")
                if not ok then
                error("require failed: " .. tostring(res))
                else
                monitor = res
                end

                ok, res = pcall(require, "certificate")
                if not ok then
                error("require failed: " .. tostring(res))
                else
                certificate = res
                end

                ok, res = pcall(require, "plugins")
                if not ok then
                error("require failed: " .. tostring(res))
                else
                plugins = res
                end
                -- load all plugins that'll be used here
        plugins.init({})
        }

        init_worker_by_lua_block {
                lua_ingress.init_worker()
                balancer.init_worker()

                monitor.init_worker()

                plugins.run()
        }

        geoip_country       /etc/nginx/geoip/GeoIP.dat;
        geoip_city          /etc/nginx/geoip/GeoLiteCity.dat;
        geoip_org           /etc/nginx/geoip/GeoIPASNum.dat;
        geoip_proxy_recursive on;

        aio                 threads;
        aio_write           on;

        tcp_nopush          on;
        tcp_nodelay         on;

        log_subrequest      on;

        reset_timedout_connection on;

        keepalive_timeout  75s;
        keepalive_requests 100;

        client_body_temp_path           /tmp/client-body;
        fastcgi_temp_path               /tmp/fastcgi-temp;
        proxy_temp_path                 /tmp/proxy-temp;
        ajp_temp_path                   /tmp/ajp-temp;

        client_header_buffer_size       1k;
        client_header_timeout           60s;
        large_client_header_buffers     4 8k;
        client_body_buffer_size         8k;
        client_body_timeout             60s;

        http2_max_field_size            4k;
        http2_max_header_size           16k;
        http2_max_requests              1000;

        types_hash_max_size             2048;
        server_names_hash_max_size      1024;
        server_names_hash_bucket_size   32;
        map_hash_bucket_size            64;

        proxy_headers_hash_max_size     512;
        proxy_headers_hash_bucket_size  64;

        variables_hash_bucket_size      128;
        variables_hash_max_size         2048;

        underscores_in_headers          off;
        ignore_invalid_headers          on;

        limit_req_status                503;
        limit_conn_status               503;

        include /etc/nginx/mime.types;
        default_type text/html;

        gzip on;
        gzip_comp_level 5;
        gzip_http_version 1.1;
        gzip_min_length 256;
        gzip_types application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component;
        gzip_proxied any;
        gzip_vary on;

        # Custom headers for response

        server_tokens on;

        # disable warnings
        uninitialized_variable_warn off;

        # Additional available variables:
        # $namespace
        # $ingress_name
        # $service_name
        # $service_port
        log_format upstreaminfo '$the_real_ip - [$the_real_ip] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] [$proxy_alternative_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status $req_id';

        map $request_uri $loggable {

                default 1;
        }

        access_log /var/log/nginx/access.log upstreaminfo  if=$loggable;

        error_log  /var/log/nginx/error.log notice;

        resolver 10.96.0.10 valid=30s ipv6=off;

        # See https://www.nginx.com/blog/websocket-nginx
        map $http_upgrade $connection_upgrade {
                default          upgrade;

                # See http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive
                ''               '';

        }

        # The following is a sneaky way to do "set $the_real_ip $remote_addr"
        # Needed because using set is not allowed outside server blocks.
        map '' $the_real_ip {

                default          $remote_addr;

        }

        # Reverse proxies can detect if a client provides a X-Request-ID header, and pass it on to the backend server.
        # If no such header is provided, it can provide a random value.
        map $http_x_request_id $req_id {
                default   $http_x_request_id;

                ""        $request_id;

        }

        # Create a variable that contains the literal $ character.
        # This works because the geo module will not resolve variables.
        geo $literal_dollar {
                default "$";
        }

        server_name_in_redirect off;
        port_in_redirect        off;

        ssl_protocols TLSv1.2;

        # turn on session caching to drastically improve performance

        ssl_session_cache builtin:1000 shared:SSL:10m;
        ssl_session_timeout 10m;

        # allow configuring ssl session tickets
        ssl_session_tickets on;

        # slightly reduce the time-to-first-byte
        ssl_buffer_size 4k;

        # allow configuring custom ssl ciphers
        ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256';
        ssl_prefer_server_ciphers on;

        ssl_ecdh_curve auto;

        proxy_ssl_session_reuse on;

        upstream upstream_balancer {
                server 0.0.0.1; # placeholder

                balancer_by_lua_block {
                        balancer.balance()
                }

                keepalive 32;

                keepalive_timeout  60s;
                keepalive_requests 100;

        }

        # Global filters

        ## start server _
        server {
                server_name _ ;

                listen 80 default_server reuseport backlog=511;

                set $proxy_upstream_name "-";
                set $pass_access_scheme $scheme;
                set $pass_server_port $server_port;
                set $best_http_host $http_host;
                set $pass_port $pass_server_port;

                listen 443  default_server reuseport backlog=511 ssl http2;

                # PEM sha: d89755e71cf6b1cb80fc181271c11978e79fc4ce
                ssl_certificate                         /etc/ingress-controller/ssl/default-fake-certificate.pem;
                ssl_certificate_key                     /etc/ingress-controller/ssl/default-fake-certificate.pem;

                ssl_certificate_by_lua_block {
                        certificate.call()
                }

                location / {

                        set $namespace      "";
                        set $ingress_name   "";
                        set $service_name   "";
                        set $service_port   "0";
                        set $location_path  "/";

                        rewrite_by_lua_block {
                                lua_ingress.rewrite({
                                        force_ssl_redirect = false,
                                        use_port_in_redirects = false,
                                })
                                balancer.rewrite()
                                plugins.run()
                        }

                        header_filter_by_lua_block {

                                plugins.run()
                        }
                        body_filter_by_lua_block {

                        }

                        log_by_lua_block {

                                balancer.log()

                                monitor.call()

                                plugins.run()
                        }

                        if ($scheme = https) {
                                more_set_headers                        "Strict-Transport-Security: max-age=15724800; includeSubDomains";
                        }

                        access_log off;

                        port_in_redirect off;

                        set $balancer_ewma_score -1;
                        set $proxy_upstream_name    "upstream-default-backend";
                        set $proxy_host             $proxy_upstream_name;

                        set $proxy_alternative_upstream_name "";

                        client_max_body_size                    1m;

                        proxy_set_header Host                   $best_http_host;

                        # Pass the extracted client certificate to the backend

                        # Allow websocket connections
                        proxy_set_header                        Upgrade           $http_upgrade;

                        proxy_set_header                        Connection        $connection_upgrade;

                        proxy_set_header X-Request-ID           $req_id;
                        proxy_set_header X-Real-IP              $the_real_ip;

                        proxy_set_header X-Forwarded-For        $the_real_ip;

                        proxy_set_header X-Forwarded-Host       $best_http_host;
                        proxy_set_header X-Forwarded-Port       $pass_port;
                        proxy_set_header X-Forwarded-Proto      $pass_access_scheme;

                        proxy_set_header X-Original-URI         $request_uri;

                        proxy_set_header X-Scheme               $pass_access_scheme;

                        # Pass the original X-Forwarded-For
                        proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;

                        # mitigate HTTPoxy Vulnerability
                        # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
                        proxy_set_header Proxy                  "";

                        # Custom headers to proxied server

                        proxy_connect_timeout                   5s;
                        proxy_send_timeout                      60s;
                        proxy_read_timeout                      60s;

                        proxy_buffering                         off;
                        proxy_buffer_size                       4k;
                        proxy_buffers                           4 4k;
                        proxy_request_buffering                 on;

                        proxy_http_version                      1.1;

                        proxy_cookie_domain                     off;
                        proxy_cookie_path                       off;

                        # In case of errors try the next upstream server before returning an error
                        proxy_next_upstream                     error timeout;
                        proxy_next_upstream_timeout             0;
                        proxy_next_upstream_tries               3;

                        proxy_pass http://upstream_balancer;

                        proxy_redirect                          off;

                }

                # health checks in cloud providers require the use of port 80
                location /healthz {

                        access_log off;
                        return 200;
                }

                # this is required to avoid error if nginx is being monitored
                # with an external software (like sysdig)
                location /nginx_status {

                        allow 127.0.0.1;

                        deny all;

                        access_log off;
                        stub_status on;
                }

        }
        ## end server _

        ## start server www.zzx.com
        server {
                server_name www.zzx.com ;

                listen 80;

                set $proxy_upstream_name "-";
                set $pass_access_scheme $scheme;
                set $pass_server_port $server_port;
                set $best_http_host $http_host;
                set $pass_port $pass_server_port;

                location / {

                        set $namespace      "default";
                        set $ingress_name   "nginx-ingress";
                        set $service_name   "nginx-svc";
                        set $service_port   "80";
                        set $location_path  "/";

                        rewrite_by_lua_block {
                                lua_ingress.rewrite({
                                        force_ssl_redirect = false,
                                        use_port_in_redirects = false,
                                })
                                balancer.rewrite()
                                plugins.run()
                        }

                        header_filter_by_lua_block {

                                plugins.run()
                        }
                        body_filter_by_lua_block {

                        }

                        log_by_lua_block {

                                balancer.log()

                                monitor.call()

                                plugins.run()
                        }

                        port_in_redirect off;

                        set $balancer_ewma_score -1;
                        set $proxy_upstream_name    "default-nginx-svc-80";
                        set $proxy_host             $proxy_upstream_name;

                        set $proxy_alternative_upstream_name "";

                        client_max_body_size                    1m;

                        proxy_set_header Host                   $best_http_host;

                        # Pass the extracted client certificate to the backend

                        # Allow websocket connections
                        proxy_set_header                        Upgrade           $http_upgrade;

                        proxy_set_header                        Connection        $connection_upgrade;

                        proxy_set_header X-Request-ID           $req_id;
                        proxy_set_header X-Real-IP              $the_real_ip;

                        proxy_set_header X-Forwarded-For        $the_real_ip;

                        proxy_set_header X-Forwarded-Host       $best_http_host;
                        proxy_set_header X-Forwarded-Port       $pass_port;
                        proxy_set_header X-Forwarded-Proto      $pass_access_scheme;

                        proxy_set_header X-Original-URI         $request_uri;

                        proxy_set_header X-Scheme               $pass_access_scheme;

                        # Pass the original X-Forwarded-For
                        proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;

                        # mitigate HTTPoxy Vulnerability
                        # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
                        proxy_set_header Proxy                  "";

                        # Custom headers to proxied server

                        proxy_connect_timeout                   5s;
                        proxy_send_timeout                      60s;
                        proxy_read_timeout                      60s;

                        proxy_buffering                         off;
                        proxy_buffer_size                       4k;
                        proxy_buffers                           4 4k;
                        proxy_request_buffering                 on;

                        proxy_http_version                      1.1;

                        proxy_cookie_domain                     off;
                        proxy_cookie_path                       off;

                        # In case of errors try the next upstream server before returning an error
                        proxy_next_upstream                     error timeout;
                        proxy_next_upstream_timeout             0;
                        proxy_next_upstream_tries               3;

                        proxy_pass http://upstream_balancer;

                        proxy_redirect                          off;

                }

        }
        ## end server www.zzx.com

        # backend for when default-backend-service is not configured or it does not have endpoints
        server {
                listen 8181 default_server reuseport backlog=511;

                set $proxy_upstream_name "internal";

                access_log off;

                location / {
                        return 404;
                }
        }

        # default server, used for NGINX healthcheck and access to nginx stats
        server {
                listen unix:/tmp/nginx-status-server.sock;
                set $proxy_upstream_name "internal";

                keepalive_timeout 0;
                gzip off;

                access_log off;

                location /healthz {
                        return 200;
                }

                location /is-dynamic-lb-initialized {
                        content_by_lua_block {
                                local configuration = require("configuration")
                                local backend_data = configuration.get_backends_data()
                                if not backend_data then
                                ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
                                return
                                end

                                ngx.say("OK")
                                ngx.exit(ngx.HTTP_OK)
                        }
                }

                location /nginx_status {
                        stub_status on;
                }

                location /configuration {
                        # this should be equals to configuration_data dict
                        client_max_body_size                    10m;
                        client_body_buffer_size                 10m;
                        proxy_buffering                         off;

                        content_by_lua_block {
                                configuration.call()
                        }
                }

                location / {
                        content_by_lua_block {
                                ngx.exit(ngx.HTTP_NOT_FOUND)
                        }
                }
        }
}

stream {
        lua_package_cpath "/usr/local/lib/lua/?.so;/usr/lib/lua-platform-path/lua/5.1/?.so;;";
        lua_package_path "/etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/?.lua;/usr/local/lib/lua/?.lua;;";

        lua_shared_dict tcp_udp_configuration_data 5M;

        init_by_lua_block {
                collectgarbage("collect")

                -- init modules
                local ok, res

                ok, res = pcall(require, "configuration")
                if not ok then
                error("require failed: " .. tostring(res))
                else
                configuration = res
        configuration.nameservers = { "10.96.0.10" }
                end

                ok, res = pcall(require, "tcp_udp_configuration")
                if not ok then
                error("require failed: " .. tostring(res))
                else
                tcp_udp_configuration = res
                end

                ok, res = pcall(require, "tcp_udp_balancer")
                if not ok then
                error("require failed: " .. tostring(res))
                else
                tcp_udp_balancer = res
                end
        }

        init_worker_by_lua_block {
                tcp_udp_balancer.init_worker()
        }

        lua_add_variable $proxy_upstream_name;

        log_format log_stream [$time_local] $protocol $status $bytes_sent $bytes_received $session_time;

        access_log /var/log/nginx/access.log log_stream ;

        error_log  /var/log/nginx/error.log;

        upstream upstream_balancer {
                server 0.0.0.1:1234; # placeholder

                balancer_by_lua_block {
                        tcp_udp_balancer.balance()
                }
        }

        server {
                listen unix:/tmp/ingress-stream.sock;

                content_by_lua_block {
                        tcp_udp_configuration.call()
                }
        }

        # TCP services

        # UDP services

}

$

Check the ingress-controller's Service: it is a NodePort, reachable from outside the cluster on port 31663. You can inspect or edit it with: kubectl edit svc/ingress-nginx -n ingress-nginx

[root@k8s-master11 ~]# kubectl get svc  -n ingress-nginx -o wide
NAME            TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE   SELECTOR
ingress-nginx   NodePort   10.109.152.173   <none>        80:31663/TCP,443:32333/TCP   65m   app.kubernetes.io/name=ingress-nginx,app.kubernetes.io/part-of=ingress-nginx
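Note that ingress-nginx-install.yml above does not contain this Service, so it was created separately. A minimal sketch that matches the output; pinning the nodePort values is an assumption (omit them to let the cluster pick):

apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  type: NodePort
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
  ports:
    - name: http
      port: 80
      targetPort: 80
      nodePort: 31663   # assumed, to match the output above
    - name: https
      port: 443
      targetPort: 443
      nodePort: 32333   # assumed, to match the output above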

Create the application: Deployment, Service, and Ingress
[root@k8s-master11 ~]# kubectl apply -f ingress.yml      
deployment.extensions/nginx-deploy created
service/nginx-svc created
ingress.extensions/nginx-ingress created

ingress.yml is as follows:
[root@k8s-master11 ~]# cat ingress.yml   
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deploy
spec:
  replicas: 2
  template:
    metadata:
      labels:
        name: nginx-app
    spec:
      containers:
        - name: nginx-app
          image: wangyanglinux/myapp:v1
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
spec:
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
  selector:   # must match the Deployment's pod labels
    name: nginx-app
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-ingress
spec:
  rules:
    - host: www.zzx.com
      http:
        paths:
        - path: /
          backend:
            serviceName: nginx-svc     # the backing Service's metadata.name
            servicePort: 80

Verify:
[root@k8s-master11 ~]# kubectl get deploy/nginx-deploy -o wide
NAME           READY   UP-TO-DATE   AVAILABLE   AGE     CONTAINERS   IMAGES                   SELECTOR
nginx-deploy   2/2     2            2           6m11s   nginx-app    wangyanglinux/myapp:v1   name=nginx-app
[root@k8s-master11 ~]# 
[root@k8s-master11 ~]# kubectl get svc/nginx-svc -o wide                   
NAME        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE    SELECTOR
nginx-svc   ClusterIP   10.102.69.189   <none>        80/TCP    2m5s   name=nginx-app
[root@k8s-master11 ~]# 
[root@k8s-master11 ~]# kubectl get ingress/nginx-ingress -o wide             
NAME            HOSTS         ADDRESS   PORTS   AGE
nginx-ingress   www.zzx.com             80      2m16s

Access from a browser (add "192.168.1.223  www.zzx.com" to C:\Windows\System32\drivers\etc\hosts):

http://www.zzx.com:31663/  
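Alternatively, test from any Linux box without editing the hosts file by overriding the Host header (the same trick used further below):

curl http://192.168.1.223:31663/ -H "Host: www.zzx.com"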

Edit /etc/haproxy/haproxy.cfg on n1 and n2, adding:
frontend  nginx
   bind *:80
   mode tcp
   default_backend             nginx_ingress

backend nginx_ingress
    balance     roundrobin
    mode tcp
    server  k8s-master11 192.168.1.222:31663 weight 1 maxconn 2000 check inter 2000 rise 2 fall 3
    server  k8s-master12 192.168.1.223:31663 weight 1 maxconn 2000 check inter 2000 rise 2 fall 3
    server  k8s-master13 192.168.1.224:31663 weight 1 maxconn 2000 check inter 2000 rise 2 fall 3
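Then validate the config and restart haproxy on both nodes:

haproxy -c -f /etc/haproxy/haproxy.cfg && systemctl restart haproxy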

Change the C:\Windows\System32\drivers\etc\hosts entry to: 192.168.1.123  www.zzx.com

Now browse to http://www.zzx.com:80 (haproxy on the VIP forwards port 80 to NodePort 31663).


Verify a second domain


Modify the configuration file:
[root@k8s-master11 ~]# cp  ingress.yml   ingress-v2.yml   

Append -v2 to the names and labels, change the image tag to v2, and change the host to www.zzx2.com.
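One way to make those edits in a single pass (a sketch; editing by hand works just as well):

sed -i -e 's/nginx-deploy/nginx-deploy-v2/' \
       -e 's/nginx-app/nginx-app-v2/g' \
       -e 's/nginx-svc/nginx-svc-v2/g' \
       -e 's/nginx-ingress/nginx-ingress-v2/' \
       -e 's/myapp:v1/myapp:v2/' \
       -e 's/www.zzx.com/www.zzx2.com/' ingress-v2.yml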

[root@k8s-master11 ~]# cat  ingress-v2.yml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deploy-v2
spec:
  replicas: 2
  template:
    metadata:
      labels:
        name: nginx-app-v2
    spec:
      containers:
        - name: nginx-app-v2
          image: wangyanglinux/myapp:v2
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc-v2
spec:
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
  selector:   # must match the Deployment's pod labels
    name: nginx-app-v2
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-ingress-v2
spec:
  rules:
    - host: www.zzx2.com
      http:
        paths:
        - path: /
          backend:
            serviceName: nginx-svc-v2     # the backing Service's metadata.name
            servicePort: 80

Launch the new v2 Deployment:

[root@k8s-master11 ~]# kubectl apply -f ingress-v2.yml 
deployment.extensions/nginx-deploy-v2 unchanged
service/nginx-svc-v2 unchanged
ingress.extensions/nginx-ingress-v2 configured
[root@k8s-master11 ~]# kubectl get po                  
NAME                               READY   STATUS    RESTARTS   AGE
nginx-app-646778d676-959hx         1/1     Running   0          5h5m
nginx-app-646778d676-9s5q6         1/1     Running   0          5h5m
nginx-deploy-5dcb69cfcf-bxkpf      1/1     Running   0          3h29m
nginx-deploy-5dcb69cfcf-wwhmp      1/1     Running   0          3h29m
nginx-deploy-v2-5df7f9f88b-hk2z9   1/1     Running   0          59s
nginx-deploy-v2-5df7f9f88b-phbgx   1/1     Running   0          59s
[root@k8s-master11 ~]# kubectl get po -o wide |grep deploy
nginx-deploy-5dcb69cfcf-bxkpf      1/1     Running   0          3h29m   10.244.2.4   k8s-node02     <none>           <none>
nginx-deploy-5dcb69cfcf-wwhmp      1/1     Running   0          3h29m   10.244.3.4   k8s-node01     <none>           <none>
nginx-deploy-v2-5df7f9f88b-hk2z9   1/1     Running   0          84s     10.244.2.5   k8s-node02     <none>           <none>
nginx-deploy-v2-5df7f9f88b-phbgx   1/1     Running   0          84s     10.244.4.2   k8s-master13   <none>           <none>
[root@k8s-master11 ~]# kubectl get svc -o wide     
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE     SELECTOR
kubernetes     ClusterIP   10.96.0.1       <none>        443/TCP   3h26m   <none>
nginx-svc      ClusterIP   10.102.69.189   <none>        80/TCP    3h25m   name=nginx-app
nginx-svc-v2   ClusterIP   10.104.92.9     <none>        80/TCP    92s     name=nginx-app-v2
[root@k8s-master11 ~]# kubectl get ingress -o wide    
NAME               HOSTS          ADDRESS   PORTS   AGE
nginx-ingress      www.zzx.com              80      3h25m
nginx-ingress-v2   www.zzx2.com             80      2m

The ingress-controller's nginx config automatically gains a new server block:
        ## start server www.zzx2.com
        server {
                server_name www.zzx2.com ;

                listen 80;

                set $proxy_upstream_name "-";
                set $pass_access_scheme $scheme;
                set $pass_server_port $server_port;
                set $best_http_host $http_host;
                set $pass_port $pass_server_port;

                location / {

                        set $namespace      "default";
                        set $ingress_name   "nginx-ingress-v2";
                        set $service_name   "nginx-svc-v2";
                        set $service_port   "80";
                        set $location_path  "/";

                        rewrite_by_lua_block {
                                lua_ingress.rewrite({
                                        force_ssl_redirect = false,
                                        use_port_in_redirects = false,
                                })
                                balancer.rewrite()
                                plugins.run()
                        }

                        header_filter_by_lua_block {

                                plugins.run()
                        }
                        body_filter_by_lua_block {

                        }

                        log_by_lua_block {

                                balancer.log()

                                monitor.call()

                                plugins.run()
                        }

                        port_in_redirect off;

                        set $balancer_ewma_score -1;
                        set $proxy_upstream_name    "default-nginx-svc-v2-80";
                        set $proxy_host             $proxy_upstream_name;

                        set $proxy_alternative_upstream_name "";

                        client_max_body_size                    1m;

                        proxy_set_header Host                   $best_http_host;

                        # Pass the extracted client certificate to the backend

                        # Allow websocket connections
                        proxy_set_header                        Upgrade           $http_upgrade;

                        proxy_set_header                        Connection        $connection_upgrade;

                        proxy_set_header X-Request-ID           $req_id;
                        proxy_set_header X-Real-IP              $the_real_ip;

                        proxy_set_header X-Forwarded-For        $the_real_ip;

                        proxy_set_header X-Forwarded-Host       $best_http_host;
                        proxy_set_header X-Forwarded-Port       $pass_port;
                        proxy_set_header X-Forwarded-Proto      $pass_access_scheme;

                        proxy_set_header X-Original-URI         $request_uri;

                        proxy_set_header X-Scheme               $pass_access_scheme;

                        # Pass the original X-Forwarded-For
                        proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;

                        # mitigate HTTPoxy Vulnerability
                        # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
                        proxy_set_header Proxy                  "";

                        # Custom headers to proxied server

                        proxy_connect_timeout                   5s;
                        proxy_send_timeout                      60s;
                        proxy_read_timeout                      60s;

                        proxy_buffering                         off;
                        proxy_buffer_size                       4k;
                        proxy_buffers                           4 4k;
                        proxy_request_buffering                 on;

                        proxy_http_version                      1.1;

                        proxy_cookie_domain                     off;
                        proxy_cookie_path                       off;

                        # In case of errors try the next upstream server before returning an error
                        proxy_next_upstream                     error timeout;
                        proxy_next_upstream_timeout             0;
                        proxy_next_upstream_tries               3;

                        proxy_pass http://upstream_balancer;

                        proxy_redirect                          off;

                }

        }
        ## end server www.zzx2.com

Update the hosts file, adding: 192.168.1.123  www.zzx2.com

Browsing to http://www.zzx2.com/ now lands on the v2 nginx.

On Linux, just override the Host header instead of editing hosts:

[root@k8s-master12 .kube]#    curl 192.168.1.123:80      -H "Host:www.zzx.com"
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

[root@k8s-master12 .kube]#      curl 192.168.1.123:80    -H "Host:www.zzx2.com"
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>

Using Ingress paths

[root@k8s-master13 ~]# kubectl get ingress
NAME               HOSTS         ADDRESS   PORTS   AGE
nginx-ingress      www.zzx.com             80      9d
nginx-ingress-v2   www.zzx.com             80      47h
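Both Ingresses now carry the same host with different paths; they were edited after creation (note generation: 5 and generation: 11 below). kubectl edit works, or as a one-liner sketch:

kubectl patch ingress/nginx-ingress --type=json \
  -p='[{"op":"replace","path":"/spec/rules/0/http/paths/0/path","value":"/zzx"}]'
kubectl patch ingress/nginx-ingress-v2 --type=json \
  -p='[{"op":"replace","path":"/spec/rules/0/host","value":"www.zzx.com"},{"op":"replace","path":"/spec/rules/0/http/paths/0/path","value":"/yj"}]'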

[root@k8s-master13 ~]# kubectl get ingress/nginx-ingress  -o yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"annotations":{},"name":"nginx-ingress","namespace":"default"},"spec":{"rules":[{"host":"www.zzx.com","http":{"paths":[{"backend":{"serviceName":"nginx-svc","servicePort":80},"path":"/"}]}}]}}
  creationTimestamp: "2022-03-27T07:58:24Z"
  generation: 5
  name: nginx-ingress
  namespace: default
  resourceVersion: "157685"
  selfLink: /apis/extensions/v1beta1/namespaces/default/ingresses/nginx-ingress
  uid: ec81c829-b657-4901-b276-1a6bd6b82944
spec:
  rules:
  - host: www.zzx.com
    http:
      paths:
      - backend:
          serviceName: nginx-svc
          servicePort: 80
        path: /zzx
status:
  loadBalancer: {}


[root@k8s-master13 ~]# kubectl get ingress/nginx-ingress-v2  -o yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"annotations":{},"name":"nginx-ingress-v2","namespace":"default"},"spec":{"rules":[{"host":"www.zzx.com","http":{"paths":[{"backend":{"serviceName":"nginx-svc-v2","servicePort":80},"path":"/hostname.html/"}]}}]}}
  creationTimestamp: "2022-04-03T10:38:58Z"
  generation: 11
  name: nginx-ingress-v2
  namespace: default
  resourceVersion: "157716"
  selfLink: /apis/extensions/v1beta1/namespaces/default/ingresses/nginx-ingress-v2
  uid: 56c57cd0-0b2c-4b93-b263-2cbad168d286
spec:
  rules:
  - host: www.zzx.com
    http:
      paths:
      - backend:
          serviceName: nginx-svc-v2
          servicePort: 80
        path: /yj
status:
  loadBalancer: {}


curl http://www.zzx.com/yj   hits nginx-svc-v2
curl http://www.zzx.com/zzx  hits nginx-svc
This is similar to adding Routes in OpenShift: the same domain can front different Deployments.

nginx config:

location = /yj {
        alias /etc/hosts;
}

 
