Kubernetes Advanced Practice reading notes: case study, using Ingress to publish Tomcat

1. Prepare the namespace

Resource manifest:

[root@master chapter6]# cat testing-namespace.yaml 
kind: Namespace
apiVersion: v1
metadata:
  name: testing
  labels:
    env: testing

Create it:

[root@master chapter6]# kubectl apply -f testing-namespace.yaml 
namespace/testing created

Confirm the resource exists:

[root@master chapter6]# kubectl get ns testing
NAME      STATUS   AGE
testing   Active   11s
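
Equivalently, the namespace could be created imperatively, with the label applied in a second step; a minimal sketch:

kubectl create namespace testing
kubectl label namespace testing env=testing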

2. Deploy the Tomcat instances
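
The original notes do not show tomcat-deploy.yaml. A minimal sketch consistent with the Service selector used later (app: tomcat), the two replicas seen below, and Tomcat's port 8080 might look like this; the image tag is an assumption:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: tomcat-deploy
  namespace: testing
spec:
  replicas: 2
  selector:
    matchLabels:
      app: tomcat
  template:
    metadata:
      labels:
        app: tomcat          # must match the Service selector in section 3
    spec:
      containers:
      - name: tomcat
        image: tomcat:8.5    # image tag is an assumption
        ports:
        - containerPort: 8080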

Create it:

[root@master chapter6]# kubectl apply -f tomcat-deploy.yaml 
deployment.apps/tomcat-deploy created

Verify:

[root@master chapter6]# kubectl get pods -n testing -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP            NODE     NOMINATED NODE   READINESS GATES
tomcat-deploy-5c6746469-mwkp7   1/1     Running   0          5m24s   10.244.2.2    nodes2   <none>           <none>
tomcat-deploy-5c6746469-sjswn   1/1     Running   0          5m24s   10.244.0.88   master   <none>           <none>
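
Optionally, one of the pod IPs above can be probed from inside the cluster to confirm Tomcat answers on 8080; this sketch assumes the busybox image is available, and the pod IP is specific to this environment:

kubectl run -n testing probe --rm -it --restart=Never --image=busybox -- \
  wget -qO- http://10.244.2.2:8080/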

3. Create the Service resource

Resource manifest:

[root@master chapter6]# cat tomcat-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: tomcat-svc
  namespace: testing
  labels:
    app: tomcat-svc
spec:
  selector:
    app: tomcat
  ports:
  - name: httpport
    port: 80
    targetPort: 8080
    protocol: TCP

Create it:

[root@master chapter6]# kubectl apply -f tomcat-svc.yaml 
service/tomcat-svc created

Confirm:

[root@master chapter6]# kubectl get svc tomcat-svc -n testing
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
tomcat-svc   ClusterIP   10.105.240.206   <none>        80/TCP    41s
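
The Service should also have picked up the two pod IPs as endpoints (they appear again in the Ingress describe output later); this can be confirmed with:

kubectl get endpoints tomcat-svc -n testing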

4. Create the Ingress resource

Resource manifest (the path is left empty, so the rule matches any request path for this host):

[root@master chapter6]# cat tomcat-ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: tomcat
  namespace: testing
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: tomcat.ikubernetes.io
    http:
      paths:
      - path: 
        backend:
          serviceName: tomcat-svc
          servicePort: 80

Create it:

[root@master chapter6]# kubectl apply -f tomcat-ingress.yaml 
ingress.extensions/tomcat created

Verify:

[root@master chapter6]# kubectl describe ingress -n testing 
Name:             tomcat
Namespace:        testing
Address:          192.168.118.18
Default backend:  default-http-backend:80 (<error: endpoints "default-http-backend" not found>)
Rules:
  Host                   Path  Backends
  ----                   ----  --------
  tomcat.ikubernetes.io  
                            tomcat-svc:80 (10.244.0.88:8080,10.244.2.2:8080)
Annotations:             kubernetes.io/ingress.class: nginx
Events:
  Type    Reason  Age   From                      Message
  ----    ------  ----  ----                      -------
  Normal  CREATE  11m   nginx-ingress-controller  Ingress testing/tomcat
  Normal  UPDATE  11m   nginx-ingress-controller  Ingress testing/tomcat

[root@master chapter6]# kubectl get svc -n ingress-nginx
NAME                                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             NodePort    10.96.209.11   <none>        80:30724/TCP,443:32624/TCP   30m
ingress-nginx-controller-admission   ClusterIP   10.96.105.3    <none>        443/TCP                      30m
nginx-ingress-controller             NodePort    10.99.99.99    <none>        80:30080/TCP,443:30443/TCP   13m
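
Since the controller is exposed via a NodePort Service (port 30724 for HTTP above), the rule can be tested from outside the cluster. The IP below is the Address reported in the describe output and is specific to this environment:

curl -H "Host: tomcat.ikubernetes.io" http://192.168.118.18:30724/

Alternatively, map tomcat.ikubernetes.io to a node IP in /etc/hosts and browse to http://tomcat.ikubernetes.io:30724/.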

5. Enter the controller pod and check the generated nginx configuration

Enter the container:

[root@master chapter6]# kubectl exec -n ingress-nginx -it ingress-nginx-controller-5d9498494d-97blq -- /bin/sh
/etc/nginx $ ls
fastcgi.conf geoip mime.types nginx.conf scgi_params uwsgi_params.default
fastcgi.conf.default koi-utf mime.types.default nginx.conf.default scgi_params.default win-utf
fastcgi_params koi-win modsecurity opentracing.json template
fastcgi_params.default lua modules owasp-modsecurity-crs uwsgi_params
/etc/nginx $ 

The nginx configuration file:

/etc/nginx $ cat nginx.conf

# Configuration checksum: 7643592438155657136

# setup custom paths that do not require root access
pid /tmp/nginx.pid;

daemon off;

worker_processes 4;

worker_rlimit_nofile 261120;

worker_shutdown_timeout 240s ;

events {
	multi_accept        on;
	worker_connections  16384;
	use                 epoll;
}

http {
	lua_package_path "/etc/nginx/lua/?.lua;;";
	
	lua_shared_dict balancer_ewma 10M;
	lua_shared_dict balancer_ewma_last_touched_at 10M;
	lua_shared_dict balancer_ewma_locks 1M;
	lua_shared_dict certificate_data 20M;
	lua_shared_dict certificate_servers 5M;
	lua_shared_dict configuration_data 20M;
	lua_shared_dict ocsp_response_cache 5M;
	
	init_by_lua_block {
		collectgarbage("collect")
		
		-- init modules
		local ok, res
		
		ok, res = pcall(require, "lua_ingress")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		lua_ingress = res
		lua_ingress.set_config({
			use_forwarded_headers = false,
			use_proxy_protocol = false,
			is_ssl_passthrough_enabled = false,
			http_redirect_code = 308,
		listen_ports = { ssl_proxy = "442", https = "443" },
			
			hsts = true,
			hsts_max_age = 15724800,
			hsts_include_subdomains = true,
			hsts_preload = false,
		})
		end
		
		ok, res = pcall(require, "configuration")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		configuration = res
		end
		
		ok, res = pcall(require, "balancer")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		balancer = res
		end
		
		ok, res = pcall(require, "monitor")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		monitor = res
		end
		
		ok, res = pcall(require, "certificate")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		certificate = res
		certificate.is_ocsp_stapling_enabled = false
		end
		
		ok, res = pcall(require, "plugins")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		plugins = res
		end
		-- load all plugins that'll be used here
	plugins.init({  })
	}
	
	init_worker_by_lua_block {
		lua_ingress.init_worker()
		balancer.init_worker()
		
		monitor.init_worker(10000)
		
		plugins.run()
	}
	
	geoip_country       /etc/nginx/geoip/GeoIP.dat;
	geoip_city          /etc/nginx/geoip/GeoLiteCity.dat;
	geoip_org           /etc/nginx/geoip/GeoIPASNum.dat;
	geoip_proxy_recursive on;
	
	aio                 threads;
	aio_write           on;
	
	tcp_nopush          on;
	tcp_nodelay         on;
	
	log_subrequest      on;
	
	reset_timedout_connection on;
	
	keepalive_timeout  75s;
	keepalive_requests 100;
	
	client_body_temp_path           /tmp/client-body;
	fastcgi_temp_path               /tmp/fastcgi-temp;
	proxy_temp_path                 /tmp/proxy-temp;
	ajp_temp_path                   /tmp/ajp-temp;
	
	client_header_buffer_size       1k;
	client_header_timeout           60s;
	large_client_header_buffers     4 8k;
	client_body_buffer_size         8k;
	client_body_timeout             60s;
	
	http2_max_field_size            4k;
	http2_max_header_size           16k;
	http2_max_requests              1000;
	http2_max_concurrent_streams    128;
	
	types_hash_max_size             2048;
	server_names_hash_max_size      1024;
	server_names_hash_bucket_size   64;
	map_hash_bucket_size            64;
	
	proxy_headers_hash_max_size     512;
	proxy_headers_hash_bucket_size  64;
	
	variables_hash_bucket_size      256;
	variables_hash_max_size         2048;
	
	underscores_in_headers          off;
	ignore_invalid_headers          on;
	
	limit_req_status                503;
	limit_conn_status               503;
	
	include /etc/nginx/mime.types;
	default_type text/html;
	
	gzip on;
	gzip_comp_level 5;
	gzip_http_version 1.1;
	gzip_min_length 256;
	gzip_types application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/javascript text/plain text/x-component;
	gzip_proxied any;
	gzip_vary on;
	
	# Custom headers for response
	
	server_tokens on;
	
	# disable warnings
	uninitialized_variable_warn off;
	
	# Additional available variables:
	# $namespace
	# $ingress_name
	# $service_name
	# $service_port
	log_format upstreaminfo '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] [$proxy_alternative_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status $req_id';
	
	map $request_uri $loggable {
		
		default 1;
	}
	
	access_log /var/log/nginx/access.log upstreaminfo  if=$loggable;
	
	error_log  /var/log/nginx/error.log notice;
	
	resolver 10.96.0.10 valid=30s ipv6=off;
	
	# See https://www.nginx.com/blog/websocket-nginx
	map $http_upgrade $connection_upgrade {
		default          upgrade;
		
		# See http://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive
		''               '';
		
	}
	
	# Reverse proxies can detect if a client provides a X-Request-ID header, and pass it on to the backend server.
	# If no such header is provided, it can provide a random value.
	map $http_x_request_id $req_id {
		default   $http_x_request_id;
		
		""        $request_id;
		
	}
	
	# Create a variable that contains the literal $ character.
	# This works because the geo module will not resolve variables.
	geo $literal_dollar {
		default "$";
	}
	
	server_name_in_redirect off;
	port_in_redirect        off;
	
	ssl_protocols TLSv1.2 TLSv1.3;
	
	ssl_early_data off;
	
	# turn on session caching to drastically improve performance
	
	ssl_session_cache builtin:1000 shared:SSL:10m;
	ssl_session_timeout 10m;
	
	# allow configuring ssl session tickets
	ssl_session_tickets on;
	
	# slightly reduce the time-to-first-byte
	ssl_buffer_size 4k;
	
	# allow configuring custom ssl ciphers
	ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
	ssl_prefer_server_ciphers on;
	
	ssl_ecdh_curve auto;
	
	# PEM sha: dc1f122996306571729726cc318e9ff1b5efb753
	ssl_certificate     /etc/ingress-controller/ssl/default-fake-certificate.pem;
	ssl_certificate_key /etc/ingress-controller/ssl/default-fake-certificate.pem;
	
	proxy_ssl_session_reuse on;
	
	upstream upstream_balancer {
		### Attention!!!
		#
		# We no longer create "upstream" section for every backend.
		# Backends are handled dynamically using Lua. If you would like to debug
		# and see what backends ingress-nginx has in its memory you can
		# install our kubectl plugin https://kubernetes.github.io/ingress-nginx/kubectl-plugin.
		# Once you have the plugin you can use "kubectl ingress-nginx backends" command to
		# inspect current backends.
		#
		###
		
		server 0.0.0.1; # placeholder
		
		balancer_by_lua_block {
			balancer.balance()
		}
		
		keepalive 32;
		
		keepalive_timeout  60s;
		keepalive_requests 100;
		
	}
	
	# Cache for internal auth checks
	proxy_cache_path /tmp/nginx-cache-auth levels=1:2 keys_zone=auth_cache:10m max_size=128m inactive=30m use_temp_path=off;
	
	# Global filters
	
	## start server _
	server {
		server_name _ ;
		
		listen 80 default_server reuseport backlog=511 ;
		listen 443 default_server reuseport backlog=511 ssl http2 ;
		
		set $proxy_upstream_name "-";
		
		ssl_certificate_by_lua_block {
			certificate.call()
		}
		
		location / {
			
			set $namespace      "";
			set $ingress_name   "";
			set $service_name   "";
			set $service_port   "";
			set $location_path  "/";
			
			rewrite_by_lua_block {
				lua_ingress.rewrite({
					force_ssl_redirect = false,
					ssl_redirect = false,
					force_no_ssl_redirect = false,
					use_port_in_redirects = false,
				})
				balancer.rewrite()
				plugins.run()
			}
			
			# be careful with `access_by_lua_block` and `satisfy any` directives as satisfy any
			# will always succeed when there's `access_by_lua_block` that does not have any lua code doing `ngx.exit(ngx.DECLINED)`
			# other authentication method such as basic auth or external auth useless - all requests will be allowed.
			#access_by_lua_block {
			#}
			
			header_filter_by_lua_block {
				lua_ingress.header()
				plugins.run()
			}
			
			body_filter_by_lua_block {
			}
			
			log_by_lua_block {
				balancer.log()
				
				monitor.call()
				
				plugins.run()
			}
			
			access_log off;
			
			port_in_redirect off;
			
			set $balancer_ewma_score -1;
			set $proxy_upstream_name "upstream-default-backend";
			set $proxy_host          $proxy_upstream_name;
			set $pass_access_scheme  $scheme;
			
			set $pass_server_port    $server_port;
			
			set $best_http_host      $http_host;
			set $pass_port           $pass_server_port;
			
			set $proxy_alternative_upstream_name "";
			
			client_max_body_size                    1m;
			
			proxy_set_header Host                   $best_http_host;
			
			# Pass the extracted client certificate to the backend
			
			# Allow websocket connections
			proxy_set_header                        Upgrade           $http_upgrade;
			
			proxy_set_header                        Connection        $connection_upgrade;
			
			proxy_set_header X-Request-ID           $req_id;
			proxy_set_header X-Real-IP              $remote_addr;
			
			proxy_set_header X-Forwarded-For        $remote_addr;
			
			proxy_set_header X-Forwarded-Proto      $pass_access_scheme;
			
			proxy_set_header X-Forwarded-Host       $best_http_host;
			proxy_set_header X-Forwarded-Port       $pass_port;
			
			proxy_set_header X-Scheme               $pass_access_scheme;
			
			# Pass the original X-Forwarded-For
			proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;
			
			# mitigate HTTPoxy Vulnerability
			# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
			proxy_set_header Proxy                  "";
			
			# Custom headers to proxied server
			
			proxy_connect_timeout                   5s;
			proxy_send_timeout                      60s;
			proxy_read_timeout                      60s;
			
			proxy_buffering                         off;
			proxy_buffer_size                       4k;
			proxy_buffers                           4 4k;
			
			proxy_max_temp_file_size                1024m;
			
			proxy_request_buffering                 on;
			proxy_http_version                      1.1;
			
			proxy_cookie_domain                     off;
			proxy_cookie_path                       off;
			
			# In case of errors try the next upstream server before returning an error
			proxy_next_upstream                     error timeout;
			proxy_next_upstream_timeout             0;
			proxy_next_upstream_tries               3;
			
			proxy_pass http://upstream_balancer;
			
			proxy_redirect                          off;
			
		}
		
		# health checks in cloud providers require the use of port 80
		location /healthz {
			
			access_log off;
			return 200;
		}
		
		# this is required to avoid error if nginx is being monitored
		# with an external software (like sysdig)
		location /nginx_status {
			
			allow 127.0.0.1;
			
			deny all;
			
			access_log off;
			stub_status on;
		}
		
	}
	## end server _
	
	## start server tomcat.ikubernetes.io
	server {
		server_name tomcat.ikubernetes.io ;
		
		listen 80  ;
		listen 443  ssl http2 ;
		
		set $proxy_upstream_name "-";
		
		ssl_certificate_by_lua_block {
			certificate.call()
		}
		
		location / {
			
			set $namespace      "testing";
			set $ingress_name   "tomcat";
			set $service_name   "tomcat-svc";
			set $service_port   "80";
			set $location_path  "/";
			
			rewrite_by_lua_block {
				lua_ingress.rewrite({
					force_ssl_redirect = false,
					ssl_redirect = true,
					force_no_ssl_redirect = false,
					use_port_in_redirects = false,
				})
				balancer.rewrite()
				plugins.run()
			}
			
			# be careful with `access_by_lua_block` and `satisfy any` directives as satisfy any
			# will always succeed when there's `access_by_lua_block` that does not have any lua code doing `ngx.exit(ngx.DECLINED)`
			# other authentication method such as basic auth or external auth useless - all requests will be allowed.
			#access_by_lua_block {
			#}
			
			header_filter_by_lua_block {
				lua_ingress.header()
				plugins.run()
			}
			
			body_filter_by_lua_block {
			}
			
			log_by_lua_block {
				balancer.log()
				
				monitor.call()
				
				plugins.run()
			}
			
			port_in_redirect off;
			
			set $balancer_ewma_score -1;
			set $proxy_upstream_name "testing-tomcat-svc-80";
			set $proxy_host          $proxy_upstream_name;
			set $pass_access_scheme  $scheme;
			
			set $pass_server_port    $server_port;
			
			set $best_http_host      $http_host;
			set $pass_port           $pass_server_port;
			
			set $proxy_alternative_upstream_name "";
			
			client_max_body_size                    1m;
			
			proxy_set_header Host                   $best_http_host;
			
			# Pass the extracted client certificate to the backend
			
			# Allow websocket connections
			proxy_set_header                        Upgrade           $http_upgrade;
			
			proxy_set_header                        Connection        $connection_upgrade;
			
			proxy_set_header X-Request-ID           $req_id;
			proxy_set_header X-Real-IP              $remote_addr;
			
			proxy_set_header X-Forwarded-For        $remote_addr;
			
			proxy_set_header X-Forwarded-Proto      $pass_access_scheme;
			
			proxy_set_header X-Forwarded-Host       $best_http_host;
			proxy_set_header X-Forwarded-Port       $pass_port;
			
			proxy_set_header X-Scheme               $pass_access_scheme;
			
			# Pass the original X-Forwarded-For
			proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;
			
			# mitigate HTTPoxy Vulnerability
			# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
			proxy_set_header Proxy                  "";
			
			# Custom headers to proxied server
			
			proxy_connect_timeout                   5s;
			proxy_send_timeout                      60s;
			proxy_read_timeout                      60s;
			
			proxy_buffering                         off;
			proxy_buffer_size                       4k;
			proxy_buffers                           4 4k;
			
			proxy_max_temp_file_size                1024m;
			
			proxy_request_buffering                 on;
			proxy_http_version                      1.1;
			
			proxy_cookie_domain                     off;
			proxy_cookie_path                       off;
			
			# In case of errors try the next upstream server before returning an error
			proxy_next_upstream                     error timeout;
			proxy_next_upstream_timeout             0;
			proxy_next_upstream_tries               3;
			
			proxy_pass http://upstream_balancer;
			
			proxy_redirect                          off;
			
		}
		
	}
	## end server tomcat.ikubernetes.io
	
	# backend for when default-backend-service is not configured or it does not have endpoints
	server {
		listen 8181 default_server reuseport backlog=511;
		
		set $proxy_upstream_name "internal";
		
		access_log off;
		
		location / {
			return 404;
		}
	}
	
	# default server, used for NGINX healthcheck and access to nginx stats
	server {
		listen 127.0.0.1:10246;
		set $proxy_upstream_name "internal";
		
		keepalive_timeout 0;
		gzip off;
		
		access_log off;
		
		location /healthz {
			return 200;
		}
		
		location /is-dynamic-lb-initialized {
			content_by_lua_block {
				local configuration = require("configuration")
				local backend_data = configuration.get_backends_data()
				if not backend_data then
				ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
				return
				end
				
				ngx.say("OK")
				ngx.exit(ngx.HTTP_OK)
			}
		}
		
		location /nginx_status {
			stub_status on;
		}
		
		location /configuration {
			client_max_body_size                    21m;
			client_body_buffer_size                 21m;
			proxy_buffering                         off;
			
			content_by_lua_block {
				configuration.call()
			}
		}
		
		location / {
			content_by_lua_block {
				ngx.exit(ngx.HTTP_NOT_FOUND)
			}
		}
	}
}

stream {
	lua_package_path "/etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/?.lua;;";
	
	lua_shared_dict tcp_udp_configuration_data 5M;
	
	init_by_lua_block {
		collectgarbage("collect")
		
		-- init modules
		local ok, res
		
		ok, res = pcall(require, "configuration")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		configuration = res
		end
		
		ok, res = pcall(require, "tcp_udp_configuration")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		tcp_udp_configuration = res
		end
		
		ok, res = pcall(require, "tcp_udp_balancer")
		if not ok then
		error("require failed: " .. tostring(res))
		else
		tcp_udp_balancer = res
		end
	}
	
	init_worker_by_lua_block {
		tcp_udp_balancer.init_worker()
	}
	
	lua_add_variable $proxy_upstream_name;
	
	log_format log_stream '[$remote_addr] [$time_local] $protocol $status $bytes_sent $bytes_received $session_time';
	
	access_log /var/log/nginx/access.log log_stream ;
	
	error_log  /var/log/nginx/error.log;
	
	upstream upstream_balancer {
		server 0.0.0.1:1234; # placeholder
		
		balancer_by_lua_block {
			tcp_udp_balancer.balance()
		}
	}
	
	server {
		listen 127.0.0.1:10247;
		
		access_log off;
		
		content_by_lua_block {
			tcp_udp_configuration.call()
		}
	}
	
	# TCP services
	
	# UDP services
	
}

/etc/nginx $ 

The key part is the server block the controller generated for our Ingress:

server {
		server_name tomcat.ikubernetes.io ;
		
		listen 80  ;
		listen 443  ssl http2 ;
		
		set $proxy_upstream_name "-";
		
		ssl_certificate_by_lua_block {
			certificate.call()
		}
		
		location / {
			
			set $namespace      "testing";
			set $ingress_name   "tomcat";
			set $service_name   "tomcat-svc";
			set $service_port   "80";
			set $location_path  "/";
			
			rewrite_by_lua_block {
				lua_ingress.rewrite({
					force_ssl_redirect = false,
					ssl_redirect = true,
					force_no_ssl_redirect = false,
					use_port_in_redirects = false,
				})
				balancer.rewrite()
				plugins.run()
			}
			# ... remainder of the location block omitted; it matches the full dump above
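
Note that requests are proxied to the upstream_balancer block, whose members are managed dynamically in Lua rather than rendered into nginx.conf (see the comment in the upstream block above). If the ingress-nginx kubectl plugin is installed, the live backends can be inspected with:

kubectl ingress-nginx backends -n ingress-nginx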

6. Configure a TLS Ingress resource

If HTTPS is required, it would normally be handled by an external load balancer, which terminates the SSL session and forwards requests to the Ingress controller over plain HTTP. However, when the Ingress controller itself is expected to receive client requests directly and serve them over HTTPS, a TLS-type Ingress resource should be configured.

When the service is published to the Internet, the HTTPS certificate should be signed and issued by a trusted public CA; follow the CA's process to obtain it.

For testing or internal use, a self-signed private certificate is also an option.

Generate a self-signed certificate:

[root@master tomcat]# openssl genrsa -out tls.key 2048
Generating RSA private key, 2048 bit long modulus
..........................+++
...................+++
e is 65537 (0x10001)

The certificate CN must match the host used in the Ingress rule:

[root@master tomcat]# openssl req -new -x509 -key tls.key -out tls.crt -subj /C=CN/ST=Xian/L=Xian/O=DevOps/CN=tomcat.ikubernetes.io -days 3650

[root@master tomcat]# ll
total 12
-rw-r--r-- 1 root root 1277 Jul 30 15:00 tls.crt
-rw-r--r-- 1 root root 1675 Jul 30 14:56 tls.key

The private key and certificate files cannot be used directly when configuring an HTTPS host on the Ingress controller; they must first be packaged into a Secret resource.

Create a TLS-type Secret named tomcat-ingress-secret:

[root@master tomcat]# kubectl create secret tls tomcat-ingress-secret --cert=tls.crt --key=tls.key -n testing
secret/tomcat-ingress-secret created
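
If a declarative manifest is preferred, the same Secret can be rendered with a client-side dry run (recent kubectl):

kubectl create secret tls tomcat-ingress-secret --cert=tls.crt --key=tls.key \
  -n testing --dry-run=client -o yaml > tomcat-ingress-secret.yaml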

Confirm that the Secret tomcat-ingress-secret was created successfully:

[root@master tomcat]# kubectl get secrets tomcat-ingress-secret -n testing
NAME                    TYPE                DATA   AGE
tomcat-ingress-secret   kubernetes.io/tls   2      2m11s

Define the manifest for the TLS-type Ingress resource:

[root@master tomcat]# cp ../tomcat-ingress-tls.yaml . 
[root@master tomcat]# cat tomcat-ingress-tls.yaml 
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: tomcat-ingress-tls
  namespace: testing
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  tls:
  - hosts:
    - tomcat.ikubernetes.io
    secretName: tomcat-ingress-secret
  rules:
  - host: tomcat.ikubernetes.io
    http:
      paths:
      - path: /
        backend:
          serviceName: tomcat-svc
          servicePort: 80

Create it:

kubectl apply -f tomcat-ingress-tls.yaml
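
The result can be verified as before by describing the new Ingress and then issuing an HTTPS request against the controller's TLS NodePort (32624 above). --resolve pins the hostname to the node IP so that SNI matches the certificate, and -k is needed because the certificate is self-signed; the node IP is specific to this environment:

kubectl describe ingress tomcat-ingress-tls -n testing
curl -k --resolve tomcat.ikubernetes.io:32624:192.168.118.18 https://tomcat.ikubernetes.io:32624/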
