Istio Basics
Istio basics: example walkthrough
1. Deploy the application
Label the namespace where Bookinfo will run so that Istio automatically injects sidecars (the commands below assume the default namespace):
kubectl label namespace default istio-injection=enabled
kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml
##################################################################################################
# This file defines the services, service accounts, and deployments for the Bookinfo sample.
#
# To apply all 4 Bookinfo services, their corresponding service accounts, and deployments:
#
# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml
#
# Alternatively, you can deploy any resource separately:
#
# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l service=reviews # reviews Service
# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l account=reviews # reviews ServiceAccount
# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l app=reviews,version=v3 # reviews-v3 Deployment
##################################################################################################
##################################################################################################
# Details service
##################################################################################################
apiVersion: v1
kind: Service
metadata:
  name: details
  labels:
    app: details
    service: details
spec:
  ports:
  - port: 9080
    name: http
  selector:
    app: details
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: bookinfo-details
  labels:
    account: details
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: details-v1
  labels:
    app: details
    version: v1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: details
      version: v1
  template:
    metadata:
      labels:
        app: details
        version: v1
    spec:
      serviceAccountName: bookinfo-details
      containers:
      - name: details
        image: docker.io/istio/examples-bookinfo-details-v1:1.17.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9080
        securityContext:
          runAsUser: 1000
---
##################################################################################################
# Ratings service
##################################################################################################
apiVersion: v1
kind: Service
metadata:
  name: ratings
  labels:
    app: ratings
    service: ratings
spec:
  ports:
  - port: 9080
    name: http
  selector:
    app: ratings
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: bookinfo-ratings
  labels:
    account: ratings
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ratings-v1
  labels:
    app: ratings
    version: v1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ratings
      version: v1
  template:
    metadata:
      labels:
        app: ratings
        version: v1
    spec:
      serviceAccountName: bookinfo-ratings
      containers:
      - name: ratings
        image: docker.io/istio/examples-bookinfo-ratings-v1:1.17.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9080
        securityContext:
          runAsUser: 1000
---
##################################################################################################
# Reviews service
##################################################################################################
apiVersion: v1
kind: Service
metadata:
  name: reviews
  labels:
    app: reviews
    service: reviews
spec:
  ports:
  - port: 9080
    name: http
  selector:
    app: reviews
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: bookinfo-reviews
  labels:
    account: reviews
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: reviews-v1
  labels:
    app: reviews
    version: v1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: reviews
      version: v1
  template:
    metadata:
      labels:
        app: reviews
        version: v1
    spec:
      serviceAccountName: bookinfo-reviews
      containers:
      - name: reviews
        image: docker.io/istio/examples-bookinfo-reviews-v1:1.17.0
        imagePullPolicy: IfNotPresent
        env:
        - name: LOG_DIR
          value: "/tmp/logs"
        ports:
        - containerPort: 9080
        volumeMounts:
        - name: tmp
          mountPath: /tmp
        - name: wlp-output
          mountPath: /opt/ibm/wlp/output
        securityContext:
          runAsUser: 1000
      volumes:
      - name: wlp-output
        emptyDir: {}
      - name: tmp
        emptyDir: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: reviews-v2
  labels:
    app: reviews
    version: v2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: reviews
      version: v2
  template:
    metadata:
      labels:
        app: reviews
        version: v2
    spec:
      serviceAccountName: bookinfo-reviews
      containers:
      - name: reviews
        image: docker.io/istio/examples-bookinfo-reviews-v2:1.17.0
        imagePullPolicy: IfNotPresent
        env:
        - name: LOG_DIR
          value: "/tmp/logs"
        ports:
        - containerPort: 9080
        volumeMounts:
        - name: tmp
          mountPath: /tmp
        - name: wlp-output
          mountPath: /opt/ibm/wlp/output
        securityContext:
          runAsUser: 1000
      volumes:
      - name: wlp-output
        emptyDir: {}
      - name: tmp
        emptyDir: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: reviews-v3
  labels:
    app: reviews
    version: v3
spec:
  replicas: 1
  selector:
    matchLabels:
      app: reviews
      version: v3
  template:
    metadata:
      labels:
        app: reviews
        version: v3
    spec:
      serviceAccountName: bookinfo-reviews
      containers:
      - name: reviews
        image: docker.io/istio/examples-bookinfo-reviews-v3:1.17.0
        imagePullPolicy: IfNotPresent
        env:
        - name: LOG_DIR
          value: "/tmp/logs"
        ports:
        - containerPort: 9080
        volumeMounts:
        - name: tmp
          mountPath: /tmp
        - name: wlp-output
          mountPath: /opt/ibm/wlp/output
        securityContext:
          runAsUser: 1000
      volumes:
      - name: wlp-output
        emptyDir: {}
      - name: tmp
        emptyDir: {}
---
##################################################################################################
# Productpage services
##################################################################################################
apiVersion: v1
kind: Service
metadata:
  name: productpage
  labels:
    app: productpage
    service: productpage
spec:
  ports:
  - port: 9080
    name: http
  selector:
    app: productpage
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: bookinfo-productpage
  labels:
    account: productpage
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: productpage-v1
  labels:
    app: productpage
    version: v1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: productpage
      version: v1
  template:
    metadata:
      labels:
        app: productpage
        version: v1
    spec:
      serviceAccountName: bookinfo-productpage
      containers:
      - name: productpage
        image: docker.io/istio/examples-bookinfo-productpage-v1:1.17.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9080
        volumeMounts:
        - name: tmp
          mountPath: /tmp
        securityContext:
          runAsUser: 1000
      volumes:
      - name: tmp
        emptyDir: {}
---
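Before testing, it is worth confirming that the sidecars were actually injected; a minimal check (assuming the default namespace and that injection was enabled before the pods were created) is that each Bookinfo pod reports 2/2 containers:
kubectl get pods
# Each pod should show READY 2/2 (application container plus the istio-proxy sidecar).
# If a pod shows 1/1, the namespace label was added after the pod started; delete the
# pod so it is recreated with the sidecar injected.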
2. Test
kubectl exec -it $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o "<title>.*</title>"
3. Define an ingress gateway for the application
kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: bookinfo-gateway
spec:
  selector:
    istio: ingressgateway # use istio default controller
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "*"
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: bookinfo
spec:
  hosts:
  - "*"
  gateways:
  - bookinfo-gateway
  http:
  - match:
    - uri:
        exact: /productpage
    - uri:
        prefix: /static
    - uri:
        exact: /login
    - uri:
        exact: /logout
    - uri:
        prefix: /api/v1/products
    route:
    - destination:
        host: productpage
        port:
          number: 9080
4. Determine the ingress host and port
export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].nodePort}')
export TCP_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="tcp")].nodePort}')
export INGRESS_HOST=$(kubectl get po -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].status.hostIP}')
export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT
echo $GATEWAY_URL
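The exports above assume the ingress gateway is exposed as a NodePort and reached via a node IP. In environments where istio-ingressgateway gets an external load balancer instead, a hedged alternative is to read the external IP and the service port directly (the exact status field can differ slightly by platform, e.g. hostname instead of ip):
export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT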
5. Access from outside the cluster
curl -s http://${GATEWAY_URL}/productpage | grep -o "<title>.*</title>"
6. Apply default destination rules
kubectl apply -f samples/bookinfo/networking/destination-rule-all.yaml
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: productpage
spec:
  host: productpage
  subsets:
  - name: v1
    labels:
      version: v1
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: reviews
spec:
  host: reviews
  subsets:
  - name: v1
    labels:
      version: v1
  - name: v2
    labels:
      version: v2
  - name: v3
    labels:
      version: v3
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: ratings
spec:
  host: ratings
  subsets:
  - name: v1
    labels:
      version: v1
  - name: v2
    labels:
      version: v2
  - name: v2-mysql
    labels:
      version: v2-mysql
  - name: v2-mysql-vm
    labels:
      version: v2-mysql-vm
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: details
spec:
  host: details
  subsets:
  - name: v1
    labels:
      version: v1
  - name: v2
    labels:
      version: v2
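A quick, hedged sanity check that all four destination rules were created and that the reviews rule lists the expected subsets:
kubectl get destinationrules
kubectl get destinationrule reviews -o jsonpath='{.spec.subsets[*].name}'
# Expected output (approximately): v1 v2 v3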
Traffic management
Configuring request routing
To route to only one version, apply virtual services that set a default version for each microservice. In this case, the virtual services route all traffic to the v1 version of each microservice:
kubectl apply -f samples/bookinfo/networking/virtual-service-all-v1.yaml
$ kubectl get virtualservices -o yaml
- apiVersion: networking.istio.io/v1beta1
  kind: VirtualService
  ...
  spec:
    hosts:
    - details
    http:
    - route:
      - destination:
          host: details
          subset: v1
- apiVersion: networking.istio.io/v1beta1
  kind: VirtualService
  ...
  spec:
    hosts:
    - productpage
    http:
    - route:
      - destination:
          host: productpage
          subset: v1
- apiVersion: networking.istio.io/v1beta1
  kind: VirtualService
  ...
  spec:
    hosts:
    - ratings
    http:
    - route:
      - destination:
          host: ratings
          subset: v1
- apiVersion: networking.istio.io/v1beta1
  kind: VirtualService
  ...
  spec:
    hosts:
    - reviews
    http:
    - route:
      - destination:
          host: reviews
          subset: v1
You can easily test the new configuration by refreshing /productpage of the Bookinfo application.
Open the Bookinfo site in your browser at http://$GATEWAY_URL/productpage, where $GATEWAY_URL is the external ingress address determined above.
Notice that no matter how many times you refresh, the reviews section of the page never shows rating stars. This is because Istio is configured to route all traffic for the reviews service to reviews:v1, and that version does not call the ratings service.
You have successfully completed the first part of this task: routing traffic to a single version of a service. A command-line check is sketched below.
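As an alternative to refreshing the browser, a hedged check from the shell (it assumes the rating stars are rendered with the glyphicon-star CSS class, which is how the sample productpage template draws them) is that repeated requests never contain star markup:
for i in $(seq 1 5); do
  curl -s "http://${GATEWAY_URL}/productpage" | grep -c glyphicon-star
done
# With all traffic pinned to reviews:v1, every iteration should print 0.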
Route based on user identity
Next, you will change the route configuration so that all traffic from a specific user is routed to a specific service version. In this case, all traffic from the user named Jason will be routed to reviews:v2.
Note that Istio has no special built-in notion of user identity. In fact, the productpage service adds a custom end-user header to all outbound HTTP requests to the reviews service, which is what makes this example work.
Istio also supports routing based on strongly authenticated JWTs at the ingress gateway; see the JWT claim based routing section below.
Remember that reviews:v2 is the version that includes the star-rating feature.
kubectl apply -f samples/bookinfo/networking/virtual-service-reviews-test-v2.yaml
$ kubectl get virtualservice reviews -o yaml
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
...
spec:
  hosts:
  - reviews
  http:
  - match:
    - headers:
        end-user:
          exact: jason
    route:
    - destination:
        host: reviews
        subset: v2
  - route:
    - destination:
        host: reviews
        subset: v1
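Because the end-user header is normally set by productpage after login, a hedged way to exercise this rule directly from inside the mesh is to call the reviews API with the header added by hand (the /reviews/0 path and port 9080 are assumptions based on the sample's service definition):
export RATINGS_POD=$(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}')
kubectl exec "$RATINGS_POD" -c ratings -- curl -s -H "end-user: jason" reviews:9080/reviews/0
# With the header, the response should come from reviews:v2 (it includes a ratings section);
# without -H "end-user: jason", the fallback route sends the request to reviews:v1.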
Fault injection
Initialize the application version routing by performing the request-routing task above, or by running the following commands:
kubectl apply -f samples/bookinfo/networking/virtual-service-all-v1.yaml
kubectl apply -f samples/bookinfo/networking/virtual-service-reviews-test-v2.yaml
Injecting an HTTP delay fault
To test the resilience of the Bookinfo microservice application, inject a 7-second delay between the reviews:v2 and ratings services for the user jason. This test will uncover a bug that was intentionally introduced into the Bookinfo application.
Note that the reviews:v2 service has a hard-coded 10-second connection timeout for its calls to the ratings service. So even with the 7-second delay injected, you would still expect the end-to-end flow to complete without any errors.
kubectl apply -f samples/bookinfo/networking/virtual-service-ratings-test-delay.yaml
$ kubectl get virtualservice ratings -o yaml
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
spec:
  hosts:
  - ratings
  http:
  - fault:
      delay:
        fixedDelay: 7s
        percentage:
          value: 100
    match:
    - headers:
        end-user:
          exact: jason
    route:
    - destination:
        host: ratings
        subset: v1
  - route:
    - destination:
        host: ratings
        subset: v1
Results:
- Open the Bookinfo application in your browser.
- Log in to /productpage as the user jason. You would expect the Bookinfo home page to load in about 7 seconds with no errors. Instead, there is a problem: the Reviews section shows the error message:
Sorry, product reviews are currently unavailable for this book.
- Check the page's response time:
  - Open the browser's Developer Tools menu.
  - Open the Network tab.
  - Reload the productpage. You will see that the page actually loads in about 6 seconds.
The reason is the intentionally introduced bug: productpage calls reviews with its own hard-coded 3-second timeout plus one retry (6 seconds total), so it gives up before the 10-second reviews-to-ratings timeout and the 7-second injected delay ever complete.
Request timeouts
A timeout for HTTP requests can be specified using the timeout field of a route rule. By default, the request timeout is disabled. In this task you set the timeout for calls to the reviews service to half a second. To observe its effect, you also introduce an artificial 2-second delay in calls to the ratings service.
1. Route requests to the v2 version of the reviews service, i.e. a version that calls the ratings service:
kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
  - reviews
  http:
  - route:
    - destination:
        host: reviews
        subset: v2
EOF
2. Add a 2-second delay to calls to the ratings service:
kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: ratings
spec:
  hosts:
  - ratings
  http:
  - fault:
      delay:
        percent: 100
        fixedDelay: 2s
    route:
    - destination:
        host: ratings
        subset: v1
EOF
3. Open the Bookinfo URL http://$GATEWAY_URL/productpage in your browser.
You should see the application working normally (the rating stars are displayed), but every page refresh now takes an extra 2 seconds.
4. Now add a half-second request timeout for calls to the reviews service:
kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
  - reviews
  http:
  - route:
    - destination:
        host: reviews
        subset: v2
    timeout: 0.5s
EOF
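After applying the timeout, refreshing /productpage should return in roughly one second instead of two, with reviews reported as unavailable. A hedged way to confirm this from the shell (the ~1 second comes from the 0.5 s timeout plus the one retry productpage performs):
time curl -s "http://${GATEWAY_URL}/productpage" > /dev/null
# Expect roughly 1 second of wall-clock time; the rendered page, if inspected, reports
# that product reviews are unavailable because the call to reviews timed out.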
Circuit breaking
Before you begin
This task shows how to configure circuit breaking for connections, requests, and outlier detection.
Circuit breaking is an important pattern for building resilient microservice applications. It lets your application limit the impact of failures, latency spikes, and other undesirable effects of network peculiarities.
In this task, you will configure circuit-breaking rules and then test the configuration by intentionally "tripping" the circuit breaker.
- Install Istio by following the installation guide.
- Start the httpbin sample. If you have enabled automatic sidecar injection, deploy the httpbin service with:
kubectl apply -f samples/httpbin/httpbin.yaml
Otherwise, you must manually inject the sidecar before deploying the httpbin application:
kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml)
The httpbin application serves as the backend service for this task.
Configuring the circuit breaker
- Create a destination rule that applies circuit-breaking settings when calling the httpbin service:
If your Istio installation has mutual TLS authentication enabled, you must add the TLS traffic policy mode: ISTIO_MUTUAL to the DestinationRule before applying it. Otherwise requests will fail with 503 errors, as described in the Istio documentation.
kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: httpbin
spec:
  host: httpbin
  trafficPolicy:
    connectionPool:
      tcp:
        maxConnections: 1
      http:
        http1MaxPendingRequests: 1
        maxRequestsPerConnection: 1
    outlierDetection:
      consecutive5xxErrors: 1
      interval: 1s
      baseEjectionTime: 3m
      maxEjectionPercent: 100
EOF
- Verify that the destination rule was created correctly:
$ kubectl get destinationrule httpbin -o yaml
apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
...
spec:
  host: httpbin
  trafficPolicy:
    connectionPool:
      http:
        http1MaxPendingRequests: 1
        maxRequestsPerConnection: 1
      tcp:
        maxConnections: 1
    outlierDetection:
      baseEjectionTime: 3m
      consecutive5xxErrors: 1
      interval: 1s
      maxEjectionPercent: 100
Adding a client
Create a client to send traffic to the httpbin service. The client is a load-testing tool called Fortio, which lets you control the number of connections, the concurrency, and the delay of outgoing HTTP requests. You will use it to trip the circuit-breaker policies set in the DestinationRule above.
- Inject the client with the Istio sidecar proxy so that its network interactions are managed by Istio.
If you have enabled automatic sidecar injection, deploy the fortio client directly:
$ kubectl apply -f samples/httpbin/sample-client/fortio-deploy.yaml
Otherwise, manually inject the sidecar before deploying the fortio application:
$ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/sample-client/fortio-deploy.yaml)
- Log in to the client pod and use the Fortio tool to call the httpbin service. The -curl flag sends a single call:
$ export FORTIO_POD=$(kubectl get pods -l app=fortio -o 'jsonpath={.items[0].metadata.name}')
$ kubectl exec "$FORTIO_POD" -c fortio -- /usr/bin/fortio curl -quiet http://httpbin:8000/get
HTTP/1.1 200 OK
server: envoy
date: Tue, 25 Feb 2020 20:25:52 GMT
content-type: application/json
content-length: 586
access-control-allow-origin: *
access-control-allow-credentials: true
x-envoy-upstream-service-time: 36

{
  "args": {},
  "headers": {
    "Content-Length": "0",
    "Host": "httpbin:8000",
    "User-Agent": "fortio.org/fortio-1.3.1",
    "X-B3-Parentspanid": "8fc453fb1dec2c22",
    "X-B3-Sampled": "1",
    "X-B3-Spanid": "071d7f06bc94943c",
    "X-B3-Traceid": "86a929a0e76cda378fc453fb1dec2c22",
    "X-Forwarded-Client-Cert": "By=spiffe://cluster.local/ns/default/sa/httpbin;Hash=68bbaedefe01ef4cb99e17358ff63e92d04a4ce831a35ab9a31d3c8e06adb038;Subject=\"\";URI=spiffe://cluster.local/ns/default/sa/default"
  },
  "origin": "127.0.0.1",
  "url": "http://httpbin:8000/get"
}
You can see the call to the backend service succeeded. Now it is time to test the circuit breaker.
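Before deliberately tripping the breaker, it can be instructive to confirm that traffic within the configured limits passes cleanly. A hedged baseline run with a single connection (the same Fortio flags used below; the only assumption is that one serial connection stays under maxConnections: 1):
kubectl exec "$FORTIO_POD" -c fortio -- /usr/bin/fortio load -c 1 -qps 0 -n 10 -loglevel Warning http://httpbin:8000/get
# With concurrency 1, every request should return Code 200; 503s should only start to
# appear once the connection/pending limits in the DestinationRule are exceeded.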
Tripping the circuit breaker
In the DestinationRule you defined maxConnections: 1 and http1MaxPendingRequests: 1. These rules mean that if you exceed one concurrent connection and request, the istio-proxy sidecar will short-circuit further requests and connections.
- Send 20 requests (-n 20) with two concurrent connections (-c 2):
$ kubectl exec "$FORTIO_POD" -c fortio -- /usr/bin/fortio load -c 2 -qps 0 -n 20 -loglevel Warning http://httpbin:8000/get
20:33:46 I logger.go:97> Log level is now 3 Warning (was 2 Info)
Fortio 1.3.1 running at 0 queries per second, 6->6 procs, for 20 calls: http://httpbin:8000/get
Starting at max qps with 2 thread(s) [gomax 6] for exactly 20 calls (10 per thread + 0)
20:33:46 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:33:47 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:33:47 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
Ended after 59.8524ms : 20 calls. qps=334.16
Aggregated Function Time : count 20 avg 0.0056869 +/- 0.003869 min 0.000499 max 0.0144329 sum 0.113738
# range, mid point, percentile, count
>= 0.000499 <= 0.001 , 0.0007495 , 10.00, 2
> 0.001 <= 0.002 , 0.0015 , 15.00, 1
> 0.003 <= 0.004 , 0.0035 , 45.00, 6
> 0.004 <= 0.005 , 0.0045 , 55.00, 2
> 0.005 <= 0.006 , 0.0055 , 60.00, 1
> 0.006 <= 0.007 , 0.0065 , 70.00, 2
> 0.007 <= 0.008 , 0.0075 , 80.00, 2
> 0.008 <= 0.009 , 0.0085 , 85.00, 1
> 0.011 <= 0.012 , 0.0115 , 90.00, 1
> 0.012 <= 0.014 , 0.013 , 95.00, 1
> 0.014 <= 0.0144329 , 0.0142165 , 100.00, 1
# target 50% 0.0045
# target 75% 0.0075
# target 90% 0.012
# target 99% 0.0143463
# target 99.9% 0.0144242
Sockets used: 4 (for perfect keepalive, would be 2)
Code 200 : 17 (85.0 %)
Code 503 : 3 (15.0 %)
Response Header Sizes : count 20 avg 195.65 +/- 82.19 min 0 max 231 sum 3913
Response Body/Total Sizes : count 20 avg 729.9 +/- 205.4 min 241 max 817 sum 14598
All done 20 calls (plus 0 warmup) 5.687 ms avg, 334.2 qps
Interestingly, almost all of the requests still made it through. The istio-proxy does allow for some leeway:
Code 200 : 17 (85.0 %)
Code 503 : 3 (15.0 %)
- Bring the number of concurrent connections up to 3:
kubectl exec "$FORTIO_POD" -c fortio -- /usr/bin/fortio load -c 3 -qps 0 -n 30 -loglevel Warning http://httpbin:8000/get
20:32:30 I logger.go:97> Log level is now 3 Warning (was 2 Info)
Fortio 1.3.1 running at 0 queries per second, 6->6 procs, for 30 calls: http://httpbin:8000/get
Starting at max qps with 3 thread(s) [gomax 6] for exactly 30 calls (10 per thread + 0)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
20:32:30 W http_client.go:679> Parsed non ok code 503 (HTTP/1.1 503)
Ended after 51.9946ms : 30 calls. qps=576.98
Aggregated Function Time : count 30 avg 0.0040001633 +/- 0.003447 min 0.0004298 max 0.015943 sum 0.1200049
# range, mid point, percentile, count
>= 0.0004298 <= 0.001 , 0.0007149 , 16.67, 5
> 0.001 <= 0.002 , 0.0015 , 36.67, 6
> 0.002 <= 0.003 , 0.0025 , 50.00, 4
> 0.003 <= 0.004 , 0.0035 , 60.00, 3
> 0.004 <= 0.005 , 0.0045 , 66.67, 2
> 0.005 <= 0.006 , 0.0055 , 76.67, 3
> 0.006 <= 0.007 , 0.0065 , 83.33, 2
> 0.007 <= 0.008 , 0.0075 , 86.67, 1
> 0.008 <= 0.009 , 0.0085 , 90.00, 1
> 0.009 <= 0.01 , 0.0095 , 96.67, 2
> 0.014 <= 0.015943 , 0.0149715 , 100.00, 1
# target 50% 0.003
# target 75% 0.00583333
# target 90% 0.009
# target 99% 0.0153601
# target 99.9% 0.0158847
Sockets used: 20 (for perfect keepalive, would be 3)
Code 200 : 11 (36.7 %)
Code 503 : 19 (63.3 %)
Response Header Sizes : count 30 avg 84.366667 +/- 110.9 min 0 max 231 sum 2531
Response Body/Total Sizes : count 30 avg 451.86667 +/- 277.1 min 241 max 817 sum 13556
All done 30 calls (plus 0 warmup) 4.000 ms avg, 577.0 qps
Now you begin to see the expected circuit-breaking behavior: only 36.7% of the requests succeeded and the rest were trapped by the circuit breaker:
Code 200 : 11 (36.7 %)
Code 503 : 19 (63.3 %)
- Query the istio-proxy stats to see more circuit-breaking details:
$ kubectl exec "$FORTIO_POD" -c istio-proxy -- pilot-agent request GET stats | grep httpbin | grep pending
cluster.outbound|8000||httpbin.default.svc.cluster.local.circuit_breakers.default.remaining_pending: 1
cluster.outbound|8000||httpbin.default.svc.cluster.local.circuit_breakers.default.rq_pending_open: 0
cluster.outbound|8000||httpbin.default.svc.cluster.local.circuit_breakers.high.rq_pending_open: 0
cluster.outbound|8000||httpbin.default.svc.cluster.local.upstream_rq_pending_active: 0
cluster.outbound|8000||httpbin.default.svc.cluster.local.upstream_rq_pending_failure_eject: 0
cluster.outbound|8000||httpbin.default.svc.cluster.local.upstream_rq_pending_overflow: 21
cluster.outbound|8000||httpbin.default.svc.cluster.local.upstream_rq_pending_total: 29
The upstream_rq_pending_overflow value of 21 means that 21 calls so far have been flagged for circuit breaking. A related query for the outlier-detection counters is sketched below.
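The overflow counters above come from the connection-pool limits; the outlierDetection settings export their own Envoy counters. A hedged way to inspect them (the exact stat names can vary with the Envoy version bundled in your Istio release):
kubectl exec "$FORTIO_POD" -c istio-proxy -- pilot-agent request GET stats | grep httpbin | grep outlier_detection
# Look for counters such as outlier_detection.ejections_enforced_total to see whether any
# httpbin endpoints were ejected after returning consecutive 5xx responses.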
Cleaning up
- Remove the rules:
$ kubectl delete destinationrule httpbin
- Shut down the httpbin service and the client:
$ kubectl delete -f samples/httpbin/sample-client/fortio-deploy.yaml
$ kubectl delete -f samples/httpbin/httpbin.yaml
Security governance
JWT claim based routing
This task shows you how to route requests based on JWT claims at the Istio ingress gateway, using request authentication and virtual services.
Note: this feature is only supported at the Istio ingress gateway, and it requires both request authentication and a virtual service to validate the JWT and route on its claims correctly.
The following describes an experimental feature intended for evaluation only.
Before you begin
- Install Istio by following the Istio installation guide.
- Deploy an httpbin workload in the foo namespace and expose it through the Istio ingress gateway with the following commands:
$ kubectl create ns foo
$ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
$ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin-gateway.yaml) -n foo
- Follow the instructions in "Determining the ingress IP and ports" to define the INGRESS_HOST and INGRESS_PORT environment variables.
- Verify that the httpbin workload and the ingress gateway are working as expected with this command:
$ curl "$INGRESS_HOST:$INGRESS_PORT"/headers -s -o /dev/null -w "%{http_code}\n"
200
If you do not see the expected output, retry after a few seconds; caching and propagation overhead can cause a delay.
Configuring ingress routing based on JWT claims
The Istio ingress gateway supports routing based on authenticated JWTs, which is useful for routing on end-user identity and more secure than routing on unauthenticated HTTP attributes such as the path or headers.
- To route based on JWT claims, first create a request authentication to enable JWT validation:
$ kubectl apply -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: RequestAuthentication
metadata:
  name: ingress-jwt
  namespace: istio-system
spec:
  selector:
    matchLabels:
      istio: ingressgateway
  jwtRules:
  - issuer: "testing@secure.istio.io"
    jwksUri: "https://raw.githubusercontent.com/istio/istio/release-1.15/security/tools/jwt/samples/jwks.json"
EOF
This request authentication enables JWT validation at the Istio gateway so that validated JWT claims can later be used in a virtual service for routing.
The request authentication is applied only to the ingress gateway because JWT claim based routing is only supported there.
Note: request authentication only checks a JWT if one is present in the request. To make the JWT a requirement and reject requests that do not carry one, apply the authorization policy described in this task (a sketch is included after the verification steps below).
- Update the virtual service to route based on the validated JWT claims:
$ kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: httpbin
  namespace: foo
spec:
  hosts:
  - "*"
  gateways:
  - httpbin-gateway
  http:
  - match:
    - uri:
        prefix: /headers
      headers:
        "@request.auth.claims.groups":
          exact: group1
    route:
    - destination:
        port:
          number: 8000
        host: httpbin
EOF
The virtual service uses the reserved header name "@request.auth.claims.groups" to match the groups claim of the JWT. The @ prefix means it matches metadata produced by JWT validation rather than an HTTP header. String claims, lists of strings, and nested claims are supported; use . as the separator for nested claim names. For example, "@request.auth.claims.name.givenName" matches the nested claims name and givenName. Claim names that contain the . character are not currently supported.
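As an illustration of the nested-claim syntax just described (the claim names here are purely hypothetical; the rest of the rule mirrors the virtual service above), a match on a nested givenName claim could look like this:
http:
- match:
  - uri:
      prefix: /headers
    headers:
      "@request.auth.claims.name.givenName":
        exact: Jane
  route:
  - destination:
      port:
        number: 8000
      host: httpbin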
Validating ingress routing based on JWT claims
- Verify that the ingress gateway returns HTTP 404 without a JWT:
$ curl -s -I "http://$INGRESS_HOST:$INGRESS_PORT/headers"
HTTP/1.1 404 Not Found
...
You can also create an authorization policy to explicitly reject requests with HTTP 403 when the JWT is missing (see the sketch after these steps).
- Verify that the ingress gateway returns HTTP 401 for an invalid JWT:
$ curl -s -I "http://$INGRESS_HOST:$INGRESS_PORT/headers" -H "Authorization: Bearer some.invalid.token"
HTTP/1.1 401 Unauthorized
...
The 401 is returned by request authentication because the JWT failed validation.
- Verify that the ingress gateway routes the request when it carries a valid JWT containing the groups: group1 claim:
$ TOKEN_GROUP=$(curl https://raw.githubusercontent.com/istio/istio/release-1.15/security/tools/jwt/samples/groups-scope.jwt -s) && echo "$TOKEN_GROUP" | cut -d '.' -f2 - | base64 --decode -
{"exp":3537391104,"groups":["group1","group2"],"iat":1537391104,"iss":"testing@secure.istio.io","scope":["scope1","scope2"],"sub":"testing@secure.istio.io"}
$ curl -s -I "http://$INGRESS_HOST:$INGRESS_PORT/headers" -H "Authorization: Bearer $TOKEN_GROUP"
HTTP/1.1 200 OK
...
- Verify that the ingress gateway returns HTTP 404 for a valid JWT that does not contain the groups: group1 claim:
$ TOKEN_NO_GROUP=$(curl https://raw.githubusercontent.com/istio/istio/release-1.15/security/tools/jwt/samples/demo.jwt -s) && echo "$TOKEN_NO_GROUP" | cut -d '.' -f2 - | base64 --decode -
{"exp":4685989700,"foo":"bar","iat":1532389700,"iss":"testing@secure.istio.io","sub":"testing@secure.istio.io"}
$ curl -s -I "http://$INGRESS_HOST:$INGRESS_PORT/headers" -H "Authorization: Bearer $TOKEN_NO_GROUP"
HTTP/1.1 404 Not Found
...
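As referenced above, a hedged sketch of an authorization policy that makes a valid JWT mandatory at the ingress gateway, so that requests without a token are rejected with 403 instead of falling through to a 404 route miss. The policy name is illustrative; the selector mirrors the one used by the RequestAuthentication:
kubectl apply -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: ingress-jwt-require
  namespace: istio-system
spec:
  selector:
    matchLabels:
      istio: ingressgateway
  action: ALLOW
  rules:
  - from:
    - source:
        # Only allow requests that carry a validated JWT principal.
        requestPrincipals: ["*"]
EOF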
Clean up
- Remove the foo namespace:
$ kubectl delete namespace foo
- Remove the request authentication:
$ kubectl delete requestauthentication ingress-jwt -n istio-system
kubectl apply -f - <<EOF
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: productpage
spec:
  hosts:
  - productpage
  http:
  - route:
    - destination:
        host: productpage
        subset: v1
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
  - reviews
  http:
  - route:
    - destination:
        host: reviews
        subset: v1
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: ratings
spec:
  hosts:
  - ratings
  http:
  - route:
    - destination:
        host: ratings
        subset: v1
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: details
spec:
  hosts:
  - details
  http:
  - route:
    - destination:
        host: details
        subset: v1
---
EOF
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx:1.14.2
    ports:
    - containerPort: 80
EOF
Route rule configuration
Deploy the application
kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml
Test the service
kubectl exec -it $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o "<title>.*</title>"
Expose the service
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: bookinfo-gateway
spec:
  selector:
    istio: ingressgateway # use istio default controller
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "*"
---
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: bookinfo
spec:
  hosts:
  - "*"
  gateways:
  - bookinfo-gateway
  http:
  - match:
    - uri:
        exact: /productpage
    - uri:
        prefix: /static
    - uri:
        exact: /login
    - uri:
        exact: /logout
    - uri:
        prefix: /api/v1/products
    route:
    - destination:
        host: productpage
        port:
          number: 9080
Apply default destination rules
kubectl apply -f samples/bookinfo/networking/destination-rule-all.yaml
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: productpage
spec:
  host: productpage
  subsets:
  - name: v1
    labels:
      version: v1
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: reviews
spec:
  host: reviews
  subsets:
  - name: v1
    labels:
      version: v1
  - name: v2
    labels:
      version: v2
  - name: v3
    labels:
      version: v3
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: ratings
spec:
  host: ratings
  subsets:
  - name: v1
    labels:
      version: v1
  - name: v2
    labels:
      version: v2
  - name: v2-mysql
    labels:
      version: v2-mysql
  - name: v2-mysql-vm
    labels:
      version: v2-mysql-vm
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: details
spec:
  host: details
  subsets:
  - name: v1
    labels:
      version: v1
  - name: v2
    labels:
      version: v2
---
Route all services to the v1 version
kubectl apply -f samples/bookinfo/networking/virtual-service-all-v1.yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: productpage
spec:
  hosts:
  - productpage
  http:
  - route:
    - destination:
        host: productpage
        subset: v1
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
  - reviews
  http:
  - route:
    - destination:
        host: reviews
        subset: v1
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: ratings
spec:
  hosts:
  - ratings
  http:
  - route:
    - destination:
        host: ratings
        subset: v1
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: details
spec:
  hosts:
  - details
  http:
  - route:
    - destination:
        host: details
        subset: v1
---
External access
export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].nodePort}')
export TCP_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="tcp")].nodePort}')
export INGRESS_HOST=$(kubectl get po -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].status.hostIP}')
export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT
echo $GATEWAY_URL
User-based routing
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
  - reviews
  http:
  - match:
    - headers:
        end-user:
          exact: jason
    route:
    - destination:
        host: reviews
        subset: v2
  - route:
    - destination:
        host: reviews
        subset: v1
Fault injection
Route all reviews traffic to v2
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
  - reviews
  http:
  - route:
    - destination:
        host: reviews
        subset: v2
Add a 2-second delay to calls to the ratings service:
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: ratings
spec:
  hosts:
  - ratings
  http:
  - fault:
      delay:
        percent: 100
        fixedDelay: 2s
    route:
    - destination:
        host: ratings
        subset: v1
Open the Bookinfo URL http://$GATEWAY_URL/productpage in your browser.
The application appears to work normally (the rating stars are displayed), but every page refresh now incurs an extra 2-second delay.
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
  - reviews
  http:
  - route:
    - destination:
        host: reviews
        subset: v2
    timeout: 0.5s
Clean up
When you are finished experimenting with the Bookinfo sample, delete and clean it up with the following commands:
- Delete the routing rules and terminate the application pods:
samples/bookinfo/platform/kube/cleanup.sh
- Confirm that the application has shut down:
$ kubectl get virtualservices    #-- there should be no virtual services
$ kubectl get destinationrules   #-- there should be no destination rules
$ kubectl get gateway            #-- there should be no gateway
$ kubectl get pods               #-- the Bookinfo pods should be deleted
Security
Before you begin
kubectl create ns foo
kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin-gateway.yaml) -n foo
Test access
export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].nodePort}')
export TCP_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="tcp")].nodePort}')
export INGRESS_HOST=$(kubectl get po -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].status.hostIP}')
export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT
echo $GATEWAY_URL
curl "$INGRESS_HOST:$INGRESS_PORT"/headers -s -o /dev/null -w "%{http_code}\n"
To route based on JWT claims, first create a request authentication to enable JWT validation:
apiVersion: security.istio.io/v1beta1
kind: RequestAuthentication
metadata:
  name: ingress-jwt
  namespace: istio-system
spec:
  selector:
    matchLabels:
      istio: ingressgateway
  jwtRules:
  - issuer: "testing@secure.istio.io"
    jwksUri: "https://raw.githubusercontent.com/istio/istio/release-1.15/security/tools/jwt/samples/jwks.json"
This request authentication enables JWT validation at the Istio gateway so that validated JWT claims can later be used in a virtual service for routing.
The request authentication is applied only to the ingress gateway because JWT claim based routing is only supported there.
Note: request authentication only checks a JWT if one is present; to make the JWT mandatory and reject requests without one, apply the authorization policy described in this task.
Update the virtual service to route based on the validated JWT claims:
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: httpbin
  namespace: foo
spec:
  hosts:
  - "*"
  gateways:
  - httpbin-gateway
  http:
  - match:
    - uri:
        prefix: /headers
      headers:
        "@request.auth.claims.groups":
          exact: group1
    route:
    - destination:
        port:
          number: 8000
        host: httpbin
The virtual service uses the reserved header name "@request.auth.claims.groups" to match the groups claim of the JWT. The @ prefix means it matches metadata produced by JWT validation rather than an HTTP header. String claims, lists of strings, and nested claims are supported; use . as the separator for nested claim names. For example, "@request.auth.claims.name.givenName" matches the nested claims name and givenName. Claim names that contain the . character are not currently supported.
Validating ingress routing based on JWT claims
- Verify that the ingress gateway returns HTTP 404 without a JWT:
curl -s -I "http://$INGRESS_HOST:$INGRESS_PORT/headers"
HTTP/1.1 404 Not Found
...
You can also create an authorization policy to explicitly reject requests with HTTP 403 when the JWT is missing.
- Verify that the ingress gateway returns HTTP 401 for an invalid JWT:
$ curl -s -I "http://$INGRESS_HOST:$INGRESS_PORT/headers" -H "Authorization: Bearer some.invalid.token"
HTTP/1.1 401 Unauthorized
...
The 401 is returned by request authentication because the JWT failed validation.
- Verify that the ingress gateway routes the request when it carries a valid JWT containing the groups: group1 claim:
$ TOKEN_GROUP=$(curl https://raw.githubusercontent.com/istio/istio/release-1.15/security/tools/jwt/samples/groups-scope.jwt -s) && echo "$TOKEN_GROUP" | cut -d '.' -f2 - | base64 --decode -
{"exp":3537391104,"groups":["group1","group2"],"iat":1537391104,"iss":"testing@secure.istio.io","scope":["scope1","scope2"],"sub":"testing@secure.istio.io"}
$ curl -s -I "http://$INGRESS_HOST:$INGRESS_PORT/headers" -H "Authorization: Bearer $TOKEN_GROUP"
HTTP/1.1 200 OK
...
- Verify that the ingress gateway returns HTTP 404 for a valid JWT that does not contain the groups: group1 claim:
$ TOKEN_NO_GROUP=$(curl https://raw.githubusercontent.com/istio/istio/release-1.15/security/tools/jwt/samples/demo.jwt -s) && echo "$TOKEN_NO_GROUP" | cut -d '.' -f2 - | base64 --decode -
{"exp":4685989700,"foo":"bar","iat":1532389700,"iss":"testing@secure.istio.io","sub":"testing@secure.istio.io"}
$ curl -s -I "http://$INGRESS_HOST:$INGRESS_PORT/headers" -H "Authorization: Bearer $TOKEN_NO_GROUP"
HTTP/1.1 404 Not Found
...
Clean up
- Remove the foo namespace:
$ kubectl delete namespace foo
- Remove the request authentication:
$ kubectl delete requestauthentication ingress-jwt -n istio-system
Plugging in CA certificates and keys
The following is for demonstration only. For a production cluster, it is strongly recommended to use a production-ready CA such as HashiCorp Vault, and it is good practice to manage the root CA on an offline machine with strong security protections.
Go 1.18 disables support for SHA-1 signatures by default. If you are generating certificates on macOS, make sure you are using OpenSSL; see GitHub issue 38049 for details.
- In the top-level directory of the Istio release, create a directory to hold the certificates and keys:
$ mkdir -p certs
$ pushd certs
- Generate the root certificate and key:
$ make -f ../tools/certs/Makefile.selfsigned.mk root-ca
This generates the following files:
root-cert.pem: the generated root certificate
root-key.pem: the generated root key
root-ca.conf: the openssl configuration used to generate the root certificate
root-cert.csr: the generated CSR for the root certificate
- For each cluster, generate an intermediate certificate and key for the Istio CA. The following is an example for cluster1:
$ make -f ../tools/certs/Makefile.selfsigned.mk cluster1-cacerts
Running the command above generates the following files in a directory named cluster1:
ca-cert.pem: the generated intermediate certificate
ca-key.pem: the generated intermediate key
cert-chain.pem: the generated certificate chain, used by istiod
root-cert.pem: the root certificate
You can replace cluster1 with a string of your choice. For example, with the argument cluster2-cacerts you create the certificates and keys in a directory named cluster2. If you are doing this on an offline machine, copy the generated directory to a machine with access to the clusters.
- In each cluster, create a secret named cacerts that includes all of the input files ca-cert.pem, ca-key.pem, root-cert.pem, and cert-chain.pem. For example, on cluster1:
$ kubectl create namespace istio-system
$ kubectl create secret generic cacerts -n istio-system \
      --from-file=cluster1/ca-cert.pem \
      --from-file=cluster1/ca-key.pem \
      --from-file=cluster1/root-cert.pem \
      --from-file=cluster1/cert-chain.pem
- Return to the top-level directory of the Istio installation:
$ popd
Deploy Istio
- Deploy Istio using the demo configuration profile. Istio's CA will read the certificates and key from the secret-mounted files.
$ istioctl install --set profile=demo
Deploy the sample services
- Deploy the httpbin and sleep sample services:
$ kubectl create ns foo
$ kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) -n foo
$ kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) -n foo
- Deploy a policy for the workloads in the foo namespace so that they only accept mutual TLS traffic:
$ kubectl apply -n foo -f - <<EOF
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: "default"
spec:
  mtls:
    mode: STRICT
EOF
Verify the certificates
In this section, verify that the workload certificates are signed by the certificates you plugged into the CA. This requires openssl to be installed on your machine.
- Sleep for 20 seconds before retrieving httpbin's certificate chain, so the mTLS policy can take effect. Because the CA certificate used in this example is self-signed, the verify error:num=19:self signed certificate in certificate chain error from the openssl command is expected.
$ sleep 20; kubectl exec "$(kubectl get pod -l app=sleep -n foo -o jsonpath={.items..metadata.name})" -c istio-proxy -n foo -- openssl s_client -showcerts -connect httpbin.foo:8000 > httpbin-proxy-cert.txt
- Parse the certificates in the certificate chain:
$ sed -n '/-----BEGIN CERTIFICATE-----/{:start /-----END CERTIFICATE-----/!{N;b start};/.*/p}' httpbin-proxy-cert.txt > certs.pem
$ awk 'BEGIN {counter=0;} /BEGIN CERT/{counter++} { print > "proxy-cert-" counter ".pem"}' < certs.pem
- Verify that the root certificate is the same as the one specified by the administrator:
$ openssl x509 -in certs/cluster1/root-cert.pem -text -noout > /tmp/root-cert.crt.txt
$ openssl x509 -in ./proxy-cert-3.pem -text -noout > /tmp/pod-root-cert.crt.txt
$ diff -s /tmp/root-cert.crt.txt /tmp/pod-root-cert.crt.txt
Files /tmp/root-cert.crt.txt and /tmp/pod-root-cert.crt.txt are identical
- Verify that the CA certificate is the same as the one specified by the administrator:
$ openssl x509 -in certs/cluster1/ca-cert.pem -text -noout > /tmp/ca-cert.crt.txt
$ openssl x509 -in ./proxy-cert-2.pem -text -noout > /tmp/pod-cert-chain-ca.crt.txt
$ diff -s /tmp/ca-cert.crt.txt /tmp/pod-cert-chain-ca.crt.txt
Files /tmp/ca-cert.crt.txt and /tmp/pod-cert-chain-ca.crt.txt are identical
- Verify the certificate chain from the root certificate to the workload certificate:
$ openssl verify -CAfile <(cat certs/cluster1/ca-cert.pem certs/cluster1/root-cert.pem) ./proxy-cert-1.pem
./proxy-cert-1.pem: OK
Clean up
- Remove the certificates, keys, and intermediate files from your local disk:
$ rm -rf certs
- Remove the cacerts secret and the foo and istio-system namespaces:
$ kubectl delete secret cacerts -n istio-system
$ kubectl delete ns foo istio-system
- Remove the Istio components by following the uninstall instructions.