云计算第四阶段: cloud二周目 07-08
cloud 07
一、k8s服务管理
https://i-blog.csdnimg.cn/direct/73900949d73b494e8e329e67d661c7b5.png
https://i-blog.csdnimg.cn/direct/1d4d2e0987ff4cc09254d99fd3ba530e.png
创建服务
# 资源清单文件
# kubectl create service clusterip websvc --tcp=80:80 --dry-run=client -o yaml
# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
name: websvc
spec:
type: ClusterIP
selector:
app: web
ports:
- protocol: TCP
port: 80
targetPort: 80
# kubectl apply -f websvc.yaml
service/websvc created
# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
kubernetes ClusterIP 10.245.0.1 <none> 443/TCP
websvc ClusterIP 10.245.5.18 <none> 80/TCP

解析域名
# 安装工具软件包
# dnf install -y bind-utils
# 查看 DNS 服务地址
# kubectl -n kube-system get service kube-dns
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
kube-dns ClusterIP 10.245.0.10 <none> 53/UDP,53/TCP,9153/TCP
# 域名解析测试
# host websvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases:
websvc.default.svc.cluster.local has address 10.245.5.18

创建后端应用
# vim web1.yaml
---
kind: Pod
apiVersion: v1
metadata:
name: web1
labels:
app: web # 服务靠标签寻找后端
spec:
containers:
- name: apache
image: myos:httpd
# kubectl apply -f web1.yaml
pod/web1 created
# curl http://10.245.5.18
Welcome to The Apache.
负载均衡
# sed 's,web1,web2,' web1.yaml |kubectl apply -f -
pod/web2 created
# sed 's,web1,web3,' web1.yaml |kubectl apply -f -
pod/web3 created
# curl -s http://10.245.5.18/info.php |grep php_host
php_host: web1
# curl -s http://10.245.5.18/info.php |grep php_host
php_host: web2
# curl -s http://10.245.5.18/info.php |grep php_host
php_host: web3

固定 IP 服务
# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
name: websvc
spec:
type: ClusterIP
clusterIP: 10.245.1.80 # 可以设置 ClusterIP
selector:
app: web
ports:
- protocol: TCP
port: 80
targetPort: 80
# kubectl replace --force -f websvc.yaml
service "websvc" deleted
service/websvc replaced
# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
kubernetes ClusterIP 10.245.0.1 <none> 443/TCP
websvc ClusterIP 10.245.1.80 <none> 80/TCP

端口别名
# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
name: websvc
spec:
type: ClusterIP
clusterIP: 10.245.1.80
selector:
app: web
ports:
- protocol: TCP
port: 80
targetPort: myhttp # 使用别名查找后端服务端口
# kubectl replace --force -f websvc.yaml
service "websvc" deleted
service/websvc replaced
# kubectl delete pod --all
pod "web1" deleted
pod "web2" deleted
pod "web3" deleted
# vim web1.yaml
---
kind: Pod
apiVersion: v1
metadata:
name: web1
labels:
app: web
spec:
containers:
- name: apache
image: myos:httpd
ports: # 配置端口规范
- name: myhttp # 端口别名
protocol: TCP # 协议
containerPort: 80 # 端口号
# kubectl apply -f web1.yaml
pod/web1 created
# curl http://10.245.1.80
Welcome to The Apache.

服务排错
---
kind: Service
apiVersion: v1
metadata:
name: web123
spec:
type: ClusterIP
clusterIP: 192.168.1.88
selector:
app: apache
ports:
- protocol: TCP
port: 80
targetPort: web

nodePort
https://i-blog.csdnimg.cn/direct/66570d8c541a4dbc8fc2fd642953e490.png
https://i-blog.csdnimg.cn/direct/32d3158a9e7c4e2582f6eaa6e9845474.png
对外发布服务
# vim mysvc.yaml
---
kind: Service
apiVersion: v1
metadata:
name: mysvc
spec:
type: NodePort # 服务类型
selector:
app: web
ports:
- protocol: TCP
port: 80
nodePort: 30080 # 映射端口号
targetPort: 80
# kubectl apply -f mysvc.yaml
service/mysvc configured
# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
kubernetes ClusterIP 10.245.0.1 <none> 443/TCP
websvc ClusterIP 10.245.1.80 <none> 80/TCP
mysvc NodePort 10.245.3.88 <none> 80:30080/TCP
# curl http://node-0001:30080
Welcome to The Apache.
# curl http://node-0002:30080
Welcome to The Apache.
# curl http://node-0003:30080
Welcome to The Apache.
# curl http://node-0004:30080
Welcome to The Apache.
# curl http://node-0005:30080
Welcome to The Apache.

二、Ingress 安装与策略配置
https://i-blog.csdnimg.cn/direct/d7c666d7f54b4ff68c7b161cabb1b083.png
https://i-blog.csdnimg.cn/direct/5d605b0f9d8b4164bb13ec9acd2c49e4.png
安装控制器
# cd plugins/ingress
# docker load -i ingress.tar.xz
# docker images|while read i t _;do
[[ "${t}" == "TAG" ]] && continue
[[ "${i}" =~ ^"harbor:443/".+ ]] && continue
docker tag ${i}:${t} harbor:443/plugins/${i##*/}:${t}
docker push harbor:443/plugins/${i##*/}:${t}
docker rmi ${i}:${t} harbor:443/plugins/${i##*/}:${t}
done
# sed -ri 's,^(\s*image: )(.*/)?(.+),\1harbor:443/plugins/\3,' deploy.yaml
443: image: registry.k8s.io/ingress-nginx/controller:v1.9.6
546: image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06
599: image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06
# kubectl apply -f deploy.yaml
# kubectl -n ingress-nginx get pods
NAME READY STATUS RESTARTS
ingress-nginx-admission-create--1-lm52c 0/1 Completed 0
ingress-nginx-admission-patch--1-sj2lz 0/1 Completed 0
ingress-nginx-controller-5664857866-tql24 1/1 Running 0

验证后端服务
# kubectl get pods,services
NAME READY STATUS RESTARTS AGE
pod/web1 1/1 Running 0 35m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S)
service/kubernetes ClusterIP 10.245.0.1 <none> 443/TCP
service/websvc ClusterIP 10.245.1.80 <none> 80/TCP
service/mysvc NodePort 10.245.3.88 <none> 80:30080/TCP
# curl http://10.245.1.80
Welcome to The Apache.

对外发布服务
# 查询 ingress 控制器类名称
# kubectl get ingressclasses.networking.k8s.io
NAME CONTROLLER PARAMETERS AGE
nginx k8s.io/ingress-nginx <none> 5m7s
# 资源清单文件
# kubectl create ingress mying --class=nginx --rule=nsd.tedu.cn/*=mysvc:80 --dry-run=client -o yaml
# vim mying.yaml
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
name: mying
spec:
ingressClassName: nginx
rules:
- host: nsd.tedu.cn
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: websvc
port:
number: 80
# kubectl apply -f mying.yaml
ingress.networking.k8s.io/mying created
# kubectl get ingress
NAME CLASS HOSTS ADDRESS PORTS
mying nginx nsd.tedu.cn 192.168.1.51 80
# curl -H "Host: nsd.tedu.cn" http://192.168.1.51
Welcome to The Apache.
三、Dashboard 安装
#下面给大家介绍下新的k8s插件
web 管理插件
https://i-blog.csdnimg.cn/direct/bdeea53d0a494c3d82e1dc3924ed36b0.png
安装 Dashboard
# cd plugins/dashboard
# docker load -i dashboard.tar.xz
# docker images|while read i t _;do
[[ "${t}" == "TAG" ]] && continue
[[ "${i}" =~ ^"harbor:443/".+ ]] && continue
docker tag ${i}:${t} harbor:443/plugins/${i##*/}:${t}
docker push harbor:443/plugins/${i##*/}:${t}
docker rmi ${i}:${t} harbor:443/plugins/${i##*/}:${t}
done
# sed -ri 's,^(\s*image: )(.*/)?(.+),\1harbor:443/plugins/\3,' recommended.yaml
193: image: kubernetesui/dashboard:v2.7.0
278: image: kubernetesui/metrics-scraper:v1.0.8
# kubectl apply -f recommended.yaml
# kubectl -n kubernetes-dashboard get pods
NAME READY STATUS RESTARTS
dashboard-metrics-scraper-66f6f56b59-b42ng 1/1 Running 0
kubernetes-dashboard-65ff57f4cf-lwtsk 1/1 Running 0

发布服务
# 查看服务状态
# kubectl -n kubernetes-dashboard get service
NAME TYPE CLUSTER-IP PORT(S)
dashboard-metrics-scraper ClusterIP 10.245.205.236 8000/TCP
kubernetes-dashboard ClusterIP 10.245.215.40 443/TCP
# 获取服务资源对象文件
# sed -n '30,45p' recommended.yaml >dashboard-svc.yaml
# vim dashboard-svc.yaml
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 443
nodePort: 30443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
# kubectl apply -f dashboard-svc.yaml
service/kubernetes-dashboard configured
# kubectl -n kubernetes-dashboard get service
NAME TYPE CLUSTER-IP PORT(S)
dashboard-metrics-scraper ClusterIP 10.245.205.236 8000/TCP
kubernetes-dashboard NodePort 10.245.215.40 443:30443/TCP

https://i-blog.csdnimg.cn/direct/3317ca69fd914f0c81eabde2acf303a2.png
https://i-blog.csdnimg.cn/direct/ccafc85c9b8b49bcaedf49266ac0dd8d.png
https://i-blog.csdnimg.cn/direct/f3a58b0a1a364ff2935815d3fd81b475.png
#记得访问下仪表盘dashboard登录页面
四、RBAC 权限管理
服务账号与权限
创建服务账号
# 资源对象模板
# kubectl -n kubernetes-dashboard create serviceaccount kube-admin --dry-run=client -o yaml
# vim admin-user.yaml
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: kube-admin
namespace: kubernetes-dashboard
# kubectl apply -f admin-user.yaml
serviceaccount/kube-admin created
# kubectl -n kubernetes-dashboard get serviceaccounts
NAME SECRETS AGE
default 0 16m
kube-admin 0 11s
kubernetes-dashboard 0 16m

获取用户 token
# kubectl -n kubernetes-dashboard create token kube-admin
<Base64 编码的令牌数据>

https://i-blog.csdnimg.cn/direct/e39fb259dbbc4068897ed3dd32187354.png
角色与鉴权
https://i-blog.csdnimg.cn/direct/33c2649ac3d249899b976691b1f06b9c.png
https://i-blog.csdnimg.cn/direct/cf6deb9117bb4132b85de8a62131d34a.jpeg
#类似网游DNF里面的角色管理,GM管理员和玩家的关系。
https://i-blog.csdnimg.cn/direct/5cb4a30032f6487487900d868728a16b.jpeg
资源对象 | 描述 | 作用域
ServiceAccount | 服务账号,为 Pod 中运行的进程提供了一个身份 | 单一名称空间
Role | 角色,包含一组代表相关权限的规则 | 单一名称空间
ClusterRole | 角色,包含一组代表相关权限的规则 | 全集群
RoleBinding | 将权限赋予用户,Role、ClusterRole 均可使用 | 单一名称空间
ClusterRoleBinding | 将权限赋予用户,只可以使用 ClusterRole | 全集群

资源对象权限
create | delete | deletecollection | get | list | patch | update | watch
创建 | 删除 | 删除集合 | 获取属性 | 获取列表 | 补丁 | 更新 | 监控

普通角色
# kubectl cluster-info dump |grep authorization-mode
"--authorization-mode=Node,RBAC",
# 资源对象模板
# kubectl -n default create role myrole --resource=pods --verb=get,list --dry-run=client -o yaml
# kubectl -n default create rolebinding kube-admin-role --role=myrole --serviceaccount=kubernetes-dashboard:kube-admin --dry-run=client -o yaml
# vim myrole.yaml
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: myrole
namespace: default
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-admin-role
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: myrole
subjects:
- kind: ServiceAccount
name: kube-admin
namespace: kubernetes-dashboard
# kubectl apply -f myrole.yaml
role.rbac.authorization.k8s.io/myrole created
rolebinding.rbac.authorization.k8s.io/kube-admin-role created
# kubectl delete -f myrole.yaml
role.rbac.authorization.k8s.io "myrole" deleted
rolebinding.rbac.authorization.k8s.io "kube-admin-role" deleted

集群管理员
# kubectl get clusterrole
NAME CREATED AT
admin 2022-06-24T08:11:17Z
cluster-admin 2022-06-24T08:11:17Z
... ...
# 资源对象模板
# kubectl create clusterrolebinding kube-admin-role --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kube-admin --dry-run=client -o yaml
# vim admin-user.yaml
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: kube-admin
namespace: kubernetes-dashboard
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-admin-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kube-admin
namespace: kubernetes-dashboard
# kubectl apply -f admin-user.yaml
serviceaccount/kube-admin unchanged
clusterrolebinding.rbac.authorization.k8s.io/kube-admin-role created

cloud 08
https://i-blog.csdnimg.cn/direct/bb317e9e145645a79ff0854ab8bf0c89.png
#上一小节讲过K8S有控制组件和计算组件。现在我们一起来深入研究K8S的控制组件。

一、Deployment
https://i-blog.csdnimg.cn/direct/b6a85e774e59401cad80c4e9974db7b2.png
https://i-blog.csdnimg.cn/direct/62085289ee6e4662ba3da1c1bd25f933.png
资源清单文件
# kubectl create deployment myweb --image=myos:httpd --dry-run=client -o yaml
# vim mydeploy.yaml
---
kind: Deployment # 资源对象类型
apiVersion: apps/v1 # 版本
metadata: # 元数据
name: mydeploy # 名称
spec: # 详细定义
replicas: 3 # 副本数量
selector: # 定义标签选择器
matchLabels: # 支持 matchExpressions 表达式语法
app: deploy-httpd # 通过标签来确定那个 Pod 由它来管理
template: # 定义用来创建 Pod 的模板,以下为 Pod 定义
metadata:
labels:
app: deploy-httpd
spec:
containers:
- name: apache
image: myos:httpd
配置案例
# 创建控制器
# kubectl apply -f mydeploy.yaml
deployment.apps/mydeploy created
# kubectl get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
mydeploy 3/3 3 3 1s
# 控制器自动创建 ReplicaSet
# kubectl get replicasets
NAME DESIRED CURRENT READY AGE
mydeploy-76f96b85df 3 3 3 2s
# 控制器自动创建 Pod
# kubectl get pods
NAME READY STATUS RESTARTS AGE
mydeploy-76f96b85df-5gng9 1/1 Running 0 3s
mydeploy-76f96b85df-vsfrw 1/1 Running 0 3s
mydeploy-76f96b85df-z9x95 1/1 Running 0 3s
# 集群自维护自治理
# kubectl delete pod --all
pod "mydeploy-76f96b85df-5gng9" deleted
pod "mydeploy-76f96b85df-vsfrw" deleted
pod "mydeploy-76f96b85df-z9x95" deleted
# 删除后自动重新创建
# kubectl get pods
NAME READY STATUS RESTARTS AGE
mydeploy-76f96b85df-7dvwh 1/1 Running 0 7s
mydeploy-76f96b85df-kpbz4 1/1 Running 0 7s
mydeploy-76f96b85df-kr2zq 1/1 Running 0 7s

集群服务
https://i-blog.csdnimg.cn/direct/b0660aee16d4412c9d02beaa71b19fc8.png
# 创建集群服务
# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
name: websvc
spec:
type: ClusterIP
clusterIP: 10.245.1.80
selector:
app: deploy-httpd
ports:
- protocol: TCP
port: 80
targetPort: 80
# kubectl replace --force -f websvc.yaml
service/websvc replaced
# curl -m 3 http://10.245.1.80
Welcome to The Apache.

集群扩缩容
#抽象来说,扩容就是在基础存储设备上,添加新的设备,然后挂载到新的设备上。达到扩容效果, 类似吃鸡游戏里的扩容弹夹。
https://i-blog.csdnimg.cn/direct/66ca1b9ba9e944f1b872e910388c783f.png
而缩容就是为了达到更佳的运行效率,减少存储设备上的存储空间,达到缩容目的。类似古代的增兵减灶
# 集群扩容
# kubectl scale deployment mydeploy --replicas 10
deployment.apps/mydeploy scaled
# kubectl get pods
NAME READY STATUS RESTARTS AGE
mydeploy-76f96b85df-kg27l 1/1 Running 0 6s
mydeploy-76f96b85df-q5fzb 1/1 Running 0 6s
mydeploy-76f96b85df-rxhp4 1/1 Running 0 6s
mydeploy-76f96b85df-szf69 1/1 Running 0 6s
mydeploy-76f96b85df-tp2xj 1/1 Running 0 6s
......
# 集群缩容
# kubectl scale deployment mydeploy --replicas=2
deployment.apps/mydeploy scaled
# kubectl get pods
NAME READY STATUS RESTARTS AGE
mydeploy-76f96b85df-7dvwh 1/1 Running 0 51s
mydeploy-76f96b85df-kr2zq 1/1 Running 0 51s

历史版本信息
# 查看历史版本
# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy
REVISION CHANGE-CAUSE
1 <none>
# 添加注释信息
# kubectl annotate deployments mydeploy kubernetes.io/change-cause="httpd.v1"
deployment.apps/mydeploy annotated
# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy
REVISION CHANGE-CAUSE
1 httpd.v1
# 更新资源清单文件
# vim mydeploy.yaml
# 在创建容器的镜像下面添加
imagePullPolicy: Always
# kubectl apply -f mydeploy.yaml
deployment.apps/mydeploy patched
# 更新版本信息
# kubectl annotate deployments mydeploy kubernetes.io/change-cause="httpd.v2"
deployment.apps/mydeploy annotated
# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy
REVISION CHANGE-CAUSE
1 httpd.v1
2 httpd.v2
滚动更新
# 修改镜像,滚动更新集群
# kubectl set image deployment mydeploy apache=myos:nginx
deployment.apps/mydeploy image updated
# 给新版本添加注释信息
# kubectl annotate deployments mydeploy kubernetes.io/change-cause="nginx.v1"
deployment.apps/mydeploy annotated
# 查看历史版本信息
# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy
REVISION CHANGE-CAUSE
1 httpd.v1
2 httpd.v2
3 nginx.v1
# 访问验证服务
# curl -m 3 http://10.245.1.80
Nginx is running !

版本回滚
#类似游戏里面的怀旧服,而这里的版本回滚是用于恢复数据
# 历史版本与回滚
# kubectl rollout undo deployment mydeploy --to-revision 1
deployment.apps/mydeploy rolled back
# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy
REVISION CHANGE-CAUSE
2 httpd.v2
3 nginx.v1
4 httpd.v1
# curl -m 3 http://10.245.1.80
Welcome to The Apache.https://i-blog.csdnimg.cn/direct/a98ed2784b7e467bb9a328d931f5f293.png
清理资源对象
# 删除控制器时会自动回收自己创建的 Pod
# kubectl delete deployments mydeploy
deployment.apps "mydeploy" deleted

二、DaemonSet
https://i-blog.csdnimg.cn/direct/47ae1b011c81468c86347fbe26080aa2.png
配置案例
# cp -a mydeploy.yaml myds.yaml
# vim myds.yaml
---
kind: DaemonSet # 资源对象类型
apiVersion: apps/v1
metadata:
name: myds # 控制器名称
spec:
# replicas: 2 # 删除副本参数
selector:
matchLabels:
app: ds-httpd # 修改标签防止冲突
template:
metadata:
labels:
app: ds-httpd # 修改标签防止冲突
spec:
containers:
- name: apache
image: myos:httpd
imagePullPolicy: Always
# kubectl apply -f myds.yaml
daemonset.apps/myds created
# kubectl get pods -o wide
NAME READY STATUS RESTARTS IP NODE
myds-msrcx 1/1 Running 0 10.244.1.11 node-0001
myds-lwq8l 1/1 Running 0 10.244.2.17 node-0002
myds-4wt72 1/1 Running 0 10.244.3.14 node-0003
myds-6k82t 1/1 Running 0 10.244.4.15 node-0004
myds-9c6wc 1/1 Running 0 10.244.5.19 node-0005

清理资源对象
# 删除控制器
# kubectl delete daemonsets myds
daemonset.apps "myds" deleted
三、Job、CronJob
https://i-blog.csdnimg.cn/direct/6850b4ebfad54642bd80b8a35743ab79.png
Job 控制器
# 资源文件模板
# kubectl create job myjob --image=myos:8.5 --dry-run=client -o yaml -- sleep 3
# vim myjob.yaml
---
kind: Job
apiVersion: batch/v1
metadata:
name: myjob
spec:
template: # 以下定义 Pod 模板
metadata: {}
spec:
restartPolicy: OnFailure
containers:
- name: myjob
image: myos:8.5
command: ["/bin/sh"]
args:
- -c
- |
sleep 3
exit $((RANDOM%2))
# kubectl apply -f myjob.yaml
job.batch/myjob created
# 失败了会重启
# kubectl get pods -l job-name=myjob -w
NAME READY STATUS RESTARTS AGE
myjob--1-lrtbk 1/1 Running 0 2s
myjob--1-lrtbk 0/1 Error 0 4s
myjob--1-lrtbk 1/1 Running 1 (1s ago) 5s
myjob--1-lrtbk 0/1 Completed 1 9s
# kubectl get jobs.batch
NAME COMPLETIONS DURATION AGE
myjob 1/1 8s 12s
# 删除Job控制器
# kubectl delete -f myjob.yaml
job.batch "myjob" deleted
#pod控制器创建失败,任务会确保创建成功而重启,避免失败
Cronjob
#类似ansible中的crontab模块,可以定时执行某一任务

配置案例
# 资源对象模板
# kubectl create cronjob mycj --image=myos:8.5 --schedule='* * * * *' --dry-run=client -o yaml -- sleep 3
# vim mycj.yaml
---
kind: CronJob
apiVersion: batch/v1
metadata:
name: mycj
spec:
schedule: "* * * * *"
jobTemplate: # 以下定义 Job 模板
metadata: {}
spec:
template:
metadata: {}
spec:
restartPolicy: OnFailure
containers:
- name: myjob
image: myos:8.5
command: ["/bin/sh"]
args:
- -c
- |
sleep 3
exit $((RANDOM%2))
# kubectl apply -f mycj.yaml
cronjob.batch/mycj created
# kubectl get cronjobs
NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
mycj * * * * 1-5 False 0 <none> 4s
# 按照时间周期,每分钟触发一个任务
# kubectl get jobs -w
NAME READY STATUS RESTARTS
mycj-27808172--1-w6sbx 0/1 Pending 0
mycj-27808172--1-w6sbx 0/1 ContainerCreating 0
mycj-27808172--1-w6sbx 1/1 Running 0
mycj-27808172--1-w6sbx 0/1 Completed 1
# 保留三次结果,多余的会被删除
# kubectl get jobs
NAME COMPLETIONS DURATION AGE
mycj-27605367 1/1 31s 3m30s
mycj-27605368 1/1 31s 2m30s
mycj-27605369 1/1 31s 90s
mycj-27605370 0/1 30s 30s
# kubectl get jobs
NAME COMPLETIONS DURATION AGE
mycj-27605368 1/1 31s 2m33s
mycj-27605369 1/1 31s 93s
mycj-27605370 1/1 31s 33s
# 删除CJ控制器
# kubectl delete -f mycj.yaml
cronjob.batch "mycj" deleted
四、StatefulSet
https://i-blog.csdnimg.cn/direct/b46a65db69744b929f3ae100c50df3db.png
Headless 服务
# cp websvc.yaml stssvc.yaml
# vim stssvc.yaml
---
kind: Service
apiVersion: v1
metadata:
name: stssvc # 服务名称
spec:
type: ClusterIP
clusterIP: None # 设置 IP 为 None
selector:
app: sts-httpd # 设置 Pod 标签
ports:
- protocol: TCP
port: 80
targetPort: 80
# kubectl apply -f stssvc.yaml
service/stssvc created
# kubectl get services stssvc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
stssvc ClusterIP None <none> 80/TCP 51s

资源清单文件
# cp -a mydeploy.yaml mysts.yaml
# vim mysts.yaml
---
kind: StatefulSet # 资源对象类型
apiVersion: apps/v1
metadata:
name: mysts # 控制器名称
spec:
serviceName: stssvc # 新增 headless 服务名称
replicas: 3
selector:
matchLabels:
app: sts-httpd # 修改标签防止冲突
template:
metadata:
labels:
app: sts-httpd # 修改标签防止冲突
spec:
containers:
- name: apache
image: myos:httpd

配置案例
# statefulset 主要解决了 Pod 创建顺序的问题
# statefulset 主要解决了访问指定 Pod 的问题
# kubectl apply -f mysts.yaml
statefulset.apps/mysts created
# kubectl get pods
NAME READY STATUS RESTARTS AGE
mysts-0 1/1 Running 0 3s
mysts-1 1/1 Running 0 2s
mysts-2 1/1 Running 0 1s
# 所有 Pod IP 地址
# host stssvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases:
stssvc.default.svc.cluster.local has address 10.244.1.81
stssvc.default.svc.cluster.local has address 10.244.2.82
stssvc.default.svc.cluster.local has address 10.244.3.83
# 单个 Pod IP 地址
# host mysts-0.stssvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases:
mysts-0.stssvc.default.svc.cluster.local has address 10.244.1.81
# 删除sts控制器
# kubectl delete -f mysts.yaml -f stssvc.yaml
statefulset.apps "mysts" deleted
service "stssvc" deleted
五、HorizontalPodAutoscaler 弹性云服务
https://i-blog.csdnimg.cn/direct/94065328ce794a1396abb74bc0bddef3.png
配置后端服务
# 为 Deploy 模板添加资源配额
# cat mydeploy.yaml websvc.yaml >mycluster.yaml
# vim mycluster.yaml
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: mydeploy
spec:
replicas: 1
selector:
matchLabels:
app: deploy-httpd
template:
metadata:
labels:
app: deploy-httpd
spec:
containers:
- name: apache
image: myos:httpd
resources: # 为该资源设置配额
requests: # HPA 控制器会根据配额使用情况伸缩集群
cpu: 300m # CPU 配额
---
kind: Service
apiVersion: v1
metadata:
name: websvc
spec:
type: ClusterIP
clusterIP: 10.245.1.80
selector:
app: deploy-httpd
ports:
- protocol: TCP
port: 80
targetPort: 80
# kubectl replace --force -f mycluster.yaml
deployment.apps/mydeploy replaced
service/websvc replaced
# 验证服务
# kubectl top pods
NAME CPU(cores) MEMORY(bytes)
mydeploy-b4f9dc786-w4x2z 6m 18Mi
# curl -s http://10.245.1.80/info.php
<pre>
Array
(
=> 10.244.219.64
=> GET
=> curl/7.61.1
=> /info.php
)
php_host: mydeploy-b4f9dc786-w4x2z
1229

HPA 控制器
# vim myhpa.yaml
---
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2
metadata:
name: myhpa
spec:
behavior:
scaleDown:
stabilizationWindowSeconds: 60
scaleTargetRef:
kind: Deployment
apiVersion: apps/v1
name: mydeploy
minReplicas: 1
maxReplicas: 5
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 50
# kubectl apply -f myhpa.yaml
horizontalpodautoscaler.autoscaling/myhpa created
# 刚刚创建 unknown 是正常现象,最多等待 60s 就可以正常获取数据
# kubectl get horizontalpodautoscalers
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS
myhpa Deployment/mydeploy <unknown>/50% 1 5 0
# kubectl get horizontalpodautoscalers
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS
myhpa Deployment/mydeploy 0%/50% 1 5 3

配置案例
https://i-blog.csdnimg.cn/direct/60941470132d4192bf70bae916528486.png
# 终端 1 访问提高负载
# while sleep 1;do curl -s "http://10.245.1.80/info.php?id=100000" -o /dev/null; done &
# 终端 2 监控 HPA 变化
# kubectl get hpa -w
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
myhpa Deployment/mydeploy 0%/50% 1 5 1 1m
myhpa Deployment/mydeploy 31%/50% 1 5 1 2m
myhpa Deployment/mydeploy 70%/50% 1 5 1 2m15s
myhpa Deployment/mydeploy 72%/50% 1 5 2 2m30s
myhpa Deployment/mydeploy 36%/50% 1 5 2 2m45s
myhpa Deployment/mydeploy 55%/50% 1 5 2 3m
myhpa Deployment/mydeploy 58%/50% 1 5 3 3m15s
myhpa Deployment/mydeploy 39%/50% 1 5 3 3m30s
... ...
myhpa Deployment/mydeploy 66%/50% 1 5 4 5m
myhpa Deployment/mydeploy 68%/50% 1 5 5 5m15s
myhpa Deployment/mydeploy 55%/50% 1 5 5 5m30s
myhpa Deployment/mydeploy 58%/50% 1 5 5 5m45s
myhpa Deployment/mydeploy 62%/50% 1 5 5 6m
# 如果 60s 内平均负载小于标准值,就会自动缩减集群规模
# kubectl get hpa -w
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
myhpa Deployment/mydeploy 52%/50% 1 5 5 13m
myhpa Deployment/mydeploy 44%/50% 1 5 5 13m15s
myhpa Deployment/mydeploy 38%/50% 1 5 5 13m30s
myhpa Deployment/mydeploy 35%/50% 1 5 5 13m45s
myhpa Deployment/mydeploy 28%/50% 1 5 5 14m
... ...
myhpa Deployment/mydeploy 8%/50% 1 5 5 18m30s
myhpa Deployment/mydeploy 9%/50% 1 5 4 18m45s
myhpa Deployment/mydeploy 9%/50% 1 5 4 19m
myhpa Deployment/mydeploy 12%/50% 1 5 3 19m15s
myhpa Deployment/mydeploy 15%/50% 1 5 3 19m30s
myhpa Deployment/mydeploy 18%/50% 1 5 2 19m45s
myhpa Deployment/mydeploy 33%/50% 1 5 1 20m
课后总结:
#我们本节学的很多控制器,都有差异和区别,可以按照类似以下的提示词,来对AI提问,得到更加符合工作使用情况的答复。
https://i-blog.csdnimg.cn/direct/9e9bd35dd0df4870b72f01a29490ae60.png
至此云计算cloud二周目内容更新完毕!
大家有想练习的,可以去华为云、阿里云等云平台,创建账号,使用30天免费体验版云产品
熟悉相关云产品的使用与配置,里面也有一些项目的免费体验课,可以照着案例学基本项目架构
下一阶段,将重回网络阶段,深入了解云计算与云原生领域的网络架构知识。
下个阶段见!!!
免责声明:如果侵犯了您的权益,请联系站长,我们会及时删除侵权内容,谢谢合作!更多信息从访问主页:qidao123.com:ToB企服之家,中国第一个企服评测及商务社交产业平台。
页:
[1]