Cloud Computing Stage 4: Cloud Round Two, Sessions 07-08

cloud 07

I. Kubernetes Service Management

Creating a Service

# Resource manifest template
[root@master ~]# kubectl create service clusterip websvc --tcp=80:80 --dry-run=client -o yaml
[root@master ~]# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
[root@master ~]# kubectl apply -f websvc.yaml 
service/websvc created
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)
kubernetes   ClusterIP   10.245.0.1      <none>        443/TCP
websvc       ClusterIP   10.245.5.18     <none>        80/TCP
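At this point the Service exists, but no Pod carries the app: web label yet, so it has nothing to forward traffic to. As a quick sanity check (a minimal sketch; the endpoints list should still show no addresses here), you can inspect the endpoints that belong to the Service:

[root@master ~]# kubectl get endpoints websvc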
Resolving the Service Name
# Install the DNS utility package
[root@master ~]# dnf install -y bind-utils
# Look up the cluster DNS service address
[root@master ~]# kubectl -n kube-system get service kube-dns
NAME       TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
kube-dns   ClusterIP   10.245.0.10   <none>        53/UDP,53/TCP,9153/TCP
# Test name resolution
[root@master ~]# host websvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases: 
websvc.default.svc.cluster.local has address 10.245.5.18

Creating the Backend Application

[root@master ~]# vim web1.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: web1
  labels:
    app: web             # the Service finds its backends by this label
spec:
  containers:
  - name: apache
    image: myos:httpd
[root@master ~]# kubectl apply -f web1.yaml
pod/web1 created
[root@master ~]# curl http://10.245.5.18
Welcome to The Apache.
Load Balancing
[root@master ~]# sed 's,web1,web2,' web1.yaml |kubectl apply -f -
pod/web2 created
[root@master ~]# sed 's,web1,web3,' web1.yaml |kubectl apply -f -
pod/web3 created
[root@master ~]# curl -s http://10.245.5.18/info.php |grep php_host
php_host:       web1
[root@master ~]# curl -s http://10.245.5.18/info.php |grep php_host
php_host:       web2
[root@master ~]# curl -s http://10.245.5.18/info.php |grep php_host
php_host:       web3
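The three curl requests above land on web1, web2 and web3 in turn because the Service now has three matching backends. To see exactly which Pod IPs sit behind the Service (a quick check; the addresses will differ in your cluster):

[root@master ~]# kubectl get endpoints websvc
[root@master ~]# kubectl get pods -o wide -l app=web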
Fixed-IP Service
[root@master ~]# vim websvc.yaml 
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80    # the ClusterIP can be set explicitly
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
[root@master ~]# kubectl replace --force -f websvc.yaml 
service "websvc" deleted
service/websvc replaced
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
kubernetes   ClusterIP   10.245.0.1    <none>        443/TCP
websvc       ClusterIP   10.245.1.80   <none>        80/TCP

Port Aliases (Named Ports)

[root@master ~]# vim websvc.yaml 
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    targetPort: myhttp    # look up the backend port by its name (alias)
[root@master ~]# kubectl replace --force -f websvc.yaml 
service "websvc" deleted
service/websvc replaced
[root@master ~]# kubectl delete pod --all
pod "web1" deleted
pod "web2" deleted
pod "web3" deleted[root@master ~]# vim web1.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: web1
  labels:
    app: web
spec:
  containers:
  - name: apache
    image: myos:httpd
    ports:                 # port specification
    - name: myhttp         # port name (alias)
      protocol: TCP        # protocol
      containerPort: 80    # port number
[root@master ~]# kubectl apply -f web1.yaml
pod/web1 created
[root@master ~]# curl http://10.245.1.80
Welcome to The Apache.

Troubleshooting a Service

---
kind: Service
apiVersion: v1
metadata:
  name: web123
spec:
  type: ClusterIP
  clusterIP: 192.168.1.88
  selector:
    app: apache
  ports:
  - protocol: TCP
    port: 80
    targetPort: web
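The manifest above is a troubleshooting exercise. Compared with the working definitions earlier: the clusterIP 192.168.1.88 lies outside the cluster's service address range (the other Services use 10.245.x.x, so the API server should reject it), the selector app: apache matches none of the Pods (they are labelled app: web), and targetPort: web does not match the container port name myhttp. A few commands that help diagnose this kind of problem (a sketch, assuming a Service named web123 was actually created):

[root@master ~]# kubectl describe service web123     # check selector, ports and endpoints
[root@master ~]# kubectl get endpoints web123        # an empty endpoints list means no Pod matched
[root@master ~]# kubectl get pods --show-labels      # compare Pod labels with the selector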

nodePort


Publishing the Service Externally

[root@master ~]# vim mysvc.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: mysvc
spec:
  type: NodePort              # service type
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    nodePort: 30080           # published port on every node
    targetPort: 80
[root@master ~]# kubectl apply -f mysvc.yaml 
service/mysvc configured
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
kubernetes   ClusterIP   10.245.0.1    <none>        443/TCP
websvc       ClusterIP   10.245.1.80   <none>        80/TCP
mysvc        NodePort    10.245.3.88   <none>        80:30080/TCP
[root@master ~]# curl http://node-0001:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0002:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0003:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0004:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0005:30080
Welcome to The Apache.
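A NodePort Service opens the same port (30080 here) on every node. By default Kubernetes allocates node ports from the 30000-32767 range, and if you omit nodePort one is chosen automatically. To confirm the mapping on the Service itself (a quick check):

[root@master ~]# kubectl describe service mysvc |grep -i nodeport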

II. Ingress Installation and Policy Configuration

Installing the Controller

[root@master ~]# cd plugins/ingress
[root@master ingress]# docker load -i ingress.tar.xz
[root@master ingress]# docker images |while read i t _;do
    [[ "${t}" == "TAG" ]] && continue
    [[ "${i}" =~ ^"harbor:443/".+ ]] && continue
    docker tag ${i}:${t} harbor:443/plugins/${i##*/}:${t}
    docker push harbor:443/plugins/${i##*/}:${t}
    docker rmi ${i}:${t} harbor:443/plugins/${i##*/}:${t}
done
[root@master ingress]# sed -ri 's,^(\s*image: )(.*/)?(.+),\1harbor:443/plugins/\3,' deploy.yaml
443:    image: registry.k8s.io/ingress-nginx/controller:v1.9.6
546:    image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06
599:    image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06
[root@master ingress]# kubectl apply -f deploy.yaml
[root@master ingress]# kubectl -n ingress-nginx get pods
NAME                                        READY   STATUS      RESTARTS
ingress-nginx-admission-create--1-lm52c     0/1     Completed   0
ingress-nginx-admission-patch--1-sj2lz      0/1     Completed   0
ingress-nginx-controller-5664857866-tql24   1/1     Running     0
Verifying the Backend Service
[root@master ~]# kubectl get pods,services 
NAME       READY   STATUS    RESTARTS   AGE
pod/web1   1/1     Running   0          35m

NAME                 TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
service/kubernetes   ClusterIP   10.245.0.1    <none>        443/TCP
service/websvc       ClusterIP   10.245.1.80   <none>        80/TCP
service/mysvc        NodePort    10.245.3.88   <none>        80:30080/TCP
[root@master ~]# curl http://10.245.1.80
Welcome to The Apache.
Publishing the Service Externally
# Query the name of the ingress controller class
[root@master ~]# kubectl get ingressclasses.networking.k8s.io 
NAME    CONTROLLER             PARAMETERS   AGE
nginx   k8s.io/ingress-nginx   <none>       5m7s
# Resource manifest template
[root@master ~]# kubectl create ingress mying --class=nginx --rule=nsd.tedu.cn/*=mysvc:80 --dry-run=client -o yaml
[root@master ~]# vim mying.yaml
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: mying
spec:
  ingressClassName: nginx
  rules:
  - host: nsd.tedu.cn
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: websvc
            port:
              number: 80
[root@master ~]# kubectl apply -f mying.yaml 
ingress.networking.k8s.io/mying created
[root@master ~]# kubectl get ingress
NAME    CLASS   HOSTS         ADDRESS        PORTS
mying   nginx   nsd.tedu.cn   192.168.1.51   80
[root@master ~]# curl -H "Host: nsd.tedu.cn" http://192.168.1.51
Welcome to The Apache.
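The curl above fakes the Host header because nsd.tedu.cn does not actually resolve. For convenience you can also point the name at the ingress address locally (a minimal sketch using the ADDRESS shown above; in production this would be a real DNS record):

[root@master ~]# echo "192.168.1.51 nsd.tedu.cn" >>/etc/hosts
[root@master ~]# curl http://nsd.tedu.cn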


III. Dashboard Installation

# Next we introduce another Kubernetes plugin

Web Management Plugin

Installing the Dashboard

[root@master ~]# cd plugins/dashboard
[root@master dashboard]# docker load -i dashboard.tar.xz
[root@master dashboard]# docker images |while read i t _;do
    [[ "${t}" == "TAG" ]] && continue
    [[ "${i}" =~ ^"harbor:443/".+ ]] && continue
    docker tag ${i}:${t} harbor:443/plugins/${i##*/}:${t}
    docker push harbor:443/plugins/${i##*/}:${t}
    docker rmi ${i}:${t} harbor:443/plugins/${i##*/}:${t}
done
[root@master dashboard]# sed -ri 's,^(\s*image: )(.*/)?(.+),\1harbor:443/plugins/\3,' recommended.yaml
193:    image: kubernetesui/dashboard:v2.7.0
278:    image: kubernetesui/metrics-scraper:v1.0.8
[root@master dashboard]# kubectl apply -f recommended.yaml
[root@master dashboard]# kubectl -n kubernetes-dashboard get pods
NAME                                         READY   STATUS    RESTARTS
dashboard-metrics-scraper-66f6f56b59-b42ng   1/1     Running   0
kubernetes-dashboard-65ff57f4cf-lwtsk        1/1     Running   0

Publishing the Service

# Check the service status
[root@master dashboard]# kubectl -n kubernetes-dashboard get service
NAME                        TYPE        CLUSTER-IP       PORT(S)
dashboard-metrics-scraper   ClusterIP   10.245.205.236   8000/TCP
kubernetes-dashboard        ClusterIP   10.245.215.40    443/TCP
# Extract the Service resource definition from the manifest
[root@master dashboard]# sed -n '30,45p' recommended.yaml >dashboard-svc.yaml
[root@master dashboard]# vim dashboard-svc.yaml
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
  - port: 443
    nodePort: 30443
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
[root@master dashboard]# kubectl apply -f dashboard-svc.yaml 
service/kubernetes-dashboard configured
[root@master dashboard]# kubectl -n kubernetes-dashboard get service
NAME                        TYPE        CLUSTER-IP       PORT(S)
dashboard-metrics-scraper   ClusterIP   10.245.205.236   8000/TCP
kubernetes-dashboard        NodePort    10.245.215.40    443:30443/TCP

  • Remember to open the Dashboard login page in a browser (https://any-node-IP:30443; accept the self-signed certificate).

IV. RBAC Authorization

Service Accounts and Permissions

Creating a Service Account

# Resource object template
[root@master ~]# kubectl -n kubernetes-dashboard create serviceaccount kube-admin --dry-run=client -o yaml
[root@master ~]# vim admin-user.yaml
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: kube-admin
  namespace: kubernetes-dashboard
[root@master ~]# kubectl apply -f admin-user.yaml 
serviceaccount/kube-admin created
[root@master ~]# kubectl -n kubernetes-dashboard get serviceaccounts 
NAME                   SECRETS   AGE
default                0         16m
kube-admin             0         11s
kubernetes-dashboard   0         16m

Getting the User Token

[root@master ~]# kubectl -n kubernetes-dashboard create token kube-admin
<Base64-encoded token data>
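The token identifies the account, but a brand-new service account has no permissions bound to it yet, so logging in to the Dashboard with it would show almost nothing. You can preview what an account is allowed to do without logging in, using kubectl's impersonation support (a quick check; at this point the list should be essentially empty apart from the default self-review rules):

[root@master ~]# kubectl auth can-i --list --as=system:serviceaccount:kubernetes-dashboard:kube-admin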

Roles and Authorization

# Similar to role management in an online game such as DNF: the relationship between a GM (administrator) and ordinary players.

Resource object / description / scope:
ServiceAccount       a service account; gives the processes running in a Pod an identity            single namespace
Role                 a role; a set of rules describing related permissions                          single namespace
ClusterRole          a role; a set of rules describing related permissions                          cluster-wide
RoleBinding          grants the permissions to a subject; may reference a Role or a ClusterRole     single namespace
ClusterRoleBinding   grants the permissions to a subject; may only reference a ClusterRole          cluster-wide

Resource Object Verbs

create (create an object), delete (delete an object), deletecollection (delete a collection), get (read a single object), list (list objects), patch (partially update), update (update), watch (watch for changes)
Ordinary Role
[root@master ~]# kubectl cluster-info dump |grep authorization-mode
                            "--authorization-mode=Node,RBAC",
# Resource object templates
[root@master ~]# kubectl -n default create role myrole --resource=pods --verb=get,list --dry-run=client -o yaml
[root@master ~]# kubectl -n default create rolebinding kube-admin-role --role=myrole --serviceaccount=kubernetes-dashboard:kube-admin --dry-run=client -o yaml
[root@master ~]# vim myrole.yaml 
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: myrole
  namespace: default
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kube-admin-role
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: myrole
subjects:
- kind: ServiceAccount
  name: kube-admin
  namespace: kubernetes-dashboard
[root@master ~]# kubectl apply -f myrole.yaml 
role.rbac.authorization.k8s.io/myrole created
rolebinding.rbac.authorization.k8s.io/kube-admin-role created
[root@master ~]# kubectl delete -f myrole.yaml 
role.rbac.authorization.k8s.io "myrole" deleted
rolebinding.rbac.authorization.k8s.io "kube-admin-role" deleted
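While myrole.yaml is applied (that is, before the delete above), you can confirm the grant took effect the same way as before. The first check should report yes and the second no, because the Role only allows get and list on pods in the default namespace (a quick verification sketch):

[root@master ~]# kubectl auth can-i list pods -n default --as=system:serviceaccount:kubernetes-dashboard:kube-admin
[root@master ~]# kubectl auth can-i delete pods -n default --as=system:serviceaccount:kubernetes-dashboard:kube-admin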
Cluster Administrator
[root@master ~]# kubectl get clusterrole
NAME                              CREATED AT
admin                             2022-06-24T08:11:17Z
cluster-admin                     2022-06-24T08:11:17Z
... ...
# Resource object template
[root@master ~]# kubectl create clusterrolebinding kube-admin-role --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kube-admin --dry-run=client -o yaml
[root@master ~]# vim admin-user.yaml 
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: kube-admin
  namespace: kubernetes-dashboard

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kube-admin-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kube-admin
  namespace: kubernetes-dashboard
[root@master ~]# kubectl apply -f admin-user.yaml 
serviceaccount/kube-admin unchanged
clusterrolebinding.rbac.authorization.k8s.io/kube-admin-role created

cloud 08

# The previous sessions covered the control-plane and compute components of Kubernetes. Now let's take a closer look at the Kubernetes workload controllers.

I. Deployment

Resource Manifest
[root@master ~]# kubectl create deployment myweb --image=myos:httpd --dry-run=client -o yaml
[root@master ~]# vim mydeploy.yaml
---
kind: Deployment          # resource object type
apiVersion: apps/v1       # API version
metadata:                 # metadata
  name: mydeploy          # name
spec:                     # detailed definition
  replicas: 3             # number of replicas
  selector:               # label selector
    matchLabels:          # matchExpressions syntax is also supported
      app: deploy-httpd   # determines which Pods this controller manages
  template:               # Pod template; everything below defines the Pod
    metadata:
      labels:
        app: deploy-httpd
    spec:
      containers:
      - name: apache
        image: myos:httpd
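If you are unsure what a field in a manifest means, kubectl can print the built-in API documentation for it; for example (works for any resource and field path):

[root@master ~]# kubectl explain deployment.spec.selector
[root@master ~]# kubectl explain deployment.spec.template.spec.containers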
Configuration Example
# Create the controller
[root@master ~]# kubectl apply -f mydeploy.yaml 
deployment.apps/mydeploy created
[root@master ~]# kubectl get deployments
NAME       READY   UP-TO-DATE   AVAILABLE   AGE
mydeploy   3/3     3            3           1s
# The controller automatically creates a ReplicaSet
[root@master ~]# kubectl get replicasets 
NAME                  DESIRED   CURRENT   READY   AGE
mydeploy-76f96b85df   3         3         3       2s
# The controller automatically creates the Pods
[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-5gng9   1/1     Running   0          3s
mydeploy-76f96b85df-vsfrw   1/1     Running   0          3s
mydeploy-76f96b85df-z9x95   1/1     Running   0          3s
# The cluster maintains and heals itself
[root@master ~]# kubectl delete pod --all 
pod "mydeploy-76f96b85df-5gng9" deleted
pod "mydeploy-76f96b85df-vsfrw" deleted
pod "mydeploy-76f96b85df-z9x95" deleted# 删除后自动重新创建
[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-7dvwh   1/1     Running   0          7s
mydeploy-76f96b85df-kpbz4   1/1     Running   0          7s
mydeploy-76f96b85df-kr2zq   1/1     Running   0          7s
Cluster Service

# Create the cluster service
[root@master ~]# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80
  selector:
    app: deploy-httpd
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
[root@master ~]# kubectl replace --force -f websvc.yaml 
service/websvc replaced
[root@master ~]# curl -m 3 http://10.245.1.80
Welcome to The Apache.
Scaling the Cluster Out and In

# Loosely speaking, scaling out means adding capacity and putting it into service so the workload can handle more load — like the extended magazine in a battle-royale game.

Scaling in removes capacity that is no longer needed so the workload runs more efficiently — like the ancient tactic of adding soldiers while reducing cooking stoves.

# Scale out
[root@master ~]# kubectl scale deployment mydeploy --replicas 10
deployment.apps/mydeploy scaled
[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-kg27l   1/1     Running   0          6s
mydeploy-76f96b85df-q5fzb   1/1     Running   0          6s
mydeploy-76f96b85df-rxhp4   1/1     Running   0          6s
mydeploy-76f96b85df-szf69   1/1     Running   0          6s
mydeploy-76f96b85df-tp2xj   1/1     Running   0          6s
... ...
# Scale in
[root@master ~]# kubectl scale deployment mydeploy --replicas=2
deployment.apps/mydeploy scaled
[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-7dvwh   1/1     Running   0          51s
mydeploy-76f96b85df-kr2zq   1/1     Running   0          51s
Revision History
# Show the revision history
[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         <none>
# Add an annotation (change-cause)
[root@master ~]# kubectl annotate deployments mydeploy kubernetes.io/change-cause="httpd.v1"
deployment.apps/mydeploy annotated
[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         httpd.v1
# Update the resource manifest
[root@master ~]# vim mydeploy.yaml
# Under the container image, add: imagePullPolicy: Always
[root@master ~]# kubectl apply -f mydeploy.yaml
deployment.apps/mydeploy patched
# Update the version annotation
[root@master ~]# kubectl annotate deployments mydeploy kubernetes.io/change-cause="httpd.v2"
deployment.apps/mydeploy annotated
[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         httpd.v1
2         httpd.v2
Rolling Update
# Change the image to roll out an update across the cluster
[root@master ~]# kubectl set image deployment mydeploy apache=myos:nginx
deployment.apps/mydeploy image updated
# Annotate the new revision
[root@master ~]# kubectl annotate deployments mydeploy kubernetes.io/change-cause="nginx.v1"
deployment.apps/mydeploy annotated
# Show the revision history
[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         httpd.v1
2         httpd.v2
3         nginx.v1
# Verify the service
[root@master ~]# curl -m 3 http://10.245.1.80
Nginx is running !
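During an update like this you can watch the rollout progress instead of polling the Pods by hand; the command blocks until the new ReplicaSet is fully available (a quick check):

[root@master ~]# kubectl rollout status deployment mydeploy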
Rolling Back a Revision

# Similar to a "classic" server in an online game; here, rolling back restores a previous version of the Deployment.

# Roll back to a historical revision
[root@master ~]# kubectl rollout undo deployment mydeploy --to-revision 1
deployment.apps/mydeploy rolled back
[root@master ~]# kubectl rollout history deployment mydeploy 
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
2         httpd.v2
3         nginx.v1
4         httpd.v1
[root@master ~]# curl -m 3 http://10.245.1.80
Welcome to The Apache.

Cleaning Up Resource Objects
# Deleting the controller automatically reclaims the Pods it created
[root@master ~]# kubectl delete deployments mydeploy
deployment.apps "mydeploy" deleted

II. DaemonSet

Configuration Example
[root@master ~]# cp -a mydeploy.yaml myds.yaml
[root@master ~]# vim myds.yaml
---
kind: DaemonSet         # resource object type
apiVersion: apps/v1
metadata:
  name: myds            # controller name
spec:
  # replicas: 2         # remove the replicas parameter
  selector:
    matchLabels:
      app: ds-httpd     # change the label to avoid conflicts
  template:
    metadata:
      labels:
        app: ds-httpd   # change the label to avoid conflicts
    spec:
      containers:
      - name: apache
        image: myos:httpd
        imagePullPolicy: Always
[root@master ~]# kubectl apply -f myds.yaml 
daemonset.apps/myds created
[root@master ~]# kubectl get pods -o wide
NAME         READY   STATUS    RESTARTS   IP            NODE
myds-msrcx   1/1     Running   0          10.244.1.11   node-0001
myds-lwq8l   1/1     Running   0          10.244.2.17   node-0002
myds-4wt72   1/1     Running   0          10.244.3.14   node-0003
myds-6k82t   1/1     Running   0          10.244.4.15   node-0004
myds-9c6wc   1/1     Running   0          10.244.5.19   node-0005
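Note that the DaemonSet created one Pod per worker node but none on the master. That is expected if the master carries the usual control-plane NoSchedule taint; a DaemonSet only schedules there if its Pod template adds a matching toleration. A quick way to check (a sketch, assuming the control node is named master):

[root@master ~]# kubectl describe node master |grep -i taint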
Cleaning Up Resource Objects
# Delete the controller
[root@master ~]# kubectl delete daemonsets myds
daemonset.apps "myds" deleted

III. Job and CronJob

 

Job Controller

# Resource file template
[root@master ~]# kubectl create job myjob --image=myos:8.5 --dry-run=client -o yaml -- sleep 3
[root@master ~]# vim myjob.yaml 
---
kind: Job
apiVersion: batch/v1
metadata:
  name: myjob
spec:
  template:                    # the Pod template is defined below
    metadata: {}
    spec:
      restartPolicy: OnFailure
      containers:
      - name: myjob
        image: myos:8.5
        command: ["/bin/sh"]
        args:
        - -c
        - |
          sleep 3
          exit $((RANDOM%2))
[root@master ~]# kubectl apply -f myjob.yaml 
job.batch/myjob created
# If the Pod fails, it is restarted
[root@master ~]# kubectl get pods -l job-name=myjob -w
NAME             READY   STATUS      RESTARTS     AGE
myjob--1-lrtbk   1/1     Running     0            2s
myjob--1-lrtbk   0/1     Error       0            4s
myjob--1-lrtbk   1/1     Running     1 (1s ago)   5s
myjob--1-lrtbk   0/1     Completed   1            9s
[root@master ~]# kubectl get jobs.batch 
NAME    COMPLETIONS   DURATION   AGE
myjob   1/1           8s         12s
# Delete the Job controller
[root@master ~]# kubectl delete -f myjob.yaml 
job.batch "myjob" deleted

# If the Pod exits with an error, the Job controller keeps restarting it until the task completes successfully

CronJob

# Similar to the cron/crontab module in Ansible: it runs a task on a schedule
Configuration Example
# Resource object template
[root@master ~]# kubectl create cronjob mycj --image=myos:8.5 --schedule='* * * * *' --dry-run=client -o yaml -- sleep 3
[root@master ~]# vim mycj.yaml
---
kind: CronJob
apiVersion: batch/v1
metadata:
  name: mycj
spec:
  schedule: "* * * * *"
  jobTemplate:                 # the Job template is defined below
    metadata: {}
    spec:
      template:
        metadata: {}
        spec:
          restartPolicy: OnFailure
          containers:
          - name: myjob
            image: myos:8.5
            command: ["/bin/sh"]
            args:
            - -c
            - |
              sleep 3
              exit $((RANDOM%2))
[root@master ~]# kubectl apply -f mycj.yaml 
cronjob.batch/mycj created
[root@master ~]# kubectl get cronjobs 
NAME   SCHEDULE        SUSPEND   ACTIVE   LAST SCHEDULE   AGE
mycj   * * * * 1-5     False     0        <none>          4s
# Following the schedule, one Job is triggered every minute
[root@master ~]# kubectl get jobs -w
NAME                     READY   STATUS              RESTARTS
mycj-27808172--1-w6sbx   0/1     Pending             0
mycj-27808172--1-w6sbx   0/1     ContainerCreating   0
mycj-27808172--1-w6sbx   1/1     Running             0
mycj-27808172--1-w6sbx   0/1     Completed           1
# Only the last three results are kept; older ones are deleted
[root@master ~]# kubectl get jobs 
NAME            COMPLETIONS   DURATION   AGE
mycj-27605367   1/1           31s        3m30s
mycj-27605368   1/1           31s        2m30s
mycj-27605369   1/1           31s        90s
mycj-27605370   0/1           30s        30s
[root@master ~]# kubectl get jobs 
NAME            COMPLETIONS   DURATION   AGE
mycj-27605368   1/1           31s        2m33s
mycj-27605369   1/1           31s        93s
mycj-27605370   1/1           31s        33s
# Delete the CronJob controller
[root@master ~]# kubectl delete -f mycj.yaml 
cronjob.batch "mycj" deleted

IV. StatefulSet

Headless Service
[root@master ~]# cp websvc.yaml stssvc.yaml 
[root@master ~]# vim stssvc.yaml 
---
kind: Service
apiVersion: v1
metadata:
  name: stssvc            # service name
spec:
  type: ClusterIP
  clusterIP: None         # set the ClusterIP to None
  selector:
    app: sts-httpd        # Pod label selector
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
[root@master ~]# kubectl apply -f stssvc.yaml 
service/stssvc created
[root@master ~]# kubectl get services stssvc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
stssvc       ClusterIP   None          <none>        80/TCP    51s
Resource Manifest
[root@master ~]# cp -a mydeploy.yaml mysts.yaml
[root@master ~]# vim mysts.yaml
---
kind: StatefulSet       # resource object type
apiVersion: apps/v1
metadata:
  name: mysts           # controller name
spec:
  serviceName: stssvc   # new: the headless service name
  replicas: 3
  selector:
    matchLabels:
      app: sts-httpd    # change the label to avoid conflicts
  template:
    metadata:
      labels:
        app: sts-httpd  # change the label to avoid conflicts
    spec:
      containers:
      - name: apache
        image: myos:httpd
Configuration Example
# StatefulSet mainly solves the problem of Pod creation order
# StatefulSet mainly solves the problem of addressing a specific Pod
[root@master ~]# kubectl apply -f mysts.yaml 
statefulset.apps/mysts created
[root@master ~]# kubectl get pods
NAME      READY   STATUS    RESTARTS   AGE
mysts-0   1/1     Running   0          3s
mysts-1   1/1     Running   0          2s
mysts-2   1/1     Running   0          1s
# IP addresses of all Pods
[root@master ~]# host stssvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases: 
stssvc.default.svc.cluster.local has address 10.244.1.81
stssvc.default.svc.cluster.local has address 10.244.2.82
stssvc.default.svc.cluster.local has address 10.244.3.83
# IP address of a single Pod
[root@master ~]# host mysts-0.stssvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases: 
mysts-0.stssvc.default.svc.cluster.local has address 10.244.1.81
# Delete the StatefulSet controller
[root@master ~]# kubectl delete -f mysts.yaml -f stssvc.yaml
statefulset.apps "mysts" deleted
service "stssvc" deleted
Elastic Cloud Services

V. HorizontalPodAutoscaler

Configuring the Backend Service

# Add a resource request to the Deployment template
[root@master ~]# cat mydeploy.yaml websvc.yaml >mycluster.yaml
[root@master ~]# vim mycluster.yaml 
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: mydeploy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: deploy-httpd
  template:
    metadata:
      labels:
        app: deploy-httpd
    spec:
      containers:
      - name: apache
        image: myos:httpd
        resources:           # set a resource quota for this container
          requests:          # the HPA scales the workload based on usage relative to this request
            cpu: 300m        # CPU request

---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80
  selector:
    app: deploy-httpd
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
[root@master ~]# kubectl replace --force -f mycluster.yaml
deployment.apps/mydeploy replaced
service/websvc replaced
# Verify the service
[root@master ~]# kubectl top pods
NAME                       CPU(cores)   MEMORY(bytes)   
mydeploy-b4f9dc786-w4x2z   6m           18Mi
[root@master ~]# curl -s http://10.245.1.80/info.php
<pre>
Array
(
    [REMOTE_ADDR] => 10.244.219.64
    [REQUEST_METHOD] => GET
    [HTTP_USER_AGENT] => curl/7.61.1
    [REQUEST_URI] => /info.php
)
php_host:   mydeploy-b4f9dc786-w4x2z
1229

HPA Controller

[root@master ~]# vim myhpa.yaml 
---
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2
metadata:
  name: myhpa
spec:
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 60
  scaleTargetRef:
    kind: Deployment
    apiVersion: apps/v1
    name: mydeploy
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50
[root@master ~]# kubectl apply -f myhpa.yaml 
horizontalpodautoscaler.autoscaling/myhpa created
# Right after creation the target shows unknown; this is normal, within about 60s metrics are collected and it reads correctly
[root@master ~]# kubectl get horizontalpodautoscalers
NAME    REFERENCE             TARGETS         MINPODS   MAXPODS   REPLICAS
myhpa   Deployment/mydeploy   <unknown>/50%   1         5         0
[root@master ~]# kubectl get horizontalpodautoscalers
NAME    REFERENCE             TARGETS   MINPODS   MAXPODS   REPLICAS
myhpa   Deployment/mydeploy   0%/50%    1         5         3
Configuration Example

# Terminal 1: generate load against the service
[root@master ~]# while sleep 1;do curl -s "http://10.245.1.80/info.php?id=100000" -o /dev/null; done &
# Terminal 2: watch the HPA react
[root@master ~]# kubectl get hpa -w
NAME    REFERENCE             TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
myhpa   Deployment/mydeploy   0%/50%    1         5         1          1m
myhpa   Deployment/mydeploy   31%/50%   1         5         1          2m
myhpa   Deployment/mydeploy   70%/50%   1         5         1          2m15s
myhpa   Deployment/mydeploy   72%/50%   1         5         2          2m30s
myhpa   Deployment/mydeploy   36%/50%   1         5         2          2m45s
myhpa   Deployment/mydeploy   55%/50%   1         5         2          3m
myhpa   Deployment/mydeploy   58%/50%   1         5         3          3m15s
myhpa   Deployment/mydeploy   39%/50%   1         5         3          3m30s
... ...
myhpa   Deployment/mydeploy   66%/50%   1         5         4          5m
myhpa   Deployment/mydeploy   68%/50%   1         5         5          5m15s
myhpa   Deployment/mydeploy   55%/50%   1         5         5          5m30s
myhpa   Deployment/mydeploy   58%/50%   1         5         5          5m45s
myhpa   Deployment/mydeploy   62%/50%   1         5         5          6m
# If the average load stays below the target for 60s, the HPA automatically scales the workload back down
[root@master ~]# kubectl get hpa -w
NAME    REFERENCE             TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
myhpa   Deployment/mydeploy   52%/50%   1         5         5          13m
myhpa   Deployment/mydeploy   44%/50%   1         5         5          13m15s
myhpa   Deployment/mydeploy   38%/50%   1         5         5          13m30s
myhpa   Deployment/mydeploy   35%/50%   1         5         5          13m45s
myhpa   Deployment/mydeploy   28%/50%   1         5         5          14m
... ...
myhpa   Deployment/mydeploy   8%/50%    1         5         5          18m30s
myhpa   Deployment/mydeploy   9%/50%    1         5         4          18m45s
myhpa   Deployment/mydeploy   9%/50%    1         5         4          19m
myhpa   Deployment/mydeploy   12%/50%   1         5         3          19m15s
myhpa   Deployment/mydeploy   15%/50%   1         5         3          19m30s
myhpa   Deployment/mydeploy   18%/50%   1         5         2          19m45s
myhpa   Deployment/mydeploy   33%/50%   1         5         1          20m
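To see why the autoscaler made each decision (current metrics, scaling events, and whether it is being limited by the min/max replica bounds), describe the HPA object; the Events section at the bottom records every scale-up and scale-down (a quick check):

[root@master ~]# kubectl describe hpa myhpa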

Session Summary:

# This session covered many controllers, each with its own differences; you can ask an AI assistant questions along these lines to get answers better suited to your own working environment.


This concludes the cloud round-two content of the cloud computing course!

If you want more practice, go to a cloud platform such as Huawei Cloud or Alibaba Cloud, create an account, and use the 30-day free-trial cloud products

to get familiar with how those products are used and configured; the platforms also offer free hands-on project courses you can follow to learn basic project architectures.

In the next stage we return to networking and take a deeper look at network architecture in the cloud computing and cloud native fields.


See you in the next stage!!!
