- Make sure the driver (provisioner) for the dynamic volume has been set up
- Create a StorageClass backed by that provisioner
- Create a PVC (a PV is created automatically to match its requested size, access modes, etc.)
- In the Pod spec, use volumes and volumeMounts to bind the PVC and mount the PV behind it (see the sketch below)
- Deleting a Pod does not delete the PVC, and deleting a PVC does not delete the Pods that use it
- When a PVC is removed, its PV is removed by default; if reclaimPolicy is set to Retain (or Recycle), the PV is kept
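A minimal sketch of the Pod-spec step above (names such as web, html-data, and pvc0001 are illustrative):
apiVersion: v1
kind: Pod
metadata:
  name: web
spec:
  containers:
  - name: web
    image: nginx
    volumeMounts:
    - name: html-data                    # must match a volume name defined below
      mountPath: /usr/share/nginx/html   # where the PV is mounted inside the container
  volumes:
  - name: html-data
    persistentVolumeClaim:
      claimName: pvc0001                 # the PVC created in the previous step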
Static volume provisioning:
- Make sure the corresponding volume driver exists
- Create the StorageClass for that driver
- Manually create the PV
- Manually create the PVC; it binds to a matching PV based on size, access modes, and storage class
- The remaining usage steps are the same as for dynamic volumes
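The transcript below applies nfs-csi-test.yml without showing its contents; judging from the resulting PVC (1Gi, RWX, storage class nfs-csi), it is roughly:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc0001
spec:
  accessModes:
  - ReadWriteMany             # shows up as RWX in kubectl get pvc
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs-csi   # the StorageClass backed by the NFS CSI provisioner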
[root@control ~]# kubectl apply -f nfs-csi-test.yml
persistentvolumeclaim/pvc0001 created
[root@control ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
pvc0001 Pending nfs-csi <unset> 7s
# The NFS server backing the dynamic provisioner is not running yet, so the PVC stays Pending, waiting for the volume to be created
# After starting the service on the NFS server, the dynamic volume is created automatically and the PVC becomes Bound
[root@control ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
pvc0001 Bound pvc-4e543d46-aa2c-4259-8a13-6412bc049038 1Gi RWX nfs-csi <unset> 40s
[root@control ~]# ls nfs-*
nfs-csi-test.yml nfs-pvc.yml nfs-pv.yml
[root@control ~]# cat nfs-pv.yml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0003
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ''
  mountOptions:
  - hard
  - nfsvers=4.1
  nfs:
    path: /nfs-share
    server: node1
[root@control ~]# kubectl apply -f nfs-pv.yml
persistentvolume/pv0003 created
[root@control ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE
pv0003 5Gi RWO Retain Available <unset> 7s
pvc-4e543d46-aa2c-4259-8a13-6412bc049038 1Gi RWX Retain Bound default/pvc0001 nfs-csi <unset> 3m50s
[root@control ~]# kubectl delete -f nfs-pv.yml // its directory is mixed in with the dynamically provisioned ones and hard to tell apart, so give the static volume a dedicated directory
persistentvolume "pv0003" deleted
On the NFS server, export the corresponding directory as well.
// Clean up the directories left over from deleted dynamic volumes (adjust to your own environment)
[root@node1 ~]# rm -rf /nfs-share/pvc-255fbbe9-13b9-41d9-8cfc-deb28eea1d42/ /nfs-share/pvc-5376c184-5be5-47a0-ae94-918e7440176e/ /nfs-share/pvc-a1603ec6-4c0c-4c2b-a1a9-26b119fff8ce/
[root@node1 ~]# mkdir /nfs-share/pv0003
[root@node1 ~]# vim /etc/exports
[root@node1 ~]# cat /etc/exports
/nfs-share 192.168.110.0/24(rw,sync,no_root_squash)
/nfs-share/pv0003 192.168.110.0/24(rw,sync,no_root_squash)
[root@node1 ~]# exportfs -rv
exporting 192.168.110.0/24:/nfs-share/pv0003
exporting 192.168.110.0/24:/nfs-share
[root@node1 ~]# ls /nfs-share/pv0003/
[root@control ~]# vim nfs-pv.yml
[root@control ~]# cat nfs-pv.yml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0003
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: ''
  mountOptions:
  - hard
  - nfsvers=4.1
  nfs:
    path: /nfs-share/pv0003
    server: node1
[root@control ~]# kubectl apply -f nfs-pv.yml
persistentvolume/pv0003 created
[root@control ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS VOLUMEATTRIBUTESCLASS REASON AGE
pv0003 5Gi RWO Retain Available <unset> 6s
pvc-4e543d46-aa2c-4259-8a13-6412bc049038 1Gi RWX Retain Bound default/pvc0001 nfs-csi <unset> 9m11s
[root@control ~]# kubectl describe pv pv0003
Name: pv0003
Labels: <none>
Annotations: <none>
Finalizers: [kubernetes.io/pv-protection]
StorageClass:
Status: Available
Claim:
Reclaim Policy: Retain
Access Modes: RWO
VolumeMode: Filesystem
Capacity: 5Gi
Node Affinity: <none>
Message:
Source:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    node1
    Path:      /nfs-share/pv0003
    ReadOnly:  false
Events: <none>
[root@control ~]# vim nfs-pvc.yml
[root@control ~]# cat nfs-pvc.yml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc0003
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
  storageClassName: ''
[root@control ~]# kubectl apply -f nfs-pvc.yml
persistentvolumeclaim/pvc0003 created
[root@control ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
pvc0001 Bound pvc-4e543d46-aa2c-4259-8a13-6412bc049038 1Gi RWX nfs-csi <unset> 11m
pvc0003 Bound pv0003 5Gi RWO <unset> 6s
[root@control ~]# kubectl describe pvc pv0003
Error from server (NotFound): persistentvolumeclaims "pv0003" not found
[root@control ~]# kubectl describe pvc pvc0003
Name: pvc0003
Namespace: default
StorageClass:
Status: Bound
Volume: pv0003
Labels: <none>
Annotations: pv.kubernetes.io/bind-completed: yes
             pv.kubernetes.io/bound-by-controller: yes
Finalizers: [kubernetes.io/pvc-protection]
Capacity: 5Gi
Access Modes: RWO
VolumeMode: Filesystem
Used By: <none>
Events: <none>
[root@control ~]# vim test-nginx.yml
[root@control ~]# cp test-nginx.yml static-volume-nginx.yml
[root@control ~]# vim static-volume-nginx.yml
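static-volume-nginx.yml itself is not shown in the transcript; based on the describe output below (3 replicas, image mynginx:new_files, label app=frontend, PVC pvc0003 mounted at /usr/share/nginx/html), it is roughly:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: static-nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
    spec:
      containers:
      - name: static-nginx
        image: mynginx:new_files
        ports:
        - containerPort: 80
        volumeMounts:
        - name: nfs-static
          mountPath: /usr/share/nginx/html
      volumes:
      - name: nfs-static
        persistentVolumeClaim:
          claimName: pvc0003   # the statically provisioned PVC bound to pv0003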
[root@control ~]# kubectl apply -f static-volume-nginx.yml
deployment.apps/static-nginx created
[root@control ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
static-nginx-7d8cfbdf8-6ckzq 1/1 Running 0 11s
static-nginx-7d8cfbdf8-qqtft 1/1 Running 0 11s
static-nginx-7d8cfbdf8-x8znc 1/1 Running 0 11s
[root@control ~]# kubectl describe pod static-nginx-7d8cfbdf8-6ckzq
Name: static-nginx-7d8cfbdf8-6ckzq
Namespace: default
Priority: 0
Service Account: default
Node: node2/192.168.110.22
Start Time: Wed, 16 Oct 2024 09:02:41 +0800
Labels:           app=frontend
                  pod-template-hash=7d8cfbdf8
Annotations: <none>
Status: Running
IP: 10.244.2.161
IPs:
  IP:             10.244.2.161
Controlled By: ReplicaSet/static-nginx-7d8cfbdf8
Containers:
  static-nginx:
    Container ID:   containerd://2d30d84d1dee46efd77b149690d09c59c44cdc97e14898f6c645acfe1133ac66
    Image:          mynginx:new_files
    Image ID:       sha256:2a1e46ec2739c364dea52056f4440f3abd9a4dc0a3afcc8e705637aef5fceabd
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Wed, 16 Oct 2024 09:02:43 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /usr/share/nginx/html from nfs-static (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-lh9fz (ro)
Conditions:
  Type                        Status
  PodReadyToStartContainers   True
  Initialized                 True
  Ready                       True
  ContainersReady             True
  PodScheduled                True
Volumes:
  nfs-static:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  pvc0003
    ReadOnly:   false
  kube-api-access-lh9fz:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations:      node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                  node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  28s   default-scheduler  Successfully assigned default/static-nginx-7d8cfbdf8-6ckzq to node2
  Normal  Pulled     27s   kubelet            Container image "mynginx:new_files" already present on machine
  Normal  Created    27s   kubelet            Created container static-nginx
  Normal  Started    27s   kubelet            Started container static-nginx
For hostPath volumes, make sure the directory has already been created on the corresponding node; for details see the Kubernetes documentation:
Configure a Pod to Use a PersistentVolume for Storage | Kubernetes: https://kubernetes.io/zh-cn/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolume
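For reference, the hostPath PersistentVolume used in that task looks roughly like this (the /mnt/data directory must already exist on the node):
apiVersion: v1
kind: PersistentVolume
metadata:
  name: task-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: "/mnt/data"   # create this directory on the node before the Pod uses the volume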