安装镜像
由于quay.io仓库部分镜像国内无法下载,所以替换为其他镜像地址
docker pull vbouchaud/nfs-client-provisioner:v3.1.1
nfs4服务端配置
mkdir -p /nfs/data/
chmod 777 /nfs/data/
yum install -y nfs-utils rpcbind
更改归属组与用户
chown nfsnobody /nfs/data/
vi /etc/exports
/nfs/data *(rw,fsid=0,sync,no_wdelay,insecure_locks,no_root_squash)
为了方便接下来两个实验,提前建立2个共享子目录。
mkdir -p /nfs/data/mariadb
mkdir -p /nfs/data/nginx
systemctl start rpcbind
systemctl start nfs
设置开机启动
systemctl enable rpcbind
systemctl enable nfs
nfs的storageClass配置
rbac
nfsdynamic/nfsrbac.yml。每次使用该配置文件时,只需要调整ClusterRoleBinding、RoleBinding的namespace值;如果服务是部署在默认的namespace中,配置文件不需要调整。
kind: ServiceAccount
apiVersion: v1
metadata:
name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: default #替换成要部署NFS Provisioner的namespace
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: default #替换成要部署NFS Provisioner的namespace
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
storageClass
nfsdynamic/nfsstorage.yml
kind: Deployment
apiVersion: apps/v1
metadata:
name: nfs-client-provisioner
labels:
app: nfs-client-provisioner
spec:
replicas: 1
strategy:
#设置升级策略为删除再创建(默认为滚动更新)
type: Recreate
selector:
matchLabels:
app: nfs-client-provisioner
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
#由于quay.io仓库部分镜像国内无法下载,所以替换为其他镜像地址
image: vbouchaud/nfs-client-provisioner:v3.1.1
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: nfs-client #--- nfs-provisioner的名称,以后设置的 storageclass要和这个保持一致
- name: NFS_SERVER
value: 192.168.198.156 #NFS服务器地址,与volumes.nfs.servers保 持一致
- name: NFS_PATH
value: /nfs/data #NFS服务共享目录地址,与volumes.nfs.path 保持一致
volumes:
- name: nfs-client-root
nfs:
server: 192.168.198.156 #NFS服务器地址,与spec.containers.env.value保持一致
path: /nfs/data #NFS服务器目录,与spec.containers.env.value保持一致
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-storage
annotations:
storageclass.kubernetes.io/is-default-class: "true" #设置为默认的storageclass
provisioner: nfs-client #动态卷分配者名称,必须和创建的"provisioner"变量中设置的name一致
parameters:
archiveOnDelete: "true" #设置为"false"时删除PVC不会保留数据,"true"则保留数据
mountOptions:
- hard #指定为硬挂载方式
- nfsvers=4 #指定NFS版本,这个需要根据 NFS Server 版本号设置
测试pvc
nfsdynamic/nfstestpvc.yml
用于测试nfs动态pv是否成功。
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: test-pvc
spec:
storageClassName: nfs-storage #需要与上面创建的storageclass的名称一致
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Mi
部署nfs测试服务
kubectl apply -f .
查看storageClass
kubectl get storageclasses.storage.k8s.io || kubectl get sc
查看mariadb服务
kubectl get svc
查看pv pvc
kubectl get pv,pvc
查看statefulSet
kubectl get sts
查看mariadb、storageClass的pods
kubectl get pods
删除服务
pv是动态生成,通过查看pv状态,发现pv不会自动回收。
删除mariadb服务
kubectl delete -f .
查看动态nfs的pv状态。发现pv的status状态是:Released
kubectl get pv
编辑pv的配置文件
kubectl edit pv pvc-59fb2735-9681-426a-8805-8c94685a07e3
将spec.claimRef属性下的所有内容全部删除
claimRef:
apiVersion: v1
kind: PersistentVolumeClaim
name: test-pvc
namespace: default
resourceVersion: "162046"
uid: 59fb2735-9681-426a-8805-8c94685a07e3
再次查看pv状态。发现pv的status状态是:Available
kubectl get pv
删除pv
kubectl delete pv pvc-59fb2735-9681-426a-8805-8c94685a07e3
删除共享目录动态pv的目录
rm -rf pvc-59fb2735-9681-426a-8805-8c94685a07e3
网友评论