kubernetes dashboard

Revisiting the k8s dashboard installation
GitHub: https://github.com/kubernetes/dashboard

Apply the dashboard v2.1 deployment manifest straight from the web:

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.1.0/aio/deploy/recommended.yaml

The Service type defaults to ClusterIP, so the dashboard is only reachable at a cluster-internal Service address; to reach it from outside you have to cross the cluster boundary.
Options include Ingress, NodePort, an external LoadBalancer, pod hostPort forwarding, pod hostNetwork, and so on. Here we use the simplest one, NodePort.

Edit the Service in the recommended.yaml manifest:

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
  - nodePort: 30443
    port: 443
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort

Or change svc/kubernetes-dashboard directly once it is running:

kubectl edit svc/kubernetes-dashboard  -n kubernetes-dashboard  # type: NodePort
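If you prefer not to open an editor, a kubectl patch along these lines also switches the Service type; the nodePort is then auto-assigned unless you patch it explicitly (a minimal sketch, values match the manifest above):

kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard \
  --type merge -p '{"spec":{"type":"NodePort"}}'
kubectl -n kubernetes-dashboard get svc kubernetes-dashboard   # note the assigned nodePort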

Create a dedicated namespace if you want one:

root@k8s-m:/data/dashboard# kubectl create namespace admin-ns --dry-run=client -o yaml
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: admin-ns
spec: {}
status: {}

Create a ServiceAccount for logging in to the dashboard:

root@k8s-m:/data/dashboard# kubectl create sa superadmin -n admin-ns
serviceaccount/superadmin created
root@k8s-m:/data/dashboard# kubectl create sa superadmin -n admin-ns --dry-run=client -o yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  creationTimestamp: null
  name: superadmin
  namespace: admin-ns

Create a ClusterRoleBinding that binds the built-in cluster administrator role (--clusterrole=cluster-admin) to the ServiceAccount just created:

root@k8s-m:/data/dashboard# kubectl create clusterrolebinding superadmin --clusterrole=cluster-admin --serviceaccount=admin-ns:superadmin --dry-run=client -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: null
  name: superadmin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: superadmin
  namespace: admin-ns

Get the token and log in:

root@k8s-m:/data/dashboard# kubectl describe sa/superadmin -n admin-ns
Name:                superadmin
Namespace:           admin-ns
Labels:              <none>
Annotations:         <none>
Image pull secrets:  <none>
Mountable secrets:   superadmin-token-gtrj5
Tokens:              superadmin-token-gtrj5
Events:              <none>
root@k8s-m:/data/dashboard# kubectl describe secret/superadmin-token-gtrj5 -n admin-ns
Name:         superadmin-token-gtrj5
Namespace:    admin-ns
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: superadmin
              kubernetes.io/service-account.uid: 339d914d-3ae8-440a-a590-4a304400ef17

Type:  kubernetes.io/service-account-token

Data
====
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6InZk--xxx # this is the token to paste into the login page
ca.crt:     1066 bytes
namespace:  8 bytes
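To grab just the token string (on clusters of this vintage, where a token Secret is still auto-created for every ServiceAccount), a one-liner like this should work:

kubectl -n admin-ns get secret \
  $(kubectl -n admin-ns get sa superadmin -o jsonpath='{.secrets[0].name}') \
  -o jsonpath='{.data.token}' | base64 -d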

The dashboard UI ships with CPU and memory charts, but the data comes from Metrics Server, so Metrics Server has to be deployed before the charts show anything. If you plan to deploy kube-prometheus later you do not need a standalone Metrics Server, since that project already bundles it.

metrics-server GitHub: https://github.com/kubernetes-sigs/metrics-server

Deploy from the online manifest:

kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

The pod kept restarting endlessly, with the liveness and readiness probes reported as failing. Swapping the image and adding two arguments fixed it for me;
the root cause is actually HTTPS/TLS verification against the kubelets.

spec:
  hostNetwork: true
  serviceAccountName: metrics-server
  containers:
  - name: metrics-server
    image: bitnami/metrics-server:0.4.1 # alternative image; you can also try the official one
    #image: k8s.gcr.io/metrics-server/metrics-server:v0.4.1
    imagePullPolicy: IfNotPresent
    args:
    - --cert-dir=/tmp
    - --secure-port=4443
    - --kubelet-insecure-tls # skip TLS certificate verification against the kubelets
    - --kubelet-use-node-status-port
    - --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalDNS,ExternalIP,Hostname # address types to try, in order
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /livez
        port: https
        scheme: HTTPS
      periodSeconds: 10
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /readyz
        port: https
        scheme: HTTPS
      periodSeconds: 10
    ports:
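Once the metrics-server pod is Running and Ready, the metrics API can be verified with kubectl top; the dashboard charts should start filling in shortly after:

kubectl get pods -n kube-system | grep metrics-server
kubectl top nodes
kubectl top pods -A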

Summary

  • The dashboard only provides a web GUI; it does not do authentication or authorization itself, it merely presents the supplied account to the Kubernetes API for authentication.
  • The dashboard runs in a Pod, so the account it uses to authenticate to the Kubernetes API on your behalf must be a ServiceAccount, not a User (human) account.
  • A ClusterRoleBinding can only bind a ClusterRole; a RoleBinding can bind either a Role or a ClusterRole. Rule of thumb: binding a big role with a small (namespaced) binding scopes its permissions down to that namespace.
  • Metrics Server collects CPU and memory usage from pods; kube-prometheus already integrates this capability, so deploy a standalone Metrics Server only if you need it.

kubernetes configmap

Create a ConfigMap

# kubectl  get cm/calico-config -n kube-system  -o yaml
# kubectl get cm/kube-flannel-cfg -n kube-system -o yaml
kubectl create ns cm-ns
kubectl create cm -h
kubectl create cm filebeat-cfg -n cm-ns \
  --from-literal=redis_host="redis.default.svc.cluster.local" \
  --from-literal=log_level="Info"
kubectl get cm/filebeat-cfg -n cm-ns -o yaml
apiVersion: v1
data:
  log_level: Info
  redis_host: redis.default.svc.cluster.local
kind: ConfigMap
metadata:
  creationTimestamp: "2021-01-11T05:58:23Z"
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:data:
        .: {}
        f:log_level: {}
        f:redis_host: {}
    manager: kubectl-create
    operation: Update
    time: "2021-01-11T05:58:23Z"
  name: filebeat-cfg
  namespace: cm-ns
  resourceVersion: "573737"
  uid: b06442dc-90e6-446b-8a3c-f119f26226d4

Start a Pod that consumes the ConfigMap as environment variables

root@k8s-m:/data/configmap# cat filebeat-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: filebeat-pod
  namespace: cm-ns
spec:
  containers:
  - name: filebeat
    image: ikubernetes/filebeat:5.6.5-alpine
    env:
    - name: REDIS_HOST
      valueFrom:
        configMapKeyRef:
          name: filebeat-cfg # ConfigMap name
          key: redis_host    # key defined in the ConfigMap
    - name: LOG_LEVEL
      valueFrom:
        configMapKeyRef:
          name: filebeat-cfg # ConfigMap name
          key: log_level     # key defined in the ConfigMap

root@k8s-m:/data/configmap# kubectl apply -f filebeat-pod.yaml
# the defined variables are visible inside the pod
root@k8s-m:/data/configmap# kubectl exec -it pods/filebeat-pod -n cm-ns -- env | grep REDIS_HOST
REDIS_HOST=redis.default.svc.cluster.local
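If you want every key of the ConfigMap injected as an environment variable instead of listing them one by one, envFrom does that; a minimal sketch of the container section (variable names then equal the ConfigMap keys):

  containers:
  - name: filebeat
    image: ikubernetes/filebeat:5.6.5-alpine
    envFrom:
    - configMapRef:
        name: filebeat-cfg   # imports redis_host and log_level as env vars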

Define two data files

root@k8s-m:/data/configmap/nginx# cat server1.conf
server {
    server_name www.first.com;
    listen 80;
    location / {
        root "/html/first";
    }
}
root@k8s-m:/data/configmap/nginx# cat server2.conf
server {
    server_name www.second.com;
    listen 80;
    location / {
        root "/html/second";
    }
}

Create nginx-cfg

kubectl  create cm nginx-cfg --from-file=server1.conf  --from-file=server-sec.conf=server2.conf -n cm-ns
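A quick check that both keys made it into the ConfigMap:

kubectl describe cm/nginx-cfg -n cm-ns   # Data should list server1.conf and server-sec.conf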

A Pod that mounts nginx-cfg as a volume

[root@k8smaster nginx]# cat nginx-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
  namespace: cm-ns
spec:
  containers:
  - name: nginx-t
    image: nginx
    volumeMounts:
    - name: config-vol
      mountPath: /etc/nginx/conf.d/
  volumes:
  - name: config-vol
    configMap:
      name: nginx-cfg
      items:
      - key: server1.conf        # key as defined in the ConfigMap
        path: server-first.conf  # file name inside the pod
      - key: server-sec.conf     # key as defined in the ConfigMap
        path: server-second.conf # file name inside the pod

[root@k8smaster nginx]# kubectl apply -f nginx-pod.yaml
[root@k8smaster nginx]# kubectl exec -it pods/nginx-pod -n cm-ns -- ls /etc/nginx/conf.d
server-first.conf  server-second.conf

kubectl edit cm/nginx-cfg -n cm-ns  # changes show up in the mounted files within about 60s

Summary

A ConfigMap is also a volume of sorts, a special one used to hold configuration variables or files for the programs running inside a Pod.

kubernetes-csi-nfs

Overview

To provision PVs dynamically in Kubernetes you must first create a StorageClass, and every StorageClass maps to a provisioner. Kubernetes ships with many built-in provisioners, but annoyingly there is no built-in NFS provisioner. For players like us without a budget, NFS is the simplest and most direct way to provide backend storage, so it is a pity Kubernetes does not include one. Fortunately that is not a problem: Kubernetes supports external provisioners, and for NFS you can use nfs-client-provisioner.
Project link

https://github.com/kubernetes-retired/external-storage/tree/master/nfs-client

Create the NFS server

yum install nfs-utils -y
[root@node02 deploy]# cat /etc/exports
/csc-nfsdata/ *(rw,sync,no_root_squash)
[root@node02 deploy]#
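With the export in place, start the NFS service and confirm the export is visible (service name as shipped by nfs-utils on CentOS/RHEL):

systemctl enable --now nfs-server
exportfs -ra
showmount -e localhost   # should list /csc-nfsdata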

Open the firewall

vim /etc/nfs.conf
[mountd]

port=port-number
# This adds the -p port-number option to the rpc.mount command line: rpc.mount -p port-number.

firewall-cmd --permanent --add-service mountd
firewall-cmd --permanent --add-service rpc-bind
firewall-cmd --permanent --add-service nfs
firewall-cmd --permanent --add-port=<mountd-port>/tcp
firewall-cmd --permanent --add-port=<mountd-port>/udp
firewall-cmd --reload

Configure the NFS-Client provisioner; first clone the project

git clone https://github.com/kubernetes-retired/external-storage.git

Then change into the nfs-client/deploy/ directory.

You will see the following files:

cd nfs-client/deploy/
[root@nginx1-pro deploy]# ll
total 28
-rw-r--r-- 1 root root  225 Sep  3 14:26 class.yaml          ## change the ns to your own
-rw-r--r-- 1 root root 1030 Sep  3 13:17 deployment-arm.yaml
-rw-r--r-- 1 root root 1040 Sep  3 13:30 deployment.yaml     ## change the ns to your own
drwxr-xr-x 2 root root  214 Sep  3 13:32 objects
-rw-r--r-- 1 root root 1834 Sep  3 13:19 rbac.yaml           ## change the ns to your own
-rw-r--r-- 1 root root  241 Sep  3 13:55 test-claim.yaml
-rw-r--r-- 1 root root  399 Sep  3 13:38 test-pod.yaml

The first thing to do is create a dedicated namespace:

[root@k8smaster ~]# kubectl create ns csi-nfs
namespace/csi-nfs created

Then change the namespace in rbac.yaml and deployment.yaml to the one you just created.

Once that is done, edit deployment.yaml:

          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 192.168.1.11
            - name: NFS_PATH
              value: /data/kubernetes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.11
            path: /data/kubernetes

Apply the rbac and deployment YAML files:

[root@node02 deploy]# kubectl apply -f rbac.yaml
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
[root@node02 deploy]#
[root@node02 deploy]# kubectl apply -f deployment.yaml
deployment.apps/nfs-client-provisioner created
[root@node02 deploy]#
[root@node02 deploy]# kubectl apply -f class.yaml
storageclass.storage.k8s.io/managed-nfs-storage created
[root@node02 deploy]#
# check the StorageClass
[root@node02 deploy]# kubectl get sc
NAME                  PROVISIONER      RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
managed-nfs-storage   fuseim.pri/ifs   Delete          Immediate           false                  20s
[root@node02 deploy]#
cat class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"

The provisioner here is the PROVISIONER_NAME from the deployment above. Another important parameter is archiveOnDelete: if you want the data kept after the PVC is deleted, set it to "true"; otherwise deleting the PVC also deletes the PV and the data is lost.

kubectl apply -f class.yaml

It really is that simple: as long as the PVC carries the annotation

annotations:
  volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"

a PV is provisioned for it dynamically.
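The volume.beta.kubernetes.io/storage-class annotation is the legacy form; on current clusters you would normally reference the class in spec.storageClassName instead, which should work with this provisioner as well, for example:

spec:
  storageClassName: managed-nfs-storage
  accessModes:
  - ReadWriteMany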

Try it out

[root@node02 deploy]# kubectl apply -f test-claim.yaml

[root@node02 deploy]# cat test-claim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
[root@node02 deploy]#
# check the pv and pvc
[root@node02 deploy]# kubectl get pv,pvc
NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                STORAGECLASS          REASON   AGE
persistentvolume/pvc-a0b85043-84a6-4f8a-9016-bba084a3f014   1Mi        RWX            Delete           Bound    default/test-claim   managed-nfs-storage            29m

NAME                               STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS          AGE
persistentvolumeclaim/test-claim   Bound    pvc-a0b85043-84a6-4f8a-9016-bba084a3f014   1Mi        RWX            managed-nfs-storage   29m
[root@node02 deploy]#
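To confirm the claim is actually usable, a throwaway pod in the spirit of the repo's test-pod.yaml can mount it and drop a file onto the NFS share (image and file name here are just illustrative):

kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: busybox
    command: ["/bin/sh", "-c", "touch /mnt/SUCCESS && sleep 3600"]
    volumeMounts:
    - name: nfs-pvc
      mountPath: /mnt
  volumes:
  - name: nfs-pvc
    persistentVolumeClaim:
      claimName: test-claim

After it runs, a SUCCESS file should appear under the provisioned directory on the NFS server.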

Try deleting the PVC and you will see the PV gets deleted as well, although the data behind the PV is not removed. Note that you must delete the pod that uses the PVC before deleting the PVC.

Troubleshooting a Pending PVC

If your Kubernetes is v1.20 / v1.21.0 or later, you need to modify kube-apiserver.yaml to keep the selfLink feature enabled.

Check which node the nfs-client-provisioner container runs on and whether it is Running, and make sure the NFS client is installed on that host so it can mount the nfs-server export:

root@node01:/data/nfs-server/external-storage/nfs-client/deploy# kubectl get pods -n csi-nfs -o wide
NAME                                      READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
nfs-client-provisioner-56f95d5b56-k5cjl   1/1     Running   0          37m   172.16.18.93   node03   <none>           <none>

On that node, check the log of the nfs-client-provisioner-xx container:

tail -f  /var/log/containers/nfs-client-provisioner-6b4
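Reading the log through kubectl works just as well and saves hunting for the file on the node (deployment name taken from the apply output above):

kubectl logs -f deploy/nfs-client-provisioner -n csi-nfs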

Fix

Reference: www.orchome.com/10024
The fix is to edit /etc/kubernetes/manifests/kube-apiserver.yaml
here:

spec:
  containers:
  - command:
    - kube-apiserver
add this line:

    - --feature-gates=RemoveSelfLink=false
then apply it:

kubectl apply -f /etc/kubernetes/manifests/kube-apiserver.yaml

Check the PVC status, and make sure the exported nfs-server directory has 777 permissions; otherwise PV creation fails and the PVC also stays Pending.

kubectl get pvc # Pending

Summary

  • References: see the project link and reference above

kubernetes volumes

# kubectl  explain  pods.spec.volumes
# kubectl explain pods.spec.containers.volumeMounts

hostPath: a local volume on the node

root@k8s-m:/data/volumes# cat 01-volume-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: vol-demo
  namespace: vol
  labels:
    app: vol-demo
spec:
  nodeName: k8s-n
  containers:
  - name: vol-demo
    image: docker.io/ikubernetes/myapp:v1
    volumeMounts:
    - mountPath: /data
      name: webstore
  volumes:
  - name: webstore
    hostPath:
      path: /volumes/vol-demo
      type: DirectoryOrCreate

root@k8s-m:/data/volumes# kubectl apply -f 01-volume-demo.yaml
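A quick way to convince yourself the hostPath mapping works (assuming ssh access to the node; the file name is arbitrary): write a file from inside the pod and look for it on node k8s-n:

kubectl exec -it pods/vol-demo -n vol -- touch /data/from-pod.txt
ssh k8s-n ls /volumes/vol-demo        # from-pod.txt should be there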

emptyDir: ephemeral storage; the data is lost once the pod is gone

root@k8s-m:/data/volumes# cat 02-emptydir.yaml
apiVersion: v1
kind: Pod
metadata:
  name: vol-emptydir-pod
  namespace: vol
spec:
  volumes:
  - name: html
    emptyDir: {}
  containers:
  - name: nginx
    image: nginx:1.14-alpine
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
  - name: createpage
    image: alpine
    volumeMounts:
    - name: html
      mountPath: /html
    command: ["/bin/sh","-c"]
    args:
    - while true;do
        echo $(hostname) $(date) >> /html/index.html;
        sleep 2;
      done

root@k8s-m:/data/volumes# kubectl apply -f 02-emptydir.yaml
root@k8s-m:/data/volumes# kubectl get pods -n vol -o wide
NAME               READY   STATUS    RESTARTS   AGE     IP              NODE    NOMINATED NODE   READINESS GATES
vol-emptydir-pod   2/2     Running   0          2m47s   172.16.234.11   k8s-n   <none>           <none>

root@k8s-m:/data/volumes# wget -O - -q http://172.16.234.11
vol-emptydir-pod Mon Jan 11 03:30:24 UTC 2021
vol-emptydir-pod Mon Jan 11 03:30:26 UTC 2021
vol-emptydir-pod Mon Jan 11 03:30:28 UTC 2021 # as expected, a new line is appended every 2 seconds
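emptyDir can also be backed by RAM (tmpfs) instead of node disk by setting medium: Memory; a sketch of just the volume stanza (sizeLimit is optional):

  volumes:
  - name: html
    emptyDir:
      medium: Memory
      sizeLimit: 64Mi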

NFS volume

root@k8s-m:/data/volumes# cat 03-nfs-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: liveness-httpget-pod
  namespace: vol
spec:
  containers:
  - name: liveness-httpget-pod
    image: nginx
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    livenessProbe:
      httpGet:
        port: http
        path: /index.html
      initialDelaySeconds: 1
      periodSeconds: 3
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html/
  volumes:
  - name: html
    nfs:
      path: /nginx_share
      server: 192.168.1.10

root@k8s-m:/data/volumes# kubectl apply -f 03-nfs-pod.yaml
# on the NFS server: apt install nfs-kernel-server
root@k8s-m:/data/volumes# kubectl get pods -n vol -o wide
NAME                   READY   STATUS    RESTARTS   AGE   IP              NODE    NOMINATED NODE   READINESS GATES
liveness-httpget-pod   1/1     Running   0          16m   172.16.234.13   k8s-n   <none>           <none>

root@k8s-m:/data/volumes# wget -O - -q 172.16.234.13
test nginx for nfs vol # success
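Expanding on the apt note above, the NFS side used by this pod boils down to something like the following on 192.168.1.10 (Ubuntu package names assumed), plus the NFS client package on every worker node so the kubelet can mount the export:

# on the NFS server (192.168.1.10)
apt install nfs-kernel-server
echo '/nginx_share *(rw,sync,no_root_squash)' >> /etc/exports
exportfs -ra

# on every worker node
apt install nfs-common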

A Redis Pod mounting an NFS volume: data survives pod re-creation

root@k8s-m:/data/volumes# cat 04-nfs-redis.yaml
apiVersion: v1
kind: Pod
metadata:
  name: redis
  namespace: vol
  labels:
    app: redis
spec:
  containers:
  - name: redis
    image: redis:5.0.4
    ports:
    - name: redis-port
      containerPort: 6379
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    nfs:
      path: /vols/v6
      server: 192.168.1.10

root@k8s-m:/data/volumes# kubectl apply -f 04-nfs-redis.yaml
root@k8s-m:/data/volumes# kubectl get pods -n vol -o wide
NAME    READY   STATUS    RESTARTS   AGE   IP              NODE    NOMINATED NODE   READINESS GATES
redis   1/1     Running   0          12m   172.16.234.12   k8s-n   <none>           <none>

root@k8s-m:/data/volumes# kubectl exec -it pods/redis -n vol -- bash
root@redis:/data# redis-cli
127.0.0.1:6379> set name wxq
OK
127.0.0.1:6379> set age 25
OK
127.0.0.1:6379> save
OK

root@k8s-m:/data/volumes# kubectl delete -f 04-nfs-redis.yaml
root@k8s-m:/data/volumes# kubectl apply -f 04-nfs-redis.yaml
root@k8s-m:/data/volumes# kubectl exec -it pods/redis -n vol -- bash
root@redis:/data# redis-cli
127.0.0.1:6379> get name
"wxq" # the data is still there after the volume is remounted

Create a PV backed by NFS

root@k8s-m:/data/volumes/pvc# cat 01-pv-nfs.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: test-vol
  labels:
    storages: nfs
spec:
  capacity:
    storage: 2Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  - ReadWriteOnce
  - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.10
    path: /vols/v7

root@k8s-m:/data/volumes/pvc# kubectl apply -f 01-pv-nfs.yaml
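Right after applying, the PV should sit in the Available state until a claim binds it:

kubectl get pv test-vol   # STATUS: Available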

Create a PVC

root@k8s-m:/data/volumes/pvc# cat 02-pvc-nfs-yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
  namespace: vol
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi
  #storageClassName: slow
  #selector:
  #  matchLabels:
  #    release: "stable"
  #  matchExpressions:
  #  - {key: environment, operator: In, values: [dev]}

root@k8s-m:/data/volumes/pvc# kubectl apply -f 02-pvc-nfs-yaml

Create a Pod that claims the PVC

root@k8s-m:/data/volumes/pvc# cat 03-pod-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pvc-pod-demo
  namespace: vol
  labels:
    app: pvc-pod
spec:
  nodeName: k8s-n
  containers:
  - name: pvc-pod
    image: docker.io/ikubernetes/myapp:v1
    volumeMounts:
    - mountPath: /data
      name: testpvc
  volumes:
  - name: testpvc
    persistentVolumeClaim:
      claimName: test-pvc

root@k8s-m:/data/volumes/pvc# kubectl apply -f 03-pod-demo.yaml
root@k8s-m:/data# kubectl get pv
NAME       CAPACITY   ACCESS MODES    RECLAIM POLICY   STATUS   CLAIM          STORAGECLASS   REASON   AGE
test-vol   2Gi        RWO,ROX,RWX     Retain           Bound    vol/test-pvc                           11m
root@k8s-m:/data# kubectl get pvc -n vol
NAME       STATUS   VOLUME     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-pvc   Bound    test-vol   2Gi        RWO,ROX,RWX                   6m47s

Summary

Nothing fancy here: practice makes perfect, and understanding how to apply these pieces matters most.