Rook deployment: first make sure every worker node has an unused, unallocated disk for Ceph to consume; you can check with lsblk -l.
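A quick way to confirm a node really has a raw, unformatted disk (the device name below is only an example and will differ on your hosts):

# List block devices with filesystem info; a disk Ceph can consume has
# empty FSTYPE and MOUNTPOINT columns and carries no partitions.
lsblk -f
# Inspect a single candidate device (example name, adjust to your node):
lsblk /dev/sdb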
Rook cluster deployment
git clone --single-branch --branch v1.10.6 https://github.com/rook/rook.git
cd rook/deploy/examples
kubectl create -f crds.yaml -f common.yaml -f operator.yaml
kubectl create -f cluster.yaml
After installation, wait a while until all the pods are up and running:
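A convenient way to watch the rollout with standard kubectl commands:

# Watch pods come up in the rook-ceph namespace
kubectl -n rook-ceph get pod -w
# If something stays in Pending or CrashLoopBackOff, check the operator logs
kubectl -n rook-ceph logs deploy/rook-ceph-operator -f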
[root@k-116-m1 examples]# kubectl -n rook-ceph get pod
NAME READY STATUS RESTARTS AGE
csi-cephfsplugin-5nrbf 2/2 Running 0 18h
csi-cephfsplugin-bhvvg 2/2 Running 0 17h
csi-cephfsplugin-provisioner-79b4b58798-964rr 5/5 Running 0 3h33m
csi-cephfsplugin-provisioner-79b4b58798-vrq79 5/5 Running 0 3h33m
csi-cephfsplugin-zjc44 2/2 Running 0 18h
csi-rbdplugin-ggtlj 2/2 Running 0 18h
csi-rbdplugin-provisioner-97f8449cd-6l8bv 5/5 Running 0 17h
csi-rbdplugin-provisioner-97f8449cd-cfwj8 5/5 Running 0 17h
csi-rbdplugin-r87fc 2/2 Running 0 17h
csi-rbdplugin-tpm8q 2/2 Running 0 18h
rook-ceph-crashcollector-k-116-n1-5dd965cb4f-7v4cj 1/1 Running 0 18h
rook-ceph-crashcollector-k-116-n2-c6f4784b4-knhn4 1/1 Running 0 3h29m
rook-ceph-crashcollector-k-116-n3-bcc6bd977-bdmkp 1/1 Running 0 3h29m
rook-ceph-mds-myfs-a-77767dfd95-xqw24 2/2 Running 0 3h29m
rook-ceph-mds-myfs-b-7cdf488fd4-8558w 2/2 Running 0 3h29m
rook-ceph-mgr-a-7768894c57-dz69h 3/3 Running 0 18h
rook-ceph-mgr-b-b7c6dfb68-kxfqq 3/3 Running 0 18h
rook-ceph-mon-a-68bf5f595f-rp7rf 2/2 Running 0 18h
rook-ceph-mon-b-854b75dc69-wdp8j 2/2 Running 0 18h
rook-ceph-mon-d-68cd9b8778-gffm4 2/2 Running 0 18h
rook-ceph-operator-946fb5c77-dvnm4 1/1 Running 0 18h
rook-ceph-osd-0-7b8cd54847-tl9nf 2/2 Running 0 18h
rook-ceph-osd-1-7f59fd6b44-tw858 2/2 Running 0 18h
rook-ceph-osd-2-7f58c999f4-86x8z 2/2 Running 0 18h
rook-ceph-osd-prepare-k-116-n1-fgrmb 0/1 Completed 0 18h
rook-ceph-osd-prepare-k-116-n2-hpmn8 0/1 Completed 0 18h
rook-ceph-osd-prepare-k-116-n3-85l68 0/1 Completed 0 18h
rook-ceph-tools-6584b468fd-pkh24 1/1 Running 0 17h
Deploy the toolbox
kubectl create -f deploy/examples/toolbox.yaml
[root@k-116-m1 examples]# kubectl -n rook-ceph rollout status deploy/rook-ceph-tools
deployment "rook-ceph-tools" successfully rolled out
[root@k-116-m1 examples]# kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash
bash-4.4$ cephfs status
bash: cephfs: command not found
bash-4.4$ ceph status
  cluster:
    id:     066a4f09-c9aa-4b54-bd75-5469d5236d8e
    health: HEALTH_WARN
            mons b,d are low on available space

  services:
    mon: 3 daemons, quorum a,b,d (age 18h)
    mgr: a(active, since 18h), standbys: b
    mds: 1/1 daemons up, 1 hot standby
    osd: 3 osds: 3 up (since 18h), 3 in (since 18h)

  data:
    volumes: 1/1 healthy
    pools:   3 pools, 49 pgs
    objects: 28 objects, 1.1 MiB
    usage:   91 MiB used, 30 GiB / 30 GiB avail
    pgs:     49 active+clean

  io:
    client:   853 B/s rd, 1 op/s rd, 0 op/s wr

bash-4.4$ ceph osd pool ls
.mgr
myfs-metadata
myfs-data0
bash-4.4$
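As the transcript shows, cephfs status is not a command; CephFS-specific status lives under ceph fs. For example, inside the toolbox:

# Show MDS ranks, standbys and pool usage for the filesystem named myfs
ceph fs status myfs
# List all CephFS filesystems in the cluster
ceph fs ls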
Deploy kube-registry
filesystem.yaml
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: myfs
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: 3
  dataPools:
    - name: replicated
      replicated:
        size: 3
  preserveFilesystemOnDelete: true
  metadataServer:
    activeCount: 1
    activeStandby: true
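Apply the manifest and wait for the metadata servers to start. This assumes the manifest above is saved as filesystem.yaml; the label selector is the one Rook sets on its MDS pods:

kubectl create -f filesystem.yaml
# The metadata servers show up as rook-ceph-mds-myfs-* pods
kubectl -n rook-ceph get pod -l app=rook-ceph-mds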
storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph
  # CephFS filesystem name into which the volume shall be created
  fsName: myfs
  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true". Change this to match your filesystem's data pool
  # (for the manifest above it is myfs-data0).
  pool: myfs-data0
  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
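Then apply the StorageClass and check that it is registered (assuming the manifest above is saved as storageclass.yaml):

kubectl create -f storageclass.yaml
kubectl get storageclass rook-cephfs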
kube-registry.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc
  namespace: kube-system
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-cephfs
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-registry
  namespace: kube-system
  labels:
    k8s-app: kube-registry
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 3
  selector:
    matchLabels:
      k8s-app: kube-registry
  template:
    metadata:
      labels:
        k8s-app: kube-registry
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
        - name: registry
          image: registry:2
          imagePullPolicy: Always
          resources:
            limits:
              cpu: 100m
              memory: 100Mi
          env:
            # Configuration reference: https://docs.docker.com/registry/configuration/
            - name: REGISTRY_HTTP_ADDR
              value: :5000
            - name: REGISTRY_HTTP_SECRET
              value: "Ple4seCh4ngeThisN0tAVerySecretV4lue"
            - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
              value: /var/lib/registry
          volumeMounts:
            - name: image-store
              mountPath: /var/lib/registry
          ports:
            - containerPort: 5000
              name: registry
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              port: registry
          readinessProbe:
            httpGet:
              path: /
              port: registry
      volumes:
        - name: image-store
          persistentVolumeClaim:
            claimName: cephfs-pvc
            readOnly: false
Create the Kube registry deployment:
kubectl create -f deploy/examples/csi/cephfs/kube-registry.yaml
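To confirm the shared volume was provisioned and mounted, a couple of standard checks (pod names will differ in your cluster):

# The PVC should report STATUS Bound with the rook-cephfs StorageClass
kubectl -n kube-system get pvc cephfs-pvc
# All three registry replicas should reach Running, sharing the same CephFS volume
kubectl -n kube-system get pod -l k8s-app=kube-registry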
References:
https://github.com/rook/rook/tree/v1.10.5
https://rook.github.io/docs/rook/v1.10/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage/
For mirroring images from gcr.io, see:
https://github.com/anjia0532/gcr.io_mirror/issues/1796