侧边栏壁纸
博主头像
kevin's blog! 博主等级

行动起来,活在当下

  • 累计撰写 27 篇文章
  • 累计创建 17 个标签
  • 累计收到 0 条评论

目 录CONTENT

文章目录

helm 安装 rook-ceph

kevin
2024-05-09 / 0 评论 / 0 点赞 / 132 阅读 / 0 字

在 Kubernetes 集群中通过 Rook 部署 ceph 分布式存储集群
https://github.com/rook/rook

Prerequisites:
Kubernetes 1.22+
Helm 3.x
一主三从(最少)
所有 k8s 节点另外准备一块磁盘(裸盘不需要格式化)(/dev/sdb)

1、rook-ceph-operator

# Register the official Rook Helm chart repository (idempotent).
helm repo add rook-release https://charts.rook.io/release

helm repo update

helm search repo rook-release/rook-ceph

# Download and unpack chart v1.12.2 into ./rook-ceph.
# NOTE(review): run this from $HOME so the chart lands in ~/rook-ceph,
# matching the paths used below — confirm working directory.
helm pull rook-release/rook-ceph --version v1.12.2 --untar

# Write the production values override. The quoted 'EOF' delimiter makes the
# heredoc literal — nothing inside the YAML is expanded by the shell.
# Images are pulled from a Tencent Cloud mirror instead of the upstream
# registries listed in the "@default" comments.
cat > ~/rook-ceph/values-prod.yml << 'EOF'
image:
  repository: ccr.ccs.tencentyun.com/huanghuanhui/rook-ceph
  tag: ceph-v1.12.2
  pullPolicy: IfNotPresent

resources:
  limits:
    cpu: "2"
    memory: "4Gi"
  requests:
    cpu: "1"
    memory: "2Gi"

csi:
  cephcsi:
    # @default -- `quay.io/cephcsi/cephcsi:v3.9.0`
    image: ccr.ccs.tencentyun.com/huanghuanhui/rook-ceph:cephcsi-v3.9.0

  registrar:
    # @default -- `registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0`
    image: ccr.ccs.tencentyun.com/huanghuanhui/rook-ceph:csi-node-driver-registrar-v2.8.0

  provisioner:
    # @default -- `registry.k8s.io/sig-storage/csi-provisioner:v3.5.0`
    image: ccr.ccs.tencentyun.com/huanghuanhui/rook-ceph:csi-provisioner-v3.5.0

  snapshotter:
    # @default -- `registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2`
    image: ccr.ccs.tencentyun.com/huanghuanhui/rook-ceph:csi-snapshotter-v6.2.2

  attacher:
    # @default -- `registry.k8s.io/sig-storage/csi-attacher:v4.3.0`
    image: ccr.ccs.tencentyun.com/huanghuanhui/rook-ceph:csi-attacher-v4.3.0

  resizer:
    # @default -- `registry.k8s.io/sig-storage/csi-resizer:v1.8.0`
    image: ccr.ccs.tencentyun.com/huanghuanhui/rook-ceph:csi-resizer-v1.8.0

  # -- Image pull policy
  imagePullPolicy: IfNotPresent

EOF
cd ~/rook-ceph

# Install (or upgrade) the rook-ceph operator into the rook-ceph namespace,
# creating the namespace on first run.
helm upgrade --install --create-namespace --namespace rook-ceph rook-ceph -f ./values-prod.yml .

2、rook-ceph-cluster

# Register the Rook chart repo (idempotent — safe to repeat from step 1).
helm repo add rook-release https://charts.rook.io/release

helm repo update

helm search repo rook-release/rook-ceph-cluster

# Download and unpack the cluster chart v1.12.2 into ./rook-ceph-cluster.
helm pull rook-release/rook-ceph-cluster --version v1.12.2 --untar
# Production overrides: enable the toolbox pod and pin the Ceph image to a
# Tencent Cloud mirror of quay.io/ceph/ceph:v17.2.6. The quoted 'EOF'
# delimiter keeps the YAML literal (no shell expansion).
cat > ~/rook-ceph-cluster/values-prod.yml << 'EOF'

toolbox:
  enabled: true
  image: ccr.ccs.tencentyun.com/huanghuanhui/rook-ceph:ceph-ceph-v17.2.6
cephClusterSpec:
  cephVersion:
    image: ccr.ccs.tencentyun.com/huanghuanhui/rook-ceph:ceph-ceph-v17.2.6

EOF
cd ~/rook-ceph-cluster

# Deploy the CephCluster; operatorNamespace must match the namespace where
# the rook-ceph operator from step 1 is running.
helm upgrade --install --create-namespace --namespace rook-ceph rook-ceph-cluster --set operatorNamespace=rook-ceph -f ./values-prod.yml .

# Open a shell inside the toolbox pod.
# Fix: the original used the deprecated `kubectl exec [POD] [COMMAND]` form;
# the command must follow `--`. The command substitution is also quoted.
kubectl -n rook-ceph exec -it "$(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}')" -- bash

3、NodePort(nodeport方式访问)

# Expose the mgr-a pod's dashboard port (8443) as a NodePort service.
# Fixes: the original `kubectl expose` omitted `-n rook-ceph`, so it looked
# for the pod in the default namespace and failed; the command substitution
# is now quoted, and `NR==1` guards against grep matching multiple pods.
# NOTE(review): exposing a pod directly is fragile — the service's selector
# breaks when the mgr pod is recreated; prefer the Ingress in step 4.
kubectl -n rook-ceph expose pod "$(kubectl get pod -n rook-ceph | grep rook-ceph-mgr-a | awk 'NR==1{print $1}')" --type=NodePort --name=rook-ceph-mgr-a-service --port=8443

# kubectl -n rook-ceph delete service rook-ceph-mgr-a-service
# Dashboard URL: https://<node-ip>:<node-port>

# Dashboard admin password:
kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode && echo

4、rook-ceph-mgr-dashboard-Ingress(ingress域名方式访问)

# Write the Ingress manifest for the mgr dashboard. The quoted 'EOF' keeps
# the YAML literal. backend-protocol=HTTPS plus `proxy_ssl_verify off` lets
# ingress-nginx talk to the dashboard's self-signed certificate.
# NOTE(review): the `kubernetes.io/ingress.class` annotation is deprecated
# in favor of `spec.ingressClassName` — confirm against the ingress-nginx
# version in use.
cat > ~/rook-ceph/rook-ceph-mgr-dashboard-Ingress.yml << 'EOF'
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rook-ceph-mgr-dashboard-ingress
  namespace: rook-ceph
  annotations:
    kubernetes.io/ingress.class: "nginx"
    kubernetes.io/tls-acme: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    nginx.ingress.kubernetes.io/server-snippet: |
      proxy_ssl_verify off;
spec:
  rules:
    - host: rook-ceph.kevinspace.top
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: rook-ceph-mgr-dashboard
                port:
                  name: https-dashboard
  tls:
  - hosts:
    - rook-ceph.kevinspace.top
    secretName: rook-ceph-mgr-dashboard-ingress-tls
EOF

# Create the TLS secret referenced by spec.tls.secretName above, from a
# pre-provisioned certificate/key pair on the local filesystem.
kubectl create secret -n rook-ceph \
tls rook-ceph-mgr-dashboard-ingress-tls \
--key=/root/ssl/kevinspace.top_nginx/kevinspace.top.key \
--cert=/root/ssl/kevinspace.top_nginx/kevinspace.top_bundle.crt

# Apply the Ingress.
kubectl apply -f ~/rook-ceph/rook-ceph-mgr-dashboard-Ingress.yml


访问地址:rook-ceph.kevinspace.top
用户:admin
密码:
kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode && echo

image-frac.png

helm 安装 rook-ceph 会自动创建 3 个 StorageClass(见下方 kubectl get sc 输出),推荐使用:ceph-block

# Clean up accumulated Ceph crash reports (run inside the toolbox pod).
# See https://github.com/rook/rook/issues/12758
ceph crash prune 3 # keep crash reports from the last 3 days; delete older ones
ceph crash prune 0 # keep 0 days, i.e. delete all crash reports

# List remaining crash reports (should be empty after prune 0).
ceph crash ls
[root@master1 ~]# kubectl  get pod -n rook-ceph
NAME                                                READY   STATUS      RESTARTS   AGE
csi-cephfsplugin-bkrbr                              2/2     Running     0          30m
csi-cephfsplugin-mbwfg                              2/2     Running     0          30m
csi-cephfsplugin-provisioner-6cd7f5c469-mfntc       5/5     Running     0          30m
csi-cephfsplugin-provisioner-6cd7f5c469-pg9n2       5/5     Running     0          30m
csi-cephfsplugin-t9gvv                              2/2     Running     0          30m
csi-rbdplugin-7t8qj                                 2/2     Running     0          30m
csi-rbdplugin-82vt7                                 2/2     Running     0          30m
csi-rbdplugin-provisioner-5f45b9bbc5-24mp9          5/5     Running     0          30m
csi-rbdplugin-provisioner-5f45b9bbc5-z4pk8          5/5     Running     0          30m
csi-rbdplugin-r5bnw                                 2/2     Running     0          30m
rook-ceph-crashcollector-worker1-6fb86674d4-5vvmx   1/1     Running     0          21m
rook-ceph-crashcollector-worker2-94f8cc64f-shgbw    1/1     Running     0          19m
rook-ceph-crashcollector-worker3-74f5867d74-bm84d   1/1     Running     0          21m
rook-ceph-mds-ceph-filesystem-a-dcb44dcb4-hww6v     2/2     Running     0          21m
rook-ceph-mds-ceph-filesystem-b-6ffd848849-8zjkn    2/2     Running     0          21m
rook-ceph-mgr-a-76975d4759-tlcjx                    3/3     Running     0          23m
rook-ceph-mgr-b-66866fbf78-mpcdh                    3/3     Running     0          23m
rook-ceph-mon-a-5cd7f8dbb7-t7dks                    2/2     Running     0          30m
rook-ceph-mon-b-b6d75d468-l94qj                     2/2     Running     0          26m
rook-ceph-mon-c-6d57dc9474-lh5n8                    2/2     Running     0          24m
rook-ceph-operator-7c886f57d4-2psbt                 1/1     Running     0          35m
rook-ceph-osd-0-59475457f-s7zfc                     2/2     Running     0          23m
rook-ceph-osd-1-849c9b9d6-4m62c                     2/2     Running     0          22m
rook-ceph-osd-2-5f47d4785f-k6nfw                    2/2     Running     0          22m
rook-ceph-osd-prepare-worker1-9zgj4                 0/1     Completed   0          20m
rook-ceph-osd-prepare-worker2-74mnx                 0/1     Completed   0          20m
rook-ceph-osd-prepare-worker3-x42tv                 0/1     Completed   0          20m
rook-ceph-rgw-ceph-objectstore-a-86b9cfc8cb-thqzv   2/2     Running     0          19m
rook-ceph-tools-5469c66bbf-pmmxg                    1/1     Running     0          33m

[root@master1 ~]# kubectl get svc
NAME                             TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)             AGE
rook-ceph-mgr                    ClusterIP   10.111.115.91    <none>        9283/TCP            24m
rook-ceph-mgr-dashboard          ClusterIP   10.108.26.167    <none>        8443/TCP            24m
rook-ceph-mon-a                  ClusterIP   10.105.182.107   <none>        6789/TCP,3300/TCP   31m
rook-ceph-mon-b                  ClusterIP   10.103.62.93     <none>        6789/TCP,3300/TCP   30m
rook-ceph-mon-c                  ClusterIP   10.98.148.23     <none>        6789/TCP,3300/TCP   25m
rook-ceph-rgw-ceph-objectstore   ClusterIP   10.110.252.234   <none>        80/TCP              22m

[root@master1 ~]# kubectl get sc
NAME                   PROVISIONER                     RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
ceph-block (default)   rook-ceph.rbd.csi.ceph.com      Delete          Immediate           true                   32m
ceph-bucket            rook-ceph.ceph.rook.io/bucket   Delete          Immediate           false                  32m
ceph-filesystem        rook-ceph.cephfs.csi.ceph.com   Delete          Immediate           true                   32m
0

评论区