ANF CEPH 2022, 03 to 07/10/2022, Sébastien Geiger

# installing a ceph cluster

# from cephclt, prepare the ceph nodes
[almalinux@cephclt ~]$ for i in {1..4}; do ssh root@ceph$i yum install -y podman lvm2; done

# connect to the first node
[almalinux@cephclt ~]$ ssh ceph1
[almalinux@ceph1 ~]$ sudo yum -y install https://download.ceph.com/rpm-15.2.12/el8/noarch/cephadm-15.2.12-0.el8.noarch.rpm
[almalinux@ceph1 ~]$ monip=$(getent ahostsv4 ceph1 |head -n 1| awk '{ print $1 }')
[almalinux@ceph1 ~]$ sudo cephadm bootstrap --mon-ip $monip --initial-dashboard-password demo1demo
...
Generating a dashboard self-signed certificate...
Creating initial admin user...
Fetching dashboard port number...
Ceph Dashboard is now available at:
             URL: https://ceph1.novalocal:8443/
            User: admin
        Password: demo1demo
You can access the Ceph CLI with:
        sudo /sbin/cephadm shell --fsid 92459a10-1975-11ed-9374-fa163e5fdb7c -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
Please consider enabling telemetry to help improve Ceph:
        ceph telemetry on
For more information see:
        https://docs.ceph.com/docs/master/mgr/telemetry/
Bootstrap complete.

# add the ceph.pub ssh key to the other nodes
[almalinux@ceph1 ~]$ for i in {2..4}; do ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph$i; done

# prepare the cephadm installation on the other nodes
[almalinux@ceph1 ~]$ for i in {2..4}; do ssh root@ceph$i yum -y install https://download.ceph.com/rpm-15.2.12/el8/noarch/cephadm-15.2.12-0.el8.noarch.rpm; done

# start a ceph shell
[almalinux@ceph1 ~]$ sudo cephadm shell
Inferring fsid 92459a10-1975-11ed-9374-fa163e5fdb7c
Inferring config /var/lib/ceph/92459a10-1975-11ed-9374-fa163e5fdb7c/mon.ceph1/config
Using recent ceph image docker.io/ceph/ceph@sha256:056637972a107df4096f10951e4216b21fcd8ae0b9fb4552e628d35df3f61139
[ceph: root@ceph1 /]#

# ceph status
[ceph: root@ceph1 /]# ceph -s
  cluster:
    id:     92459a10-1975-11ed-9374-fa163e5fdb7c
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum ceph1 (age 4m)
    mgr: ceph1.inxizw(active, since 3m)
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:

# note: check the number of OSDs

# add nodes 2 and 3
[ceph: root@ceph1 /]# ceph orch host add ceph2
Added host 'ceph2'
[ceph: root@ceph1 /]# ceph orch host add ceph3
Added host 'ceph3'
# note: do not add ceph4, it is reserved for the cluster expansion
#_ceph orch host add ceph4

# list the devices of the nodes
# note: the disk sizes are not identical, this is intentional
[ceph: root@ceph1 /]# ceph orch device ls
Hostname  Path      Type  Serial                Size   Health   Ident  Fault  Available
ceph1     /dev/vdb  hdd   a093fb0d-1b06-4362-8  42.9G  Unknown  N/A    N/A    Yes
ceph1     /dev/vdc  hdd   925485ea-8aad-4f26-a  53.6G  Unknown  N/A    N/A    Yes
ceph2     /dev/vdb  hdd   ebea0528-ad0c-4f72-8  42.9G  Unknown  N/A    N/A    Yes
ceph2     /dev/vdc  hdd   49ff4990-8acd-439b-a  53.6G  Unknown  N/A    N/A    Yes
ceph3     /dev/vdb  hdd   6d5cbff6-51f6-4ffd-a  42.9G  Unknown  N/A    N/A    Yes
ceph3     /dev/vdc  hdd   20273862-fd7e-423d-a  64.4G  Unknown  N/A    N/A    Yes

# install the OSDs
[ceph: root@ceph1 /]# ceph orch apply osd --all-available-devices
Scheduled osd.all-available-devices update...
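
# Note (a sketch, not used in this workshop): instead of --all-available-devices, the OSD
# layout can also be described in an OSD service specification file and applied with the
# orchestrator. The file name osd_spec.yml and the service_id below are arbitrary examples.
[ceph: root@ceph1 /]# cat > osd_spec.yml <<'EOF'
service_type: osd
service_id: all_hdd_drives
placement:
  host_pattern: 'ceph*'
data_devices:
  all: true
EOF
[ceph: root@ceph1 /]# ceph orch apply osd -i osd_spec.yml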

# watch the events during the OSD installation
[ceph: root@ceph1 /]# ceph -W cephadm
  cluster:
    id:     92459a10-1975-11ed-9374-fa163e5fdb7c
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 93s)
    mgr: ceph1.inxizw(active, since 7m), standbys: ceph2.nwehoh
    osd: 6 osds: 0 up, 0 in

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     100.000% pgs unknown
             1 unknown

2022-08-11T13:09:29.470828+0000 mgr.ceph1.inxizw [INF] Deploying daemon osd.3 on ceph1
2022-08-11T13:09:30.356837+0000 mgr.ceph1.inxizw [INF] Deploying daemon osd.5 on ceph3
2022-08-11T13:09:30.500424+0000 mgr.ceph1.inxizw [INF] Deploying daemon osd.4 on ceph2
2022-08-11T13:09:39.450670+0000 mgr.ceph1.inxizw [INF] refreshing ceph2 facts
2022-08-11T13:09:40.297052+0000 mgr.ceph1.inxizw [INF] Applying drive group all-available-devices on host ceph1...
2022-08-11T13:09:40.297806+0000 mgr.ceph1.inxizw [INF] Applying drive group all-available-devices on host ceph2...
2022-08-11T13:09:40.297952+0000 mgr.ceph1.inxizw [INF] Applying drive group all-available-devices on host ceph3...
# wait for it to finish

# display all the OSDs and the nodes
[ceph: root@ceph1 /]# ceph osd tree
ID  CLASS  WEIGHT   TYPE NAME       STATUS  REWEIGHT  PRI-AFF
-1         0.27347  root default
-3         0.08789      host ceph1
 0    hdd  0.03909          osd.0       up   1.00000  1.00000
 3    hdd  0.04880          osd.3       up   1.00000  1.00000
-7         0.08789      host ceph2
 2    hdd  0.03909          osd.2       up   1.00000  1.00000
 4    hdd  0.04880          osd.4       up   1.00000  1.00000
-5         0.09769      host ceph3
 1    hdd  0.03909          osd.1       up   1.00000  1.00000
 5    hdd  0.05859          osd.5       up   1.00000  1.00000

# display the cluster status
# note: there are indeed 3 mons, 2 mgrs and 6 OSDs up and in
[ceph: root@ceph1 /]# ceph -s
  cluster:
    id:     92459a10-1975-11ed-9374-fa163e5fdb7c
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 4m)
    mgr: ceph1.inxizw(active, since 10m), standbys: ceph2.nwehoh
    osd: 6 osds: 6 up (since 2m), 6 in (since 2m)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   6.0 GiB used, 274 GiB / 280 GiB avail
    pgs:     1 active+clean

# note: the 6 OSDs are indeed in the cluster
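
# Optional check (a sketch, not part of the original session): the orchestrator can also list
# the deployed services and the individual OSD daemons per host, and ceph df shows the raw
# capacity contributed by the new OSDs.
[ceph: root@ceph1 /]# ceph orch ls
[ceph: root@ceph1 /]# ceph orch ps --daemon-type osd
[ceph: root@ceph1 /]# ceph df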