Lab: Mirroring Ceph block devices - ANF CEPH 2022 - Sébastien Geiger

# From a Ceph shell on cluster A: create the pool and a journal-based image
[ceph: root@cna1 /]# ceph osd pool create data 16
pool 'data' created
[ceph: root@cna1 /]# ceph osd pool application enable data rbd
enabled application 'rbd' on pool 'data'
[ceph: root@cna1 /]# rbd create imagea1 --size 1024 --pool data --image-feature exclusive-lock,journaling
[ceph: root@cna1 /]# rbd mirror pool enable data pool
[ceph: root@cna1 /]# rbd mirror pool info data
Mode: pool
Site Name: eb65901a-f3d6-11ec-8d25-fa163ea7c9dc
Peer Sites: none
# Note: there are no peer sites yet.

# From a Ceph shell on cluster B: deploy the rbd-mirror daemon and create the same pool
[ceph: root@cnb1 /]# ceph -s
[ceph: root@cnb1 /]# ceph orch apply rbd-mirror --placement=cnb3
Scheduled rbd-mirror update...
[ceph: root@cnb1 /]# ceph osd pool create data 16
pool 'data' created
[ceph: root@cnb1 /]# ceph osd pool application enable data rbd
enabled application 'rbd' on pool 'data'
[ceph: root@cnb1 /]# rbd mirror pool enable data pool
[ceph: root@cnb1 /]# rbd mirror pool info data
Mode: pool
Site Name: 5460c242-f60c-11ec-85e7-fa163e50b5d1
Peer Sites: none

# On cluster A: create a bootstrap token for sitea
[ceph: root@cna1 /]# rbd mirror pool peer bootstrap create --site-name sitea data > token_sitea
[ceph: root@cna1 /]# cat token_sitea
eyJmc2lkIjoiZWI2NTkwMWEtZjNkNi0xMWVjLThkMjUtZmExNjNlYTdjOWRjIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFEWU03dGlOTklBTGhBQWNzNEJqbEhTRDB3QjB4MWZqYkRmVUE9PSIsIm1vbl9ob3N0IjoiW3YyOjE3Mi4xNi43LjIzOjMzMDAvMCx2MToxNzIuMTYuNy4yMzo2Nzg5LzBdIFt2MjoxNzIuMTYuNy4xMTQ6MzMwMC8wLHYxOjE3Mi4xNi43LjExNDo2Nzg5LzBdIFt2MjoxNzIuMTYuNy4xMTA6MzMwMC8wLHYxOjE3Mi4xNi43LjExMDo2Nzg5LzBdIFt2MjoxNzIuMTYuNy4yMzc6MzMwMC8wLHYxOjE3Mi4xNi43LjIzNzo2Nzg5LzBdIn0=

# On cluster B: store the token, extract the peer key and monitor list, then add the peer
[ceph: root@cnb1 /]# echo eyJmc2lkIjoiZWI2NTkwMWEtZjNkNi0xMWVjLThkMjUtZmExNjNlYTdjOWRjIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFEWU03dGlOTklBTGhBQWNzNEJqbEhTRDB3QjB4MWZqYkRmVUE9PSIsIm1vbl9ob3N0IjoiW3YyOjE3Mi4xNi43LjIzOjMzMDAvMCx2MToxNzIuMTYuNy4yMzo2Nzg5LzBdIFt2MjoxNzIuMTYuNy4xMTQ6MzMwMC8wLHYxOjE3Mi4xNi43LjExNDo2Nzg5LzBdIFt2MjoxNzIuMTYuNy4xMTA6MzMwMC8wLHYxOjE3Mi4xNi43LjExMDo2Nzg5LzBdIFt2MjoxNzIuMTYuNy4yMzc6MzMwMC8wLHYxOjE3Mi4xNi43LjIzNzo2Nzg5LzBdIn0=>token_sitea
[ceph: root@cnb1 /]# cat token_sitea |base64 -d |jq -r .key >sitea.keyfile
[ceph: root@cnb1 /]# remote_monsitea=$(cat token_sitea |base64 -d |jq -r .mon_host)
[ceph: root@cnb1 /]# echo $remote_monsitea
[v2:172.16.7.23:3300/0,v1:172.16.7.23:6789/0] [v2:172.16.7.114:3300/0,v1:172.16.7.114:6789/0] [v2:172.16.7.110:3300/0,v1:172.16.7.110:6789/0] [v2:172.16.7.237:3300/0,v1:172.16.7.237:6789/0]
[ceph: root@cnb1 /]# rbd mirror pool peer add data client.rbd-mirror-peer@sitea --remote-mon-host "$remote_monsitea" --remote-key-file sitea.keyfile --direction rx-tx
b7343132-329c-4cf0-ab2d-7ee90eddc507
[ceph: root@cnb1 /]# rbd mirror pool info data
Mode: pool
Site Name: 5460c242-f60c-11ec-85e7-fa163e50b5d1
Peer Sites:
UUID: b7343132-329c-4cf0-ab2d-7ee90eddc507
Name: sitea
Mirror UUID:
Direction: rx-tx
Client: client.rbd-mirror-peer
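# Aside: instead of extracting the key and the monitor list from the token by
# hand, the token can also be imported directly on the peer cluster with the
# bootstrap import subcommand. A minimal sketch, assuming the pool and site
# names used above (not part of the original session):
[ceph: root@cnb1 /]# rbd mirror pool peer bootstrap import --site-name siteb --direction rx-tx data token_sitea
# The manual key/mon-host extraction shown in this lab is kept because it makes
# the content of the token explicit.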
# On cluster B: create a bootstrap token for siteb
[ceph: root@cnb1 /]# rbd mirror pool peer bootstrap create --site-name siteb data > token_siteb
[ceph: root@cnb1 /]# cat token_siteb
eyJmc2lkIjoiNTQ2MGMyNDItZjYwYy0xMWVjLTg1ZTctZmExNjNlNTBiNWQxIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFEWU5MdGl4OWNiRkJBQVpONDJScGF0S3dYck9PZWNRdEhrckE9PSIsIm1vbl9ob3N0IjoiW3YyOjE3Mi4xNi43LjQwOjMzMDAvMCx2MToxNzIuMTYuNy40MDo2Nzg5LzBdIFt2MjoxNzIuMTYuNy45OTozMzAwLzAsdjE6MTcyLjE2LjcuOTk6Njc4OS8wXSBbdjI6MTcyLjE2LjcuMTc2OjMzMDAvMCx2MToxNzIuMTYuNy4xNzY6Njc4OS8wXSBbdjI6MTcyLjE2LjcuNDM6MzMwMC8wLHYxOjE3Mi4xNi43LjQzOjY3ODkvMF0ifQ==

# On cluster A: deploy the rbd-mirror daemon, store the token, then add the peer
[ceph: root@cna1 /]# echo eyJmc2lkIjoiNTQ2MGMyNDItZjYwYy0xMWVjLTg1ZTctZmExNjNlNTBiNWQxIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFEWU5MdGl4OWNiRkJBQVpONDJScGF0S3dYck9PZWNRdEhrckE9PSIsIm1vbl9ob3N0IjoiW3YyOjE3Mi4xNi43LjQwOjMzMDAvMCx2MToxNzIuMTYuNy40MDo2Nzg5LzBdIFt2MjoxNzIuMTYuNy45OTozMzAwLzAsdjE6MTcyLjE2LjcuOTk6Njc4OS8wXSBbdjI6MTcyLjE2LjcuMTc2OjMzMDAvMCx2MToxNzIuMTYuNy4xNzY6Njc4OS8wXSBbdjI6MTcyLjE2LjcuNDM6MzMwMC8wLHYxOjE3Mi4xNi43LjQzOjY3ODkvMF0ifQ== >token_siteb
[ceph: root@cna1 /]# ceph orch apply rbd-mirror --placement=cna3
Scheduled rbd-mirror update...
[ceph: root@cna1 /]# cat token_siteb |base64 -d |jq -r .key >siteb.keyfile
[ceph: root@cna1 /]# remote_monsiteb=$(cat token_siteb |base64 -d |jq -r .mon_host)
[ceph: root@cna1 /]# echo $remote_monsiteb
[v2:172.16.7.40:3300/0,v1:172.16.7.40:6789/0] [v2:172.16.7.99:3300/0,v1:172.16.7.99:6789/0] [v2:172.16.7.176:3300/0,v1:172.16.7.176:6789/0] [v2:172.16.7.43:3300/0,v1:172.16.7.43:6789/0]
[ceph: root@cna1 /]# rbd mirror pool peer add data client.rbd-mirror-peer@siteb --remote-mon-host "$remote_monsiteb" --remote-key-file siteb.keyfile --direction rx-tx
rbd: mirror peer already exists
2022-06-28T17:11:01.317+0000 7fd3ec65e380 -1 librbd::api::Mirror: peer_site_add: failed to add mirror peer 'siteb': (17) File exists
# If the peer already exists (it was created automatically when cluster B added
# its peer), remove it and add it again; otherwise skip to the next step.
[ceph: root@cna1 /]# rbd mirror pool info data
Mode: pool
Site Name: sitea
Peer Sites:
UUID: ee09ea9a-1672-4e69-b449-04374e6ba251
Name: siteb
Mirror UUID: d21f1d9a-3414-424e-a2e5-da875dcb3bea
Direction: tx-only
[ceph: root@cna1 /]# rbd mirror pool peer remove data ee09ea9a-1672-4e69-b449-04374e6ba251
[ceph: root@cna1 /]# rbd mirror pool info data
Mode: pool
Site Name: sitea
Peer Sites: none
[ceph: root@cna1 /]# rbd mirror pool peer add data client.rbd-mirror-peer@siteb --remote-mon-host "$remote_monsiteb" --remote-key-file siteb.keyfile --direction rx-tx
[ceph: root@cna1 /]# rbd mirror pool status data
health: OK
daemon health: OK
image health: OK
images: 1 total
    1 replaying
# Replication is now active.
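# Aside: to follow the replication in more detail, the pool status can be
# queried in verbose mode, or polled with watch if it is available in the
# shell. A minimal sketch (not part of the original session):
[ceph: root@cna1 /]# rbd mirror pool status data --verbose
[ceph: root@cna1 /]# watch -n 5 'rbd mirror pool status data'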
# Client side (cephclt): install the packages needed to consume the resources
[almalinux@cephclt ~]$ sudo yum -y install centos-release-ceph-pacific.noarch
[almalinux@cephclt ~]$ sudo yum -y install ceph-common
[almalinux@cephclt ~]$ sudo yum -y install rbd-nbd

# Export the configuration and the rbd-mirror-peer keyring of each cluster to the client
[almalinux@cna1 ~]$ sudo cephadm shell cat /etc/ceph/ceph.conf >sitea.conf
Inferring fsid eb65901a-f3d6-11ec-8d25-fa163ea7c9dc
Inferring config /var/lib/ceph/eb65901a-f3d6-11ec-8d25-fa163ea7c9dc/mon.cna1/config
Using ceph image with id 'e5af760fa1c1' and tag 'v17' created on 2022-06-23 19:49:45 +0000 UTC
quay.io/ceph/ceph@sha256:d3f3e1b59a304a280a3a81641ca730982da141dad41e942631e4c5d88711a66b
[almalinux@cna1 ~]$ sudo cephadm shell ceph auth get client.rbd-mirror-peer >sitea.client.rbd-mirror-peer.keyring
Inferring fsid eb65901a-f3d6-11ec-8d25-fa163ea7c9dc
Inferring config /var/lib/ceph/eb65901a-f3d6-11ec-8d25-fa163ea7c9dc/mon.cna1/config
Using ceph image with id 'e5af760fa1c1' and tag 'v17' created on 2022-06-23 19:49:45 +0000 UTC
quay.io/ceph/ceph@sha256:d3f3e1b59a304a280a3a81641ca730982da141dad41e942631e4c5d88711a66b
exported keyring for client.rbd-mirror-peer
[almalinux@cna1 ~]$ scp sitea* root@cephclt:/etc/ceph
sitea.client.rbd-mirror-peer.keyring    100%  137    89.5KB/s   00:00
sitea.conf                              100%  359   365.2KB/s   00:00

[almalinux@cnb1 ~]$ sudo cephadm shell cat /etc/ceph/ceph.conf >siteb.conf
Inferring fsid 5460c242-f60c-11ec-85e7-fa163e50b5d1
Inferring config /var/lib/ceph/5460c242-f60c-11ec-85e7-fa163e50b5d1/mon.cnb1/config
Using ceph image with id 'e5af760fa1c1' and tag 'v17' created on 2022-06-23 19:49:45 +0000 UTC
quay.io/ceph/ceph@sha256:d3f3e1b59a304a280a3a81641ca730982da141dad41e942631e4c5d88711a66b
[almalinux@cnb1 ~]$ sudo cephadm shell ceph auth get client.rbd-mirror-peer >siteb.client.rbd-mirror-peer.keyring
Inferring fsid 5460c242-f60c-11ec-85e7-fa163e50b5d1
Inferring config /var/lib/ceph/5460c242-f60c-11ec-85e7-fa163e50b5d1/mon.cnb1/config
Using ceph image with id 'e5af760fa1c1' and tag 'v17' created on 2022-06-23 19:49:45 +0000 UTC
quay.io/ceph/ceph@sha256:d3f3e1b59a304a280a3a81641ca730982da141dad41e942631e4c5d88711a66b
exported keyring for client.rbd-mirror-peer
[almalinux@cnb1 ~]$ scp siteb* root@cephclt:/etc/ceph
The authenticity of host 'cephclt (172.16.7.196)' can't be established.
ECDSA key fingerprint is SHA256:50VnJhaZr65ArQwnHyhiFYhQPgHHfCGpGy7ohC/k9dg.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
Warning: Permanently added 'cephclt,172.16.7.196' (ECDSA) to the list of known hosts.
siteb.client.rbd-mirror-peer.keyring    100%  137    76.9KB/s   00:00
siteb.conf                              100%  355   437.7KB/s   00:00
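# Aside: the --cluster option used below relies on the standard client naming
# convention: /etc/ceph/<cluster>.conf and /etc/ceph/<cluster>.client.<id>.keyring.
# After the two scp commands the client should therefore hold the following
# four files (illustrative listing, assuming nothing else was installed there):
[root@cephclt ~]# ls /etc/ceph
sitea.client.rbd-mirror-peer.keyring  sitea.conf
siteb.client.rbd-mirror-peer.keyring  siteb.conf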
# Check which site is primary
[almalinux@cephclt ~]$ rbd --cluster sitea --id rbd-mirror-peer info data/imagea1 |grep primary
mirroring primary: true
[almalinux@cephclt ~]$ rbd --cluster siteb --id rbd-mirror-peer info data/imagea1 |grep primary
mirroring primary: false
# sitea is indeed primary.
# Switch to root to simplify the commands.
[almalinux@cephclt ~]$ sudo -i
[root@cephclt ~]# rbd-nbd --cluster sitea --id rbd-mirror-peer map data/imagea1
/dev/nbd0
[root@cephclt ~]# mkfs.xfs /dev/nbd0
meta-data=/dev/nbd0              isize=512    agcount=4, agsize=65536 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=0 inobtcount=0
data     =                       bsize=4096   blocks=262144, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.
[root@cephclt ~]# mkdir /work
[root@cephclt ~]# mount /dev/nbd0 /work
[root@cephclt ~]# echo "depuis sitea" > /work/prod.txt
[root@cephclt ~]# umount /work/
[root@cephclt ~]# rbd-nbd unmap /dev/nbd0

# Failover from siteA to siteB
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer mirror pool demote data
Demoted 1 mirrored images
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer mirror pool promote data
Promoted 1 mirrored images
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer info data/imagea1 |grep primary
mirroring primary: false
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer info data/imagea1 |grep primary
mirroring primary: true
# Note: once siteb is primary the volume can be mounted; otherwise wait for the
# synchronisation to finish.
[root@cephclt ~]# rbd-nbd --cluster siteb --id rbd-mirror-peer map data/imagea1
/dev/nbd0
[root@cephclt ~]# mount /dev/nbd0 /work
[root@cephclt ~]# cat /work/prod.txt
depuis sitea
[root@cephclt ~]# echo "PRA depuis siteb" >>/work/prod.txt
[root@cephclt ~]# cat /work/prod.txt
depuis sitea
PRA depuis siteb
[root@cephclt ~]# umount /work
[root@cephclt ~]# rbd-nbd unmap /dev/nbd0

# Fail back: make siteA primary again
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer mirror pool demote data
Demoted 1 mirrored images
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer mirror pool promote data
2022-06-28T17:34:38.216+0000 7fc23ae90700 -1 librbd::mirror::PromoteRequest: 0x7fc218001c30 handle_get_info: image is still primary within a remote cluster
2022-06-28T17:34:38.216+0000 7fc23ae90700 -1 librbd::io::AioCompletion: 0x561376e88b40 fail: (16) Device or resource busy
rbd: failed to promote image imagea1: (16) Device or resource busy
Promoted 0 mirrored images
# Replication has not finished yet: wait a moment, then run the command again.
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer mirror pool promote data
Promoted 1 mirrored images
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer info data/imagea1 |grep primary
mirroring primary: true
[root@cephclt ~]# rbd-nbd --cluster sitea --id rbd-mirror-peer map data/imagea1
/dev/nbd0
[root@cephclt ~]# mount /dev/nbd0 /work
[root@cephclt ~]# cat /work/prod.txt
depuis sitea
PRA depuis siteb
[root@cephclt ~]# echo "retour prod sitea" >>/work/prod.txt
# Failover to siteb and failback to sitea both work.
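# Aside: the "Device or resource busy" error above simply means the demoted
# peer has not finished replaying yet. A minimal retry sketch (hypothetical,
# not part of the original session; it keys on the primary flag rather than on
# the exit status of the promote command):
[root@cephclt ~]# while ! rbd --cluster sitea --id rbd-mirror-peer info data/imagea1 | grep -q 'mirroring primary: true'; do
>   rbd --cluster sitea --id rbd-mirror-peer mirror pool promote data
>   sleep 10
> done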
# Add a new image to the pool and check that it is picked up by demote/promote
[ceph: root@cna1 /]# rbd create imagea2 --size 1024 --pool data --image-feature exclusive-lock,journaling
[ceph: root@cna1 /]# rbd ls -l data
NAME     SIZE   PARENT  FMT  PROT  LOCK
imagea1  1 GiB          2          excl
imagea2  1 GiB          2
[ceph: root@cna1 /]# rbd mirror pool status data
health: OK
daemon health: OK
image health: OK
images: 2 total
    2 replaying
# Note: both images in the pool are now synchronised.

# Create a snapshot-based replication
[ceph: root@cna1 /]# ceph osd pool create datasp 16
pool 'datasp' created
[ceph: root@cna1 /]# ceph osd pool application enable datasp rbd
enabled application 'rbd' on pool 'datasp'
[ceph: root@cna1 /]# rbd create imagesp1 --size 1024 --pool datasp --image-feature exclusive-lock,object-map,fast-diff
[ceph: root@cna1 /]# rbd mirror pool enable datasp image
[ceph: root@cna1 /]# rbd mirror image enable datasp/imagesp1 snapshot
Mirroring enabled

[ceph: root@cnb1 /]# ceph osd pool create datasp 16
pool 'datasp' created
[ceph: root@cnb1 /]# ceph osd pool application enable datasp rbd
enabled application 'rbd' on pool 'datasp'
[ceph: root@cnb1 /]# rbd create imagesp2 --size 1024 --pool datasp --image-feature exclusive-lock,object-map,fast-diff
[ceph: root@cnb1 /]# rbd mirror pool enable datasp image
[ceph: root@cnb1 /]# rbd mirror image enable datasp/imagesp2 snapshot
Mirroring enabled
[ceph: root@cnb1 /]# rbd mirror snapshot schedule add --pool datasp 5m

# Exchange the peering tokens, as in the journal-based setup
[ceph: root@cna1 /]# rbd mirror pool peer bootstrap create --site-name sitea data > token_sitea
[ceph: root@cna1 /]# cat token_sitea
eyJmc2lkIjoiYWM5MGM1NWUtMjRhOS0xMWVkLWE5MTMtZmExNjNlZDllNzY1IiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFCdTNnaGpvclpRR2hBQU9DbGxqbTM2Zkd6bzFIN24wcjNIemc9PSIsIm1vbl9ob3N0IjoiW3YyOjE3Mi4xNi43LjI0MjozMzAwLzAsdjE6MTcyLjE2LjcuMjQyOjY3ODkvMF0gW3YyOjE3Mi4xNi43LjEzOTozMzAwLzAsdjE6MTcyLjE2LjcuMTM5OjY3ODkvMF0gW3YyOjE3Mi4xNi43LjIxMjozMzAwLzAsdjE6MTcyLjE2LjcuMjEyOjY3ODkvMF0gW3YyOjE3Mi4xNi43Ljc4OjMzMDAvMCx2MToxNzIuMTYuNy43ODo2Nzg5LzBdIn0=
[ceph: root@cnb1 /]# echo eyJmc2lkIjoiYWM5MGM1NWUtMjRhOS0xMWVkLWE5MTMtZmExNjNlZDllNzY1IiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFCdTNnaGpvclpRR2hBQU9DbGxqbTM2Zkd6bzFIN24wcjNIemc9PSIsIm1vbl9ob3N0IjoiW3YyOjE3Mi4xNi43LjI0MjozMzAwLzAsdjE6MTcyLjE2LjcuMjQyOjY3ODkvMF0gW3YyOjE3Mi4xNi43LjEzOTozMzAwLzAsdjE6MTcyLjE2LjcuMTM5OjY3ODkvMF0gW3YyOjE3Mi4xNi43LjIxMjozMzAwLzAsdjE6MTcyLjE2LjcuMjEyOjY3ODkvMF0gW3YyOjE3Mi4xNi43Ljc4OjMzMDAvMCx2MToxNzIuMTYuNy43ODo2Nzg5LzBdIn0=>token_sitea
[ceph: root@cnb1 /]# cat token_sitea |base64 -d |jq -r .key >sitea.keyfile
[ceph: root@cnb1 /]# remote_monsitea=$(cat token_sitea |base64 -d |jq -r .mon_host)
[ceph: root@cnb1 /]# echo $remote_monsitea
[v2:172.16.7.242:3300/0,v1:172.16.7.242:6789/0] [v2:172.16.7.139:3300/0,v1:172.16.7.139:6789/0] [v2:172.16.7.212:3300/0,v1:172.16.7.212:6789/0] [v2:172.16.7.78:3300/0,v1:172.16.7.78:6789/0]
[ceph: root@cnb1 /]# rbd mirror pool peer add datasp client.rbd-mirror-peer@sitea --remote-mon-host "$remote_monsitea" --remote-key-file sitea.keyfile --direction rx-tx
bc32b069-a859-4246-8dee-2c60749815ed
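# Aside: the snapshot schedule added above can be verified; a minimal sketch
# (the "every 5m" output line is illustrative):
[ceph: root@cnb1 /]# rbd mirror snapshot schedule ls --pool datasp
every 5m
[ceph: root@cnb1 /]# rbd mirror snapshot schedule status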
[ceph: root@cnb1 /]# rbd mirror pool peer bootstrap create --site-name siteb data > token_siteb
[ceph: root@cnb1 /]# cat token_siteb
eyJmc2lkIjoiNTQ2MGMyNDItZjYwYy0xMWVjLTg1ZTctZmExNjNlNTBiNWQxIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFEWU5MdGl4OWNiRkJBQVpONDJScGF0S3dYck9PZWNRdEhrckE9PSIsIm1vbl9ob3N0IjoiW3YyOjE3Mi4xNi43LjQwOjMzMDAvMCx2MToxNzIuMTYuNy40MDo2Nzg5LzBdIFt2MjoxNzIuMTYuNy45OTozMzAwLzAsdjE6MTcyLjE2LjcuOTk6Njc4OS8wXSBbdjI6MTcyLjE2LjcuMTc2OjMzMDAvMCx2MToxNzIuMTYuNy4xNzY6Njc4OS8wXSBbdjI6MTcyLjE2LjcuNDM6MzMwMC8wLHYxOjE3Mi4xNi43LjQzOjY3ODkvMF0ifQ==
[ceph: root@cna1 /]# echo eyJmc2lkIjoiNTQ2MGMyNDItZjYwYy0xMWVjLTg1ZTctZmExNjNlNTBiNWQxIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFEWU5MdGl4OWNiRkJBQVpONDJScGF0S3dYck9PZWNRdEhrckE9PSIsIm1vbl9ob3N0IjoiW3YyOjE3Mi4xNi43LjQwOjMzMDAvMCx2MToxNzIuMTYuNy40MDo2Nzg5LzBdIFt2MjoxNzIuMTYuNy45OTozMzAwLzAsdjE6MTcyLjE2LjcuOTk6Njc4OS8wXSBbdjI6MTcyLjE2LjcuMTc2OjMzMDAvMCx2MToxNzIuMTYuNy4xNzY6Njc4OS8wXSBbdjI6MTcyLjE2LjcuNDM6MzMwMC8wLHYxOjE3Mi4xNi43LjQzOjY3ODkvMF0ifQ== >token_siteb
[ceph: root@cna1 /]# cat token_siteb |base64 -d |jq -r .key >siteb.keyfile
[ceph: root@cna1 /]# remote_monsiteb=$(cat token_siteb |base64 -d |jq -r .mon_host)
[ceph: root@cna1 /]# echo $remote_monsiteb
[v2:172.16.7.40:3300/0,v1:172.16.7.40:6789/0] [v2:172.16.7.99:3300/0,v1:172.16.7.99:6789/0] [v2:172.16.7.176:3300/0,v1:172.16.7.176:6789/0] [v2:172.16.7.43:3300/0,v1:172.16.7.43:6789/0]
[ceph: root@cna1 /]# rbd mirror pool peer add datasp client.rbd-mirror-peer@siteb --remote-mon-host "$remote_monsiteb" --remote-key-file siteb.keyfile --direction rx-tx
rbd: mirror peer already exists
2022-06-28T18:08:11.283+0000 7ff037388380 -1 librbd::api::Mirror: peer_site_add: failed to add mirror peer 'siteb': (17) File exists
# If this error appears, a peer was already created automatically, but it is
# not bidirectional by default:
[ceph: root@cna1 /]# rbd mirror pool info datasp
Mode: image
Site Name: sitea
Peer Sites:
UUID: e8b69f53-d21d-48c1-a8a6-f7a919e9d08a
Name: siteb
Mirror UUID: 06eb52b4-97b0-42ef-8858-b0d8d3b9fe6c
Direction: tx-only
# Simply remove it and configure it again in two-way (rx-tx) mode:
[ceph: root@cna1 /]# rbd mirror pool peer remove datasp e8b69f53-d21d-48c1-a8a6-f7a919e9d08a
[ceph: root@cna1 /]# rbd mirror pool peer add datasp client.rbd-mirror-peer@siteb --remote-mon-host "$remote_monsiteb" --remote-key-file siteb.keyfile --direction rx-tx
3ab76aab-2f26-4560-bf78-5a00430b91ce
[ceph: root@cna1 /]# rbd mirror pool info datasp
Mode: image
Site Name: sitea
Peer Sites:
UUID: 3ab76aab-2f26-4560-bf78-5a00430b91ce
Name: siteb
Mirror UUID:
Direction: rx-tx
Client: client.rbd-mirror-peer
[ceph: root@cna1 /]# rbd mirror snapshot schedule add --pool datasp 5m
[ceph: root@cna1 /]# rbd mirror image enable datasp/imagesp1 snapshot
Mirroring enabled
[ceph: root@cna1 /]# rbd mirror pool status datasp
health: OK
daemon health: OK
image health: OK
images: 2 total
    2 replaying
[ceph: root@cna1 /]# rbd mirror image status datasp/imagesp1
imagesp1:
  global_id:   3ae91a9f-69f7-4261-90f5-d72fb14d5c24
  state:       up+stopped
  description: local image is primary
  service:     cna3.ohukie on cna3
  last_update: 2022-06-28 18:16:18
  peer_sites:
    name: siteb
    state: up+replaying
    description: replaying, {"bytes_per_second":0.0,"bytes_per_snapshot":0.0,"remote_snapshot_timestamp":1656438627,"replay_state":"idle"}
    last_update: 2022-06-28 18:16:29
  snapshots:
    4 .mirror.primary.3ae91a9f-69f7-4261-90f5-d72fb14d5c24.596a9bfa-7ccc-4507-ae9a-2b99b3d78caf (peer_uuids:[])
[ceph: root@cna1 /]# rbd info datasp/imagesp1
rbd image 'imagesp1':
  size 1 GiB in 256 objects
  order 22 (4 MiB objects)
  snapshot_count: 1
  id: 3a1cfb393d15
  block_name_prefix: rbd_data.3a1cfb393d15
  format: 2
  features: exclusive-lock, object-map, fast-diff
  op_features:
  flags:
  create_timestamp: Tue Jun 28 17:50:06 2022
  access_timestamp: Tue Jun 28 17:50:06 2022
  modify_timestamp: Tue Jun 28 17:50:06 2022
  mirroring state: enabled
  mirroring mode: snapshot
  mirroring global id: 3ae91a9f-69f7-4261-90f5-d72fb14d5c24
  mirroring primary: true
# Note: the mirroring details now appear in the image information.

# datasp/imagesp1 is primary on sitea
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer info datasp/imagesp1 |grep primary
mirroring primary: true
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer info datasp/imagesp2 |grep primary
features: exclusive-lock, object-map, fast-diff, non-primary
mirroring primary: false
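# Aside: with snapshot-based mirroring, changes only travel with the
# mirror-snapshots (taken every 5m by the schedule above). A snapshot can also
# be triggered by hand so the peer picks up the latest writes immediately; a
# minimal sketch (not part of the original session):
[ceph: root@cna1 /]# rbd mirror image snapshot datasp/imagesp1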
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer device map datasp/imagesp1
/dev/rbd0
[root@cephclt ~]# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0              isize=512    agcount=8, agsize=32768 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=1, sparse=1, rmapbt=0
         =                       reflink=1    bigtime=0 inobtcount=0
data     =                       bsize=4096   blocks=262144, imaxpct=25
         =                       sunit=16     swidth=16 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.
[root@cephclt ~]# mount /dev/rbd0 /work
[root@cephclt ~]# echo sitea prod > /work/prod
[root@cephclt ~]# umount /work
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer device unmap /dev/rbd0

# Per-image failover from sitea to siteb
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer mirror image demote datasp/imagesp1
Image demoted to non-primary
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer mirror image promote datasp/imagesp1
Image promoted to primary
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer info datasp/imagesp1 |grep primary
mirroring primary: true
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer device map datasp/imagesp1
/dev/rbd0
[root@cephclt ~]# mount /dev/rbd0 /work
[root@cephclt ~]# cat /work/prod
sitea prod
[root@cephclt ~]# echo siteb prod >>/work/prod
[root@cephclt ~]# umount /work
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer device unmap /dev/rbd0

# Fail back to sitea
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer mirror image demote datasp/imagesp1
Image demoted to non-primary
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer mirror image promote datasp/imagesp1
Image promoted to primary
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer info datasp/imagesp1 |grep primary
features: exclusive-lock, object-map, fast-diff, non-primary
mirroring primary: false
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer info datasp/imagesp1 |grep primary
mirroring primary: true
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer device map datasp/imagesp1
/dev/rbd0
[root@cephclt ~]# mount /dev/rbd0 /work
[root@cephclt ~]# cat /work/prod
sitea prod
siteb prod
# Back in production on sitea.
# Exercise: do the same with imagesp2 and follow the changes from the dashboard.
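# Aside: before mapping an image it is worth checking which site is primary,
# as done repeatedly above. A small hypothetical helper wrapping that check
# (illustrative output):
[root@cephclt ~]# is_primary() { rbd --cluster "$1" --id rbd-mirror-peer info "$2" | grep -q 'mirroring primary: true'; }
[root@cephclt ~]# is_primary sitea datasp/imagesp1 && echo sitea is primary
sitea is primary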
# Snapshots
# Note: snapshots are part of the RBD image, so they are mirrored as well.
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer device map datasp/imagesp1
/dev/rbd0
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer device ls
id  pool    namespace  image     snap  device
0   datasp             imagesp1  -     /dev/rbd0
[root@cephclt ~]# mount /dev/rbd0 /work
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer snap create datasp/imagesp1@spana1
Creating snap: 100% complete...done.
[root@cephclt ~]# echo siteasnapa1 >>/work/prod
[root@cephclt ~]# umount /work
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer device unmap /dev/rbd0
[root@cephclt ~]# rbd --cluster sitea --id rbd-mirror-peer mirror image demote datasp/imagesp1
Image demoted to non-primary
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer mirror image promote datasp/imagesp1
Image promoted to primary
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer info datasp/imagesp1 |grep primary
mirroring primary: true
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer device map datasp/imagesp1
/dev/rbd0
[root@cephclt ~]# mount /dev/rbd0 /work
[root@cephclt ~]# cat /work/prod
sitea prod
siteb prod
siteasnapa1
[root@cephclt ~]# rbd --cluster siteb --id rbd-mirror-peer snap list datasp/imagesp1
SNAPID  NAME    SIZE   PROTECTED  TIMESTAMP
95      spana1  1 GiB             Fri Aug 26 17:45:01 2022
# The snapshot created on sitea is visible on siteb.

# Command memo
# Check the replication status of the data pool:
rbd mirror pool status data
# Check that an image is primary before mounting it:
rbd --cluster sitea --id rbd-mirror-peer info datasp/imagesp1 |grep primary

# Documentation
https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/5/html/block_device_guide/index
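# Going further (aside, not part of the original lab): if both sites ever
# report "mirroring primary: true" for the same image after a forced
# promotion (split-brain), the stale copy can be demoted and then rebuilt
# from the peer with a resync request, e.g.:
# rbd --cluster siteb --id rbd-mirror-peer mirror image resync datasp/imagesp1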