ANF CEPH 2022, October 3-7 2022, Sébastien Geiger
[ceph: root@ceph1 /]# radosgw-admin realm create --rgw-realm=demodom --default
[ceph: root@ceph1 /]# #radosgw-admin zonegroup delete --rgw-zonegroup=default
[ceph: root@ceph1 /]# radosgw-admin zonegroup create --rgw-zonegroup=euzone --endpoints=http://ceph3:80 --master --default
[ceph: root@ceph1 /]# radosgw-admin zone create --rgw-zonegroup=euzone --rgw-zone=fr-east-1 --endpoints=http://ceph3:80
[ceph: root@ceph1 /]# radosgw-admin user create --uid=zone.user --display-name="Zone user" --system --access-key="s3frKeyAcc" --secret="s3frSecret"
[ceph: root@ceph1 /]# radosgw-admin zone modify --rgw-zone=fr-east-1 --access-key=s3frKeyAcc --secret=s3frSecret
[ceph: root@ceph1 /]# radosgw-admin period update --commit
[ceph: root@ceph1 /]# ceph orch apply rgw euzone --realm=demodom --zone=fr-east-1 --placement=ceph3
[ceph: root@ceph1 /]# radosgw-admin user create --uid=johndoe --display-name="John Doe" --access-key="s3johnKeyAcc" --secret="s3johnKeyMagaSecret"
# from cephclt
[almalinux@cephclt ~]$ sudo dnf install -y epel-release
[almalinux@cephclt ~]$ sudo dnf install -y awscli
# create the s3cfg.txt file:
export AWS_ACCESS_KEY_ID=s3johnKeyAcc
export AWS_SECRET_ACCESS_KEY=s3johnKeyMagaSecret
export AWS_DEFAULT_REGION=fr-east-1
alias awsa='aws --endpoint-url http://ceph3'
export PYTHONWARNINGS="ignore"

source s3cfg.txt
awsa s3api create-bucket --bucket mybucket
awsa s3 ls
2022-05-02 10:26:11 mybucket
awsa s3 cp /etc/hosts s3://mybucket/hostceph3
upload: ../../etc/hosts to s3://mybucket/hostceph3
awsa s3 ls s3://mybucket
2022-05-07 18:25:35        368 hostceph3
# install the second rgw on ceph4
[ceph: root@ceph1 /]# ceph orch host label add ceph3 rgwdemo
[ceph: root@ceph1 /]# ceph orch host label add ceph4 rgwdemo
[ceph: root@ceph1 /]# ceph orch host ls
HOST   ADDR          LABELS   STATUS
ceph1  172.16.7.125  _admin
ceph2  172.16.7.245  _admin
ceph3  172.16.7.180  rgwdemo
ceph4  172.16.7.67   rgwdemo
4 hosts in cluster
[ceph: root@ceph1 /]# ceph orch apply rgw euzone --realm=demodom --zone=fr-east-1 --placement=label:rgwdemo
Scheduled rgw.euzone update...
[ceph: root@ceph1 /]# ceph orch ls |grep rgw
rgw.euzone  ?:80  2/2  7m ago  12s  label:rgwdemo
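# Before relying on the second gateway, the multisite layout (realm, zonegroup,
# zone, committed period) can be sanity-checked with a few read-only commands;
# a minimal sketch, output omitted here:
[ceph: root@ceph1 /]# radosgw-admin realm list
[ceph: root@ceph1 /]# radosgw-admin zonegroup get --rgw-zonegroup=euzone
[ceph: root@ceph1 /]# radosgw-admin period get-current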
# verification
[ceph: root@ceph1 /]# curl http://ceph3:80 |xmllint --format -
[ceph: root@ceph1 /]# curl http://ceph4:80 |xmllint --format -
# from cephclt
alias awsa='aws --endpoint-url http://ceph3'
alias awsb='aws --endpoint-url http://ceph4'
awsa s3 ls s3://mybucket
2022-05-07 18:25:35        368 hostceph3
awsb s3 ls s3://mybucket
2022-05-07 18:25:35        368 hostceph3
awsb s3 cp /etc/hosts s3://mybucket/hostceph4
upload: ../../etc/hosts to s3://mybucket/hostceph4
awsa s3 ls s3://mybucket
2022-05-07 18:25:35        368 hostceph3
2022-05-07 18:46:50        368 hostceph4
# configure https in HA mode for rgw
[almalinux@ceph1 ~]$ openssl req -x509 -nodes -days 365 -out rgwha.crt -keyout rgwha.key
Country Name (2 letter code) [XX]:fr
State or Province Name (full name) []:alsace
Locality Name (eg, city) [Default City]:strasbourg
Organization Name (eg, company) [Default Company Ltd]:cephlab
Organizational Unit Name (eg, section) []:
Common Name (eg, your name or your server's hostname) []:rgwha.novalocal
cat rgwha.crt rgwha.key >rgwha.yaml
# get the IP of the VIP with the command below, then change the value in the file that follows
getent ahostsv4 rgwha |head -n 1| awk '{ print $1 }'
# edit the rgwha.yaml file so that it looks like this
cat rgwha.yaml
service_type: ingress
service_id: rgwhademo
placement:
  hosts:
    - ceph1
    - ceph2
spec:
  backend_service: rgw.euzone
  virtual_ip: 192.168.111.14/24   # << replace with your VIP here
  frontend_port: 443
  monitor_port: 1967
  ssl_cert: |
    -----BEGIN CERTIFICATE-----
    MIIDxzCCAq+gAwIBAgIUPB57k0KuWd3Tbp4qNo2meyJV7m0wDQYJKoZIhvcNAQEL
    ...
    heSMHZro5QphvSg=
    -----END CERTIFICATE-----
    -----BEGIN PRIVATE KEY-----
    MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDJ2p0L16PVqkKt
    ...
    9B48bfj5CwHeeNQKn1SurA==
    -----END PRIVATE KEY-----
# vim tip, to indent by 2 spaces: :set shiftwidth=2, then shift-v, select all the lines, then 2 then >
# make the file available inside the container (mounted under /mnt)
sudo cephadm shell -m /home/almalinux/rgwha.yaml
[ceph: root@ceph1 /]# ceph orch apply -i /mnt/rgwha.yaml
Scheduled ingress.rgwhademo update...
[ceph: root@ceph1 /]# ceph orch ls |grep rgw
ingress.rgwhademo  172.16.7.248:443,1967  4/4  7m ago  7m  ceph1;ceph2
rgw.euzone         ?:80                   2/2  6m ago  2h  label:rgwdemo
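# Once ceph orch ls reports 4/4 ingress daemons, the certificate actually served
# on the VIP can be inspected with openssl; a quick check, assuming the rgwha
# name resolves to the virtual_ip:
[almalinux@cephclt ~]$ echo | openssl s_client -connect rgwha:443 2>/dev/null | openssl x509 -noout -subject -dates
# the subject should show CN = rgwha.novalocal, and notBefore/notAfter give the validity window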
[ceph: root@ceph1 /]# curl https://rgwha --insecure |xmllint --format -
# change the alias to switch to https
[almalinux@cephclt ~]$ alias aws='aws --no-verify-ssl --endpoint-url https://rgwha'
[almalinux@cephclt ~]$ aws s3 ls mybucket
2022-06-20 07:09:39        372 hostceph3
2022-06-20 07:13:35        372 hostceph4
# test the failover of the virtual_ip
# find which node holds the IP, using the "ip ad" command on ceph1 and ceph2
[ceph: root@ceph1 /]# ceph orch ps |grep haproxy
haproxy.rgwhademo.ceph1.loypkk  ceph1  *:443,1967  running (38m)  5m ago  38m  25.1M  -  2.3.21-3ce4ee0  7ecd3fda00f4  de5b9d14cd0d
haproxy.rgwhademo.ceph2.ozdrwc  ceph2  *:443,1967  running (38m)  6m ago  38m  9680k  -  2.3.21-3ce4ee0  7ecd3fda00f4  3c510b716d37
[root@ceph1 ~]# ip ad |grep 172
    inet 172.16.7.125/24 brd 172.16.7.255 scope global dynamic noprefixroute eth0
    inet 172.16.7.248/24 scope global secondary eth0
# once you have determined which container holds the VIP, stop that daemon,
# using the name that ceph orch ps reports in your environment
[ceph: root@ceph1 /]# ceph orch daemon stop haproxy.rgwhademo.ceph1.loypkk
# check that the aws client can still reach the virtual_ip
[almalinux@cephclt ~]$ aws s3 ls mybucket
2022-06-20 07:09:39        372 hostceph3
2022-06-20 07:13:35        372 hostceph4
# check with "ip ad" on the 2 nodes ceph1,ceph2 that the virtual_ip has moved
[root@ceph2 ~]# ip ad |grep 172
    inet 172.16.7.245/24 brd 172.16.7.255 scope global dynamic noprefixroute eth0
    inet 172.16.7.248/24 scope global secondary eth0
# note: the VIP (here 172.16.7.248) has moved from ceph1 to ceph2
[ceph: root@ceph1 /]# ceph orch daemon start haproxy.rgwhademo.ceph1.loypkk
[ceph: root@ceph1 /]# ceph orch ps |grep haproxy
# note: both containers are running again
# S3 TAGS
[almalinux@cephclt ~]$ echo $(date) >filetag.txt
[almalinux@cephclt ~]$ aws s3api put-object --bucket mybucket --key filetag --body filetag.txt
{
    "ETag": "\"5d278aa541e21fe7863f907bc76d44f5\""
}
[almalinux@cephclt ~]$ aws s3api put-object-tagging --bucket mybucket --key filetag --tagging 'TagSet=[{Key=myk1,Value=myval1}]'
[almalinux@cephclt ~]$ aws s3api get-object-tagging --bucket mybucket --key filetag
{
    "TagSet": [
        {
            "Key": "myk1",
            "Value": "myval1"
        }
    ]
}
[almalinux@cephclt ~]$ aws s3api delete-object --bucket mybucket --key filetag
# Elastic sync module
Since the Kraken release of Ceph, object data can be indexed by its metadata and searched through Elasticsearch. The metadata is exported to Elasticsearch by the radosgw elastic sync module.
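# This was not set up during the lab, but roughly, the indexing side is declared
# as an extra zone whose tier type is elasticsearch. A hedged sketch: the zone
# name fr-meta-1, the host elastichost and the shard/replica counts below are
# illustrative assumptions, not values from this lab:
[ceph: root@ceph1 /]# radosgw-admin zone create --rgw-zonegroup=euzone --rgw-zone=fr-meta-1 --tier-type=elasticsearch --tier-config=endpoint=http://elastichost:9200,num_shards=10,num_replicas=1
[ceph: root@ceph1 /]# radosgw-admin period update --commit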
For more information see: https://docs.ceph.com/en/latest/radosgw/elastic-sync-module/
# S3 quota
# user quota management commands:
# radosgw-admin quota set --quota-scope=user --uid=<uid> [--max-objects=<num>] [--max-size=<size>]
# radosgw-admin quota enable --quota-scope=user --uid=<uid>
# radosgw-admin quota set --uid=<uid> --quota-scope=bucket [--max-objects=<num>] [--max-size=<size>]
# user: johndoe
[almalinux@cephclt ~]$ awsa s3api create-bucket --bucket bquota
[ceph: root@ceph1 /]# radosgw-admin quota set --uid=johndoe --quota-scope=bucket --max-size=700000000 --bucket=bquota
[ceph: root@ceph1 /]# radosgw-admin quota enable --quota-scope=bucket --uid=johndoe --bucket=bquota
[ceph: root@ceph1 /]# radosgw-admin bucket stats --bucket=bquota
[ceph: root@ceph1 /]# radosgw-admin user stats --uid=johndoe --sync-stats
[almalinux@cephclt ~]$ dd if=/dev/urandom of=f500mo.txt bs=1048576 count=500
[almalinux@cephclt ~]$ awsa s3 cp f500mo.txt s3://bquota/f500mo.txt
upload: ./f500mo.txt to s3://bquota/f500mo.txt
[almalinux@cephclt ~]$ awsa s3 cp f500mo.txt s3://bquota/f500mov2.txt
upload failed: ./f500mo.txt to s3://bquota/f500mov2.txt An error occurred (QuotaExceeded) when calling the UploadPart operation: Unknown
# STORAGE CLASSES
[ceph: root@ceph1 /]# radosgw-admin zonegroup placement add --rgw-zonegroup euzone --placement-id default-placement --storage-class COLD
[ceph: root@ceph1 /]# radosgw-admin zone placement add --rgw-zone fr-east-1 --placement-id default-placement --storage-class COLD --data-pool fr-east-1.rgw.cold.data --compression lz4
[ceph: root@ceph1 /]# ceph osd pool create fr-east-1.rgw.cold.data 16 erasure
[ceph: root@ceph1 /]# ceph osd pool application enable fr-east-1.rgw.cold.data rgw
[ceph: root@ceph1 /]# radosgw-admin period update --commit
...
    "placement_targets": [
        {
            "name": "default-placement",
            "tags": [],
            "storage_classes": [
                "COLD",
                "STANDARD"
            ]
...
[almalinux@cephclt ~]$ vi lifecycle.json
{
    "Rules": [
        {
            "ID": "caocf1u97nsvoi1ajo70",
            "Prefix": "",
            "Status": "Enabled",
            "Transitions": [
                {
                    "Days": 1,
                    "StorageClass": "COLD"
                }
            ]
        }
    ]
}
[almalinux@cephclt ~]$ aws s3api put-bucket-lifecycle-configuration --bucket mybucket --lifecycle-configuration file://lifecycle.json
[almalinux@cephclt ~]$ aws s3api get-bucket-lifecycle-configuration --bucket mybucket
{
    "Rules": [
        {
            "ID": "caocf1u97nsvoi1ajo70",
            "Prefix": "",
            "Status": "Enabled",
            "Transitions": [
                {
                    "Days": 1,
                    "StorageClass": "COLD"
                }
            ]
        }
    ]
}
[ceph: root@ceph1 /]# radosgw-admin lc get --bucket mybucket
...
    "transitions": {
        "COLD": {
            "days": "1",
            "date": "",
            "storage_class": "COLD"
        }
...
[ceph: root@ceph1 /]# radosgw-admin lc process
[almalinux@cephclt ~]$ aws s3api list-objects --bucket mybucket
...
    "Contents": [
        {
            "Key": "hostceph3",
            "LastModified": "2022-06-20T10:22:11.045Z",
            "ETag": "\"4d1f08d5edcd3efb9b784ed84e3f1316\"",
            "Size": 372,
            "StorageClass": "STANDARD",
            "Owner": {
                "DisplayName": "John Doe",
                "ID": "johndoe"
            }
        },
...
# note: the transition runs as a background task; check again once the transition period has elapsed.
# Documentation
https://access.redhat.com/documentation/fr-fr/red_hat_ceph_storage/5/html/object_gateway_guide/administration
# after 2 days, "aws s3api list-objects --bucket mybucket" does show the StorageClass change
...
    "Contents": [
        {
            "Key": "hostceph3",
            "LastModified": "2022-08-20T10:22:11.045Z",
            "ETag": "\"4d1f08d5edcd3efb9b784ed84e3f1316\"",
            "Size": 372,
            "StorageClass": "COLD",
            "Owner": {
                "DisplayName": "John Doe",
                "ID": "johndoe"
            }
        },
...
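# An object can also be written straight into a storage class, without waiting
# for a lifecycle transition. A hedged sketch, not part of the lab run: the
# low-level s3api call is used because the high-level "aws s3 cp" only accepts
# the stock AWS storage class names; the key "coldfile" is just an example.
[almalinux@cephclt ~]$ aws s3api put-object --bucket mybucket --key coldfile --body /etc/hosts --storage-class COLD
[almalinux@cephclt ~]$ aws s3api head-object --bucket mybucket --key coldfile
# the reply should contain "StorageClass": "COLD"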
# versioning
[almalinux@cephclt ~]$ awsa s3api create-bucket --bucket bvers
[almalinux@cephclt ~]$ echo version1 >file.txt
[almalinux@cephclt ~]$ awsa s3 cp file.txt s3://bvers/file.txt
upload: ./file.txt to s3://bvers/file.txt
[almalinux@cephclt ~]$ awsa s3 cp s3://bvers/file.txt file1.txt
download: s3://bvers/file.txt to ./file1.txt
[almalinux@cephclt ~]$ cat ./file1.txt
version1
[almalinux@cephclt ~]$ awsa s3 ls s3://bvers
[almalinux@cephclt ~]$ awsa s3api list-object-versions --bucket bvers
{
    "Contents": [
        {
            "Key": "file.txt",
            "LastModified": "2022-06-22T14:59:10.602Z",
            "ETag": "\"33b051d0058d645e334a7056e253e1c5\"",
            "Size": 9,
            "StorageClass": "STANDARD",
            "Owner": {
                "DisplayName": "John Doe",
                "ID": "johndoe"
            }
        }
    ]
}
[almalinux@cephclt ~]$ awsa s3 ls s3://bvers
[almalinux@cephclt ~]$ awsa s3 rm s3://bvers/file.txt
[almalinux@cephclt ~]$ awsa s3 ls s3://bvers
[almalinux@cephclt ~]$ awsa s3api list-object-versions --bucket bvers
# enable versioning
[almalinux@cephclt ~]$ awsa s3api get-bucket-versioning --bucket bvers
[almalinux@cephclt ~]$ awsa s3api put-bucket-versioning --bucket bvers --versioning-configuration Status=Enabled
[almalinux@cephclt ~]$ awsa s3api get-bucket-versioning --bucket bvers
{
    "Status": "Enabled",
    "MFADelete": "Disabled"
}
[almalinux@cephclt ~]$ awsa s3 cp file.txt s3://bvers/file.txt
[almalinux@cephclt ~]$ echo version2 >file.txt
[almalinux@cephclt ~]$ awsa s3 cp file.txt s3://bvers/file.txt
[almalinux@cephclt ~]$ awsa s3 ls s3://bvers
[almalinux@cephclt ~]$ awsa s3api list-object-versions --bucket bvers
...
    "ETag": "\"2a361dabe3e2dfa3feb1aaef167d8e28\"",
    "Key": "file.txt",
    "VersionId": "IuuTtyGHqjfrVgGUS6SuYMqNMHuCYpw",
    "IsLatest": true,
...
    "ETag": "\"33b051d0058d645e334a7056e253e1c5\"",
    "Key": "file.txt",
    "VersionId": "DzC4QLKxoOpSrMaYFKqJPkoEqAdlpSm",
    "IsLatest": false,
...
# note: all versions are listed; the most recent one has "IsLatest": true
[almalinux@cephclt ~]$ awsa s3api list-object-versions --bucket bvers --prefix file.txt --query 'Versions[?IsLatest].[VersionId]'
...
    "IuuTtyGHqjfrVgGUS6SuYMqNMHuCYpw"
...
[almalinux@cephclt ~]$ awsa s3api list-object-versions --bucket bvers --prefix file.txt --query 'Versions[?IsLatest].[Key,VersionId,LastModified]'
# retrieve the first version of the file, the one without "IsLatest": true;
# look up its VersionId in your own environment
[almalinux@cephclt ~]$ awsa s3api get-object --bucket bvers --key file.txt --version-id "DzC4QLKxoOpSrMaYFKqJPkoEqAdlpSm" filegv.txt
{
    "AcceptRanges": "bytes",
    "LastModified": "Wed, 22 Jun 2022 15:51:04 GMT",
    "ContentLength": 9,
    "ETag": "\"33b051d0058d645e334a7056e253e1c5\"",
    "VersionId": "DzC4QLKxoOpSrMaYFKqJPkoEqAdlpSm",
    "ContentType": "text/plain",
    "Metadata": {}
}
[almalinux@cephclt ~]$ cat filegv.txt
version1
# note: a specific version of the file can be retrieved.
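# To fetch every stored version of a key in one go, a loop over
# list-object-versions can be used; a minimal sketch typed at the interactive
# prompt (so the awsa alias expands), reusing the file.txt key from above:
[almalinux@cephclt ~]$ for vid in $(awsa s3api list-object-versions --bucket bvers --prefix file.txt --query 'Versions[].VersionId' --output text); do awsa s3api get-object --bucket bvers --key file.txt --version-id "$vid" "file_${vid}.txt"; done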
# deletion
[almalinux@cephclt ~]$ awsa s3 rm s3://bvers/file.txt
[almalinux@cephclt ~]$ awsa s3 ls s3://bvers/
# restore the latest version
[almalinux@cephclt ~]$ awsa s3api list-object-versions --bucket bvers
# once deleted, the file shows up under "DeleteMarkers"
...
    "DeleteMarkers": [
        "Key": "file.txt",
        "VersionId": "3yoYo9xPEIx7HQKKzbxbF2YhqPnVtq3",
        "IsLatest": true,
...
[almalinux@cephclt ~]$ awsa s3api delete-object --bucket bvers --key file.txt --version-id "3yoYo9xPEIx7HQKKzbxbF2YhqPnVtq3"
{
    "DeleteMarker": true,
    "VersionId": "3yoYo9xPEIx7HQKKzbxbF2YhqPnVtq3"
}
[almalinux@cephclt ~]$ awsa s3 ls s3://bvers/
[almalinux@cephclt ~]$ awsa s3api list-object-versions --bucket bvers
...
    "Versions": [
        "VersionId": "IuuTtyGHqjfrVgGUS6SuYMqNMHuCYpw",
        "IsLatest": true,
...
# note: the previous version is marked "IsLatest": true again
# permanently delete a specific version
[almalinux@cephclt ~]$ awsa s3api delete-object --bucket bvers --key file.txt --version-id "DzC4QLKxoOpSrMaYFKqJPkoEqAdlpSm"
[almalinux@cephclt ~]$ awsa s3api list-object-versions --bucket bvers
# a version can be permanently deleted even though it does not appear under DeleteMarkers
# managing object expiration
# sample configuration; in practice, previous versions could be kept 30 days before their automatic deletion.
vi s3del.json
{
    "Rules": [
        {
            "Expiration": {
                "ExpiredObjectDeleteMarker": true
            },
            "AbortIncompleteMultipartUpload": {
                "DaysAfterInitiation": 1
            },
            "ID": "keepoldversions",
            "Prefix": "",
            "Status": "Enabled",
            "NoncurrentVersionExpiration": {
                "NoncurrentDays": 1
            }
        }
    ]
}
[almalinux@cephclt ~]$ awsa s3api put-bucket-lifecycle-configuration --bucket bvers --lifecycle-configuration file://s3del.json
[almalinux@cephclt ~]$ awsa s3api get-bucket-lifecycle-configuration --bucket bvers
{
    "Rules": [
        {
            "Expiration": {
                "ExpiredObjectDeleteMarker": true
            },
            "AbortIncompleteMultipartUpload": {
                "DaysAfterInitiation": 1
            },
            "ID": "keepoldversions",
            "Prefix": "",
            "Status": "Enabled",
            "NoncurrentVersionExpiration": {
                "NoncurrentDays": 1
            }
        }
    ]
}
# note: ceph does not support NewerNoncurrentVersions in a lifecycle configuration
# (to cap the number of versions to keep)
# delete the rule
[almalinux@cephclt ~]$ awsa s3api delete-bucket-lifecycle --bucket bvers
# disable versioning
[almalinux@cephclt ~]$ awsa s3api put-bucket-versioning --bucket bvers --versioning-configuration Status=Suspended
# note: while versioning is suspended, deleting an object yields a versionId of "null"
# a versioned bucket must be empty before it can be removed.
[almalinux@cephclt ~]$ awsa s3 rb s3://bvers --force
remove_bucket failed: s3://bvers An error occurred (BucketNotEmpty) when calling the DeleteBucket operation: Unknown
# note: the bucket is not really empty; the remaining entries must be deleted first
[almalinux@cephclt ~]$ awsa s3api list-object-versions --bucket bvers
# tip: https://towardsthecloud.com/aws-cli-empty-s3-bucket
# this works with:
[almalinux@cephclt ~]$ aws s3api delete-objects --bucket bvers --delete "$(aws s3api list-object-versions --bucket "bvers" --output=json --query='{Objects: Versions[].{Key:Key,VersionId:VersionId}}')"
[almalinux@cephclt ~]$ awsa s3api delete-object --bucket bvers --key file.txt --version-id "null"
[almalinux@cephclt ~]$ awsa s3 rb s3://bvers --force
remove_bucket: bvers
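# Note: the one-liner above only feeds the "Versions" entries to delete-objects.
# If delete markers are still present, the same pattern applies to the
# DeleteMarkers array; a sketch along the same lines (it errors if the array is empty):
[almalinux@cephclt ~]$ aws s3api delete-objects --bucket bvers --delete "$(aws s3api list-object-versions --bucket "bvers" --output=json --query='{Objects: DeleteMarkers[].{Key:Key,VersionId:VersionId}}')"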
# WORM
# note: the WORM feature must be enabled when the bucket is created.
[almalinux@cephclt ~]$ awsa s3api create-bucket --object-lock-enabled-for-bucket --bucket bworm3
[almalinux@cephclt ~]$ awsa s3api put-object-lock-configuration --bucket bworm3 --object-lock-configuration '{ "ObjectLockEnabled": "Enabled", "Rule": { "DefaultRetention": { "Mode": "COMPLIANCE", "Days": 5 }}}'
[almalinux@cephclt ~]$ awsa s3api get-object-lock-configuration --bucket bworm3
{
    "ObjectLockConfiguration": {
        "ObjectLockEnabled": "Enabled",
        "Rule": {
            "DefaultRetention": {
                "Mode": "COMPLIANCE",
                "Days": 5
            }
        }
    }
}
[almalinux@cephclt ~]$ awsa s3 cp /etc/hosts s3://bworm3/file3
upload: ../../etc/hosts to s3://bworm3/file3
[almalinux@cephclt ~]$ awsa s3api list-object-versions --bucket bworm3
{
    "Versions": [
        {
            "ETag": "\"25edc86598ec69e5983880d8b627aca3\"",
            "Size": 372,
            "StorageClass": "STANDARD",
            "Key": "file3",
            "VersionId": "L54JH8eK0NWfZWinnC-rgQSiBHhC2i3",
            "IsLatest": true,
            "LastModified": "2022-09-20T16:09:55.754Z",
            "Owner": {
                "DisplayName": "John Doe",
                "ID": "johndoe"
            }
        }
    ]
}
[almalinux@cephclt ~]$ awsa s3api get-object-retention --bucket bworm3 --key file3 --version-id "L54JH8eK0NWfZWinnC-rgQSiBHhC2i3"
{
    "Retention": {
        "Mode": "COMPLIANCE",
        "RetainUntilDate": "2022-06-28T19:22:32.304093934Z"
    }
}
[almalinux@cephclt ~]$ awsa s3api delete-object --bucket bworm3 --key file3 --version-id "L54JH8eK0NWfZWinnC-rgQSiBHhC2i3"
An error occurred (AccessDenied) when calling the DeleteObject operation: forbidden by object lock
# shorten the retention period
[almalinux@cephclt ~]$ awsa s3 cp /etc/hosts s3://bworm3/file4
upload: ../../etc/hosts to s3://bworm3/file4
[almalinux@cephclt ~]$ awsa s3api get-object-retention --bucket bworm3 --key file4
[almalinux@cephclt ~]$ awsa s3api put-object-retention --bucket bworm3 --key file4 --retention '{"Mode":"COMPLIANCE","RetainUntilDate":"2022-06-27T18:42:11.385204325Z"}'
An error occurred (AccessDenied) when calling the PutObjectRetention operation: proposed retain-until date shortens an existing retention period and governance bypass check failed
# extend the retention period
[almalinux@cephclt ~]$ awsa s3api put-object-retention --bucket bworm3 --key file4 --retention '{"Mode":"COMPLIANCE","RetainUntilDate":"2022-12-30T18:42:11.385204325Z"}'
[almalinux@cephclt ~]$ awsa s3api get-object-retention --bucket bworm3 --key file4
{
    "Retention": {
        "Mode": "COMPLIANCE",
        "RetainUntilDate": "2022-12-30T18:42:11.385204000Z"
    }
}
# note: a bucket-lifecycle-configuration rule can be added on top, as seen earlier
# for more info see https://support.binero.com/knowledge-base/object-lock/?lang=en
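# As the errors above show, COMPLIANCE mode cannot be overridden. In the S3
# object lock model, GOVERNANCE mode lets a user holding the
# s3:BypassGovernanceRetention permission override the lock. A hedged sketch,
# not run during the lab: bworm4 and file5 are hypothetical names, and the
# VersionId must be taken from your own environment:
[almalinux@cephclt ~]$ awsa s3api create-bucket --object-lock-enabled-for-bucket --bucket bworm4
[almalinux@cephclt ~]$ awsa s3api put-object-lock-configuration --bucket bworm4 --object-lock-configuration '{ "ObjectLockEnabled": "Enabled", "Rule": { "DefaultRetention": { "Mode": "GOVERNANCE", "Days": 5 }}}'
[almalinux@cephclt ~]$ awsa s3 cp /etc/hosts s3://bworm4/file5
[almalinux@cephclt ~]$ awsa s3api list-object-versions --bucket bworm4
[almalinux@cephclt ~]$ awsa s3api delete-object --bucket bworm4 --key file5 --version-id "<VersionId>" --bypass-governance-retention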