dnf -y install centos-release-ceph-reef
dnf -y install cephadm
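cephadm expects a container engine and time synchronization to already be working before the bootstrap below. A quick pre-flight check (a sketch, assuming podman and chronyd are what this host uses; cephadm version may pull the default Ceph container image the first time it runs):

# podman --version
# systemctl is-active chronyd
# cephadm version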
# cephadm bootstrap --mon-ip 192.168.16.10
cephadm bootstrap --mon-ip 192.168.10.10
Verifying podman|docker is present...
...
Wrote config to /etc/ceph/ceph.conf
Wrote keyring to /etc/ceph/ceph.client.admin.keyring
…
firewalld ready
Enabling firewalld port 9283/tcp in current zone...
Enabling firewalld port 8765/tcp in current zone...
Enabling firewalld port 8443/tcp in current zone...
…
Wrote public SSH key to /etc/ceph/ceph.pub
Adding key to root@localhost authorized_keys...
Adding host c1...
Deploying mon service with default placement...
Deploying mgr service with default placement...
Deploying crash service with default placement...
Deploying ceph-exporter service with default placement...
Deploying prometheus service with default placement...
Deploying grafana service with default placement...
Deploying node-exporter service with default placement...
Deploying alertmanager service with default placement...
…
Ceph Dashboard is now available at:
             URL: https://c1:8443/
            User: admin
        Password: ........
Enabling client.admin keyring and conf on hosts with "admin" label
Saving cluster configuration to /var/lib/ceph/7592ec0a-3c7a-11ee-b450-a0369f70d4f8/config directory
Enabling autotune for osd_memory_target
You can access the Ceph CLI as following in case of multi-cluster or non-default config:
        sudo /usr/sbin/cephadm shell --fsid 7592ec0a-3c7a-11ee-b450-a0369f70d4f8 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
Or, if you are only running a single cluster on this host:
        sudo /usr/sbin/cephadm shell
Please consider enabling telemetry to help improve Ceph:
        ceph telemetry on
For more information see:
        https://docs.ceph.com/en/latest/mgr/telemetry/
Bootstrap complete.
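Once the bootstrap finishes, the single-node cluster can be checked without installing the ceph CLI on the host by running commands through the cephadm shell. A minimal sketch, assuming the default /etc/ceph/ceph.conf and admin keyring written above:

# cephadm shell -- ceph -s
# cephadm shell -- ceph orch ps

ceph -s should report one mon and one mgr on c1; ceph orch ps lists the monitoring daemons (prometheus, grafana, alertmanager, node-exporter) deployed above.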
https://docs.ceph.com/en/latest/cephadm/
https://docs.ceph.com/en/latest/cephadm/install/#install-cephadm
Uninstall the last Ceph node
# systemctl status ceph<tab>
ceph.target
ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target
ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@alertmanager.c1.service
ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@ceph-exporter.c1.service
ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@crash.c1.service
ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@grafana.c1.service
ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@mgr.c1.hijqcf.service
ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@mon.c1.service
ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@node-exporter.c1.service
ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@prometheus.c1.service

# systemctl status ceph.target
● ceph.target - All Ceph clusters and services
     Loaded: loaded (/etc/systemd/system/ceph.target; enabled; preset: disabled)
     Active: active since Thu 2023-08-17 06:19:24 KST; 44min ago

# podman ps -a
CONTAINER ID
10826ebf2f23  ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8-mon-c1
52728e97ed80  ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8-mgr-c1-hijqcf
362f58d17f80  ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8-ceph-exporter-c1
0997132bdd95  ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8-crash-c1
63cbf2dabf52  ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8-node-exporter-c1
7417b1f063c4  ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8-prometheus-c1
d28c76dcc9cd  ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8-alertmanager-c1
74b3feaae01f  ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8-grafana-c1

# find /etc/systemd/system/ -name 'ceph*'
/etc/systemd/system/multi-user.target.wants/ceph.target
/etc/systemd/system/multi-user.target.wants/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target
/etc/systemd/system/ceph.target
/etc/systemd/system/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target
/etc/systemd/system/ceph.target.wants
/etc/systemd/system/ceph.target.wants/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target
/etc/systemd/system/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@.service
/etc/systemd/system/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target.wants
/etc/systemd/system/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target.wants/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@mon.c1.service
/etc/systemd/system/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target.wants/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@mgr.c1.hijqcf.service
/etc/systemd/system/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target.wants/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@ceph-exporter.c1.service
/etc/systemd/system/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target.wants/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@crash.c1.service
/etc/systemd/system/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target.wants/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@node-exporter.c1.service
/etc/systemd/system/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target.wants/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@alertmanager.c1.service
/etc/systemd/system/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target.wants/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@grafana.c1.service
/etc/systemd/system/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8.target.wants/ceph-7592ec0a-3c7a-11ee-b450-a0369f70d4f8@prometheus.c1.service

# systemctl stop ceph.target

# podman ps -a
CONTAINER ID
<nothing>

# rm -rf /etc/ceph /var/lib/ceph /etc/systemd/system/ceph*
# systemctl daemon-reload
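The manual cleanup above (stop ceph.target, remove /etc/ceph, /var/lib/ceph and the systemd unit files) can also be done by cephadm itself. A hedged alternative, using the fsid from the bootstrap output; this destroys all cluster data on the host:

# cephadm rm-cluster --force --fsid 7592ec0a-3c7a-11ee-b450-a0369f70d4f8
# cephadm ls

cephadm ls should then report no remaining daemons on this host.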