Ugh... this is disgusting...


Guess I'll just use the version from two years ago.

Wow... seriously, so many bugs...




https://www.ovirt.org/develop/release-management/features/integration/ovirtappliance.html

engine-setup --offline --config-append=ovirt-engine-answers
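
The answers file follows the usual otopi key=type:value format. A minimal sketch (exact keys vary by engine version; a complete file can be lifted from /var/lib/ovirt-engine/setup/answers/ after a previous run):

[environment:default]
OVESETUP_DIALOG/confirmSettings=bool:True
OVESETUP_CONFIG/fqdn=str:engine.ovirt.local
OVESETUP_CONFIG/adminPassword=str:********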


Adding the FQDN

https://access.redhat.com/documentation/ko-kr/red_hat_virtualization/4.1/html/installation_guide/connecting_to_the_administration_portal
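
If the engine FQDN isn't resolvable via DNS, the guide above boils down to adding it to the hosts file of the machine running the browser as well (addresses from this setup):

# echo '192.168.16.16   engine.ovirt.local' >> /etc/hosts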



https://atl.kr/dokuwiki/doku.php/internal_user_management
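
For reference, internal users are managed on the engine host with ovirt-aaa-jdbc-tool; a short sketch (testuser is a placeholder, and the account still needs permissions assigned in the Administration Portal):

# ovirt-aaa-jdbc-tool user add testuser --attribute=firstName=Test
# ovirt-aaa-jdbc-tool user password-reset testuser --password-valid-to='2030-01-01 12:00:00-0000'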


Vdsm throws an exception when parsing the XML output of gluster volume info under the latest Gluster version, 10.
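
Until that's fixed, one way to keep the nodes off Gluster 10 is version-locking the installed packages (my workaround sketch, using the standard dnf versionlock plugin):

# dnf install python3-dnf-plugin-versionlock
# dnf versionlock add 'glusterfs*'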



[ovirt-users] [IMPORTANT] Upgrade to postgresql-jdbc-42.2.14-1 breaks oVirt Engine 4.4/4.5

https://lists.ovirt.org/archives/list/users@ovirt.org/message/SBCWNXLFLJBKTA3TFJARE7QCYZQ6QMMH/
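
Per the thread, the engine breaks under the newer JDBC driver; until a fixed build lands, rolling the package back would look like this (hedged, standard dnf usage):

# dnf downgrade postgresql-jdbc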





https://www.oss.kr/storage/app/public/oss/bb/13/007_[GlusterFS]%20Solution%20Guide%20V0.4_20181203.pdf



# systemctl enable --now chronyd
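
Time sync can then be sanity-checked with the stock chrony client:

# chronyc sources -v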

# cat /etc/hosts
192.168.16.7    n1.ovirt.local
192.168.16.8    n2.ovirt.local
192.168.16.9    n3.ovirt.local
...
192.168.16.16   engine.ovirt.local

## on n1.ovirt.local
# ssh-keygen
# ssh-copy-id n1.ovirt.local
# ssh-copy-id n2.ovirt.local
# ssh-copy-id n3.ovirt.local


# lsblk
NAME                                                 MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
sda                                                    8:0    0 223.6G  0 disk
├─sda1                                                 8:1    0   600M  0 part /boot/efi
├─sda2                                                 8:2    0     1G  0 part /boot
└─sda3                                                 8:3    0   222G  0 part
  ├─onn-pool00_tmeta                                 253:0    0     1G  0 lvm
  │ └─onn-pool00-tpool                               253:2    0   173G  0 lvm
  │   ├─onn-ovirt--node--ng--4.5.0.1--0.20220426.0+1 253:3    0   136G  0 lvm  /
  │   ├─onn-pool00                                   253:5    0   173G  1 lvm
  │   ├─onn-home                                     253:6    0     1G  0 lvm  /home
  │   ├─onn-tmp                                      253:7    0     1G  0 lvm  /tmp
  │   ├─onn-var                                      253:8    0     5G  0 lvm  /var
  │   ├─onn-var_crash                                253:9    0    10G  0 lvm  /var/crash
  │   ├─onn-var_log                                  253:10   0     8G  0 lvm  /var/log
  │   ├─onn-var_log_audit                            253:11   0     2G  0 lvm  /var/log/audit
  │   └─onn-var_tmp                                  253:12   0    10G  0 lvm
  ├─onn-pool00_tdata                                 253:1    0   173G  0 lvm
  │ └─onn-pool00-tpool                               253:2    0   173G  0 lvm
  │   ├─onn-ovirt--node--ng--4.5.0.1--0.20220426.0+1 253:3    0   136G  0 lvm  /
  │   ├─onn-pool00                                   253:5    0   173G  1 lvm
  │   ├─onn-home                                     253:6    0     1G  0 lvm  /home
  │   ├─onn-tmp                                      253:7    0     1G  0 lvm  /tmp
  │   ├─onn-var                                      253:8    0     5G  0 lvm  /var
  │   ├─onn-var_crash                                253:9    0    10G  0 lvm  /var/crash
  │   ├─onn-var_log                                  253:10   0     8G  0 lvm  /var/log
  │   ├─onn-var_log_audit                            253:11   0     2G  0 lvm  /var/log/audit
  │   └─onn-var_tmp                                  253:12   0    10G  0 lvm
  └─onn-swap                                         253:4    0     4G  0 lvm  [SWAP]
sdb                                                    8:16   0   3.5T  0 disk
├─sdb1                                                 8:17   0   100G  0 part /gluster_bricks/engine
└─sdb2                                                 8:18   0   3.4T  0 part /gluster_bricks/vmstore
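
The sdb partitions above were prepared beforehand; the steps would have looked roughly like this (a sketch reconstructed from the lsblk output, mkfs options per the Gluster install docs):

# parted -s /dev/sdb mklabel gpt
# parted -s /dev/sdb mkpart primary xfs 0% 100GiB
# parted -s /dev/sdb mkpart primary xfs 100GiB 100%
# mkfs.xfs -f -i size=512 /dev/sdb1
# mkfs.xfs -f -i size=512 /dev/sdb2
# mkdir -p /gluster_bricks/{engine,vmstore}
# mount /dev/sdb1 /gluster_bricks/engine
# mount /dev/sdb2 /gluster_bricks/vmstore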



mkdir -p /gluster_bricks/{engine,vmstore,data}    ## data ends up unused below
rm -rf /gluster_bricks/engine/engine /gluster_bricks/vmstore/vmstore    ## clear leftover brick dirs (e.g. from an earlier failed attempt)
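
The peers also have to be in the trusted pool before the volumes can be created; that step isn't captured above, but on n1 it would be:

# gluster peer probe n2.ovirt.local
# gluster peer probe n3.ovirt.local
# gluster peer status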

# gluster volume create engine replica 3 n1.ovirt.local:/gluster_bricks/engine/engine n2.ovirt.local:/gluster_bricks/engine/engine n3.ovirt.local:/gluster_bricks/engine/engine

# gluster volume create vmstore replica 3 n1.ovirt.local:/gluster_bricks/vmstore/vmstore n2.ovirt.local:/gluster_bricks/vmstore/vmstore n3.ovirt.local:/gluster_bricks/vmstore/vmstore
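
Both volumes show Status: Created in the info output below, so they still have to be started; for oVirt the virt option group (visible under /var/lib/glusterd/groups/ further down) is also commonly applied first:

# gluster volume set engine group virt
# gluster volume set vmstore group virt
# gluster volume start engine
# gluster volume start vmstore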



[root@n1 ~]# gluster volume info

Volume Name: engine
Type: Replicate
Volume ID: 2528d980-6bbd-474e-a0c9-032e36fa5633
Status: Created
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: n1.ovirt.local:/gluster_bricks/engine/engine
Brick2: n2.ovirt.local:/gluster_bricks/engine/engine
Brick3: n3.ovirt.local:/gluster_bricks/engine/engine
Options Reconfigured:
cluster.granular-entry-heal: on
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off

Volume Name: vmstore
Type: Replicate
Volume ID: 87ac4eae-efdc-4edf-9ba0-ab1d56b63ad7
Status: Created
Snapshot Count: 0
Number of Bricks: 1 x 3 = 3
Transport-type: tcp
Bricks:
Brick1: n1.ovirt.local:/gluster_bricks/vmstore/vmstore
Brick2: n2.ovirt.local:/gluster_bricks/vmstore/vmstore
Brick3: n3.ovirt.local:/gluster_bricks/vmstore/vmstore
Options Reconfigured:
cluster.granular-entry-heal: on
storage.fips-mode-rchecksum: on
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off



[root@n1 /]# tree /var/lib/glusterd/
/var/lib/glusterd/
├── bitd
├── events
├── geo-replication
│   └── gsyncd_template.conf
├── glusterd.info
├── glusterd.upgrade
├── glusterfind
├── glustershd
├── groups
│   ├── db-workload
│   ├── distributed-virt
│   ├── gluster-block
│   ├── metadata-cache
│   ├── nl-cache
│   ├── samba
│   └── virt
├── hooks
│   └── 1
│       ├── add-brick
│       │   ├── post
│       │   │   ├── disabled-quota-root-xattr-heal.sh
│       │   │   ├── S10selinux-label-brick.sh
│       │   │   └── S13create-subdir-mounts.sh
│       │   └── pre
│       │       └── S28Quota-enable-root-xattr-heal.sh
│       ├── create
│       │   ├── post
│       │   │   └── S10selinux-label-brick.sh
│       │   └── pre
│       ├── delete
│       │   ├── post
│       │   │   └── S57glusterfind-delete-post -> /usr/libexec/glusterfs/glusterfind/S57glusterfind-delete-post.py
│       │   └── pre
│       │       └── S10selinux-del-fcontext.sh
│       ├── gsync-create
│       │   ├── post
│       │   │   └── S56glusterd-geo-rep-create-post.sh
│       │   └── pre
│       ├── remove-brick
│       │   ├── post
│       │   └── pre
│       ├── reset
│       │   ├── post
│       │   └── pre
│       ├── set
│       │   ├── post
│       │   │   ├── S30samba-set.sh
│       │   │   └── S32gluster_enable_shared_storage.sh
│       │   └── pre
│       ├── start
│       │   ├── post
│       │   │   ├── S29CTDBsetup.sh
│       │   │   └── S30samba-start.sh
│       │   └── pre
│       └── stop
│           ├── post
│           └── pre
│               ├── S29CTDB-teardown.sh
│               └── S30samba-stop.sh
├── options
├── peers
│   ├── a9cc83f8-049d-422f-a226-046bedb8f3d8
│   └── e9b9fb96-5f03-4303-a412-f96a1141d406
├── quotad
├── scrub
├── snaps
│   └── missed_snaps_list
├── ss_brick
└── vols
    ├── engine
    │   ├── bricks
    │   │   ├── n1.ovirt.local:-gluster_bricks-engine-engine
    │   │   ├── n2.ovirt.local:-gluster_bricks-engine-engine
    │   │   └── n3.ovirt.local:-gluster_bricks-engine-engine
    │   ├── cksum
    │   ├── engine.gfproxyd.vol
    │   ├── engine.n1.ovirt.local.gluster_bricks-engine-engine.vol
    │   ├── engine.n2.ovirt.local.gluster_bricks-engine-engine.vol
    │   ├── engine.n3.ovirt.local.gluster_bricks-engine-engine.vol
    │   ├── engine-shd.vol
    │   ├── engine.tcp-fuse.vol
    │   ├── info
    │   ├── node_state.info
    │   ├── snapd.info
    │   ├── trusted-engine.tcp-fuse.vol
    │   └── trusted-engine.tcp-gfproxy-fuse.vol
    └── vmstore
        ├── bricks
        │   ├── n1.ovirt.local:-gluster_bricks-vmstore-vmstore
        │   ├── n2.ovirt.local:-gluster_bricks-vmstore-vmstore
        │   └── n3.ovirt.local:-gluster_bricks-vmstore-vmstore
        ├── cksum
        ├── info
        ├── node_state.info
        ├── snapd.info
        ├── trusted-vmstore.tcp-fuse.vol
        ├── trusted-vmstore.tcp-gfproxy-fuse.vol
        ├── vmstore.gfproxyd.vol
        ├── vmstore.n1.ovirt.local.gluster_bricks-vmstore-vmstore.vol
        ├── vmstore.n2.ovirt.local.gluster_bricks-vmstore-vmstore.vol
        ├── vmstore.n3.ovirt.local.gluster_bricks-vmstore-vmstore.vol
        ├── vmstore-shd.vol
        └── vmstore.tcp-fuse.vol

45 directories, 58 files

If hosted-engine deployment complains about insufficient space...

...it's because onn-var_tmp is too small (the engine appliance image gets staged under /var/tmp during deployment).

[root@n1 var]# lsblk
NAME                                                 MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT
sda                                                    8:0    0 223.6G  0 disk
├─sda1                                                 8:1    0   600M  0 part /boot/efi
├─sda2                                                 8:2    0     1G  0 part /boot
└─sda3                                                 8:3    0   222G  0 part
  ├─onn-pool00_tmeta                                 253:0    0     1G  0 lvm
  │ └─onn-pool00-tpool                               253:2    0   173G  0 lvm
  │   ├─onn-ovirt--node--ng--4.5.0.1--0.20220426.0+1 253:3    0   136G  0 lvm  /
  │   ├─onn-pool00                                   253:5    0   173G  1 lvm
  │   ├─onn-home                                     253:6    0     1G  0 lvm  /home
  │   ├─onn-tmp                                      253:7    0     1G  0 lvm  /tmp
  │   ├─onn-var                                      253:8    0     5G  0 lvm  /var
  │   ├─onn-var_crash                                253:9    0    10G  0 lvm  /var/crash
  │   ├─onn-var_log                                  253:10   0     8G  0 lvm  /var/log
  │   ├─onn-var_log_audit                            253:11   0     2G  0 lvm  /var/log/audit
  │   └─onn-var_tmp                                  253:12   0    10G  0 lvm  /var/tmp
  ├─onn-pool00_tdata                                 253:1    0   173G  0 lvm
  │ └─onn-pool00-tpool                               253:2    0   173G  0 lvm
  │   ├─onn-ovirt--node--ng--4.5.0.1--0.20220426.0+1 253:3    0   136G  0 lvm  /
  │   ├─onn-pool00                                   253:5    0   173G  1 lvm
  │   ├─onn-home                                     253:6    0     1G  0 lvm  /home
  │   ├─onn-tmp                                      253:7    0     1G  0 lvm  /tmp
  │   ├─onn-var                                      253:8    0     5G  0 lvm  /var
  │   ├─onn-var_crash                                253:9    0    10G  0 lvm  /var/crash
  │   ├─onn-var_log                                  253:10   0     8G  0 lvm  /var/log
  │   ├─onn-var_log_audit                            253:11   0     2G  0 lvm  /var/log/audit
  │   └─onn-var_tmp                                  253:12   0    10G  0 lvm  /var/tmp
  └─onn-swap                                         253:4    0     4G  0 lvm  [SWAP]
umount /var/tmp      ## detach the small onn-var_tmp LV
rm -rf /var/tmp      ## drop the now-empty mountpoint
mkdir /var_tmp       ## scratch dir on the larger root LV instead
ln -s /var_tmp /var/tmp
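
An alternative to the symlink would be simply growing the thin LV (assumes the default XFS layout on oVirt Node):

# lvextend -L 30G onn/var_tmp
# xfs_growfs /var/tmp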

While the hosted engine is being deployed...

...virsh actually works:

[root@n1 /]# virsh list --all
 Id   Name                State
-----------------------------------
 1    HostedEngineLocal   running
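
Once deployment moves the engine onto the shared storage, the HostedEngineLocal VM goes away; from then on the status can be followed with:

# hosted-engine --vm-status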