After configuring DRBD 9 normally and rebooting the server, the configuration is reset

Hello, I am setting up DRBD and I am posting because I have run into a problem.
The OS is Rocky Linux 9.5.

[root@cloud ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

# Engine Network #
192.168.12.100 cloud.engine.com
192.168.12.104 cloud.engine1.com
192.168.12.105 cloud.engine2.com
192.168.12.106 cloud.engine3.com

# OS Network #
192.168.12.101 cloud.host1.com
192.168.12.102 cloud.host2.com
192.168.12.103 cloud.host3.com

# DRBD Network #
111.111.111.104 engine1-cr
111.111.111.105 engine2-cr
111.111.111.106 engine3-cr


[root@cloud ~]# drbdadm status
r0 role:Primary
  volume:0 disk:UpToDate open:yes
  volume:1 disk:UpToDate open:yes
  volume:2 disk:UpToDate open:yes
  volume:3 disk:UpToDate open:yes
  volume:4 disk:UpToDate open:yes
  volume:5 disk:UpToDate open:yes
  volume:6 disk:UpToDate open:yes
  volume:7 disk:UpToDate open:yes
  volume:8 disk:UpToDate open:yes
  volume:9 disk:UpToDate open:yes
  volume:10 disk:UpToDate open:yes
  cloud.engine2.com role:Secondary
    volume:0 peer-disk:UpToDate
    volume:1 peer-disk:UpToDate
    volume:2 peer-disk:UpToDate
    volume:3 peer-disk:UpToDate
    volume:4 peer-disk:UpToDate
    volume:5 peer-disk:UpToDate
    volume:6 peer-disk:UpToDate
    volume:7 peer-disk:UpToDate
    volume:8 peer-disk:UpToDate
    volume:9 peer-disk:UpToDate
    volume:10 peer-disk:UpToDate
  cloud.engine3.com role:Secondary
    volume:0 peer-disk:UpToDate
    volume:1 peer-disk:UpToDate
    volume:2 peer-disk:UpToDate
    volume:3 peer-disk:UpToDate
    volume:4 peer-disk:UpToDate
    volume:5 peer-disk:UpToDate
    volume:6 peer-disk:UpToDate
    volume:7 peer-disk:UpToDate
    volume:8 peer-disk:UpToDate
    volume:9 peer-disk:UpToDate
    volume:10 peer-disk:UpToDate


[root@cloud ~]# lsmod | grep drbd
drbd_transport_tcp     28672  2
drbd                  991232  2 drbd_transport_tcp
libcrc32c              12288  5 nf_conntrack,nf_nat,nf_tables,xfs,drbd
[root@cloud ~]# rpm -qa | grep drbd
kmod-drbd9x-9.1.23-1.el9_5.elrepo.x86_64
drbd9x-utils-9.29.0-1.el9.elrepo.x86_64
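
The module is loaded at the moment, but in case the problem after a reboot is simply that the drbd kernel module is not loaded before the cluster tries to start the resource, one possible workaround (only a sketch, assuming systemd's modules-load.d mechanism; this file does not exist in my setup) is:

# load the drbd module at every boot
echo drbd > /etc/modules-load.d/drbd.conf
# verify after the next reboot
lsmod | grep drbd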

[root@cloud ~]# cat /etc/drbd.d/r0.res
resource "r0" {
     protocol C;
     startup {
          wfc-timeout       0;      ## Infinite!
          degr-wfc-timeout  120;    ## 2 minutes.
     }
     disk {
        c-plan-ahead  20;
        c-min-rate 1000M;
        c-max-rate 1000M;
     }
  volume 0 {
    device    /dev/drbd1;
    disk      /dev/engine_vg1/etc-httpd;
    meta-disk internal;
  }
  volume 1 {
    device    /dev/drbd2;
    disk      /dev/engine_vg1/etc-ovirt-engine;
    meta-disk internal;
  }
  volume 2 {
    device    /dev/drbd3;
    disk      /dev/engine_vg1/etc-ovirt-engine-dwh;
    meta-disk internal;
  }
  volume 3 {
    device    /dev/drbd4;
    disk      /dev/engine_vg1/etc-pki-ovirt-engine;
    meta-disk internal;
  }
  volume 4 {
    device    /dev/drbd5;
    disk      /dev/engine_vg1/usr-share-ovirt-engine;
    meta-disk internal;
  }
  volume 5 {
    device    /dev/drbd6;
    disk      /dev/engine_vg1/usr-share-ovirt-engine-wildfly;
    meta-disk internal;
  }
  volume 6 {
    device    /dev/drbd7;
    disk      /dev/engine_vg1/var-lib-ovirt-engine;
    meta-disk internal;
  }
  volume 7 {
    device    /dev/drbd8;
    disk      /dev/engine_vg1/var-log-ovirt-engine;
    meta-disk internal;
  }
  volume 8 {
    device    /dev/drbd9;
    disk      /dev/engine_vg1/usr-share-doc-ovirt-engine;
    meta-disk internal;
  }
  volume 9 {
    device    /dev/drbd10;
    disk      /dev/engine_vg1/usr-share-ovirt-engine-keycloak;
    meta-disk internal;
  }
  volume 10 {
    device    /dev/drbd11;
    disk      /dev/engine_vg1/var-lib-pgsql;
    meta-disk internal;
  }
  on cloud.engine1.com {
    address   111.111.111.104:7789;
    node-id 1;
  }
  on cloud.engine2.com {
    address   111.111.111.105:7789;
    node-id 2;
  }
  on cloud.engine3.com {
    address   111.111.111.106:7789;
    node-id 3;
  }
  connection-mesh {
    hosts cloud.engine1.com cloud.engine2.com cloud.engine3.com;
  }

}
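
For reference, a minimal way to check whether the resource definition and the on-disk metadata are still intact after a reboot (assuming the standard drbd-utils 9.x commands; nothing below is taken from my current output) would be:

# show the configuration exactly as drbdadm parses it
drbdadm dump r0
# bring the resource up by hand if the cluster has not done it
drbdadm up r0
# re-apply the configuration to an already-running resource
drbdadm adjust r0
# check the result
drbdadm status r0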

[root@cloud ~]# pcs status
Cluster name: engineHA
Cluster Summary:
  * Stack: corosync (Pacemaker is running)
  * Current DC: engine3-cr (version 2.1.8-3.el9-3980678f0) - partition with quorum
  * Last updated: Tue Feb 11 21:15:08 2025 on engine1-cr
  * Last change:  Tue Feb 11 21:15:07 2025 by hacluster via hacluster on engine1-cr
  * 3 nodes configured
  * 15 resource instances configured

Node List:
  * Online: [ engine1-cr engine2-cr engine3-cr ]

Full List of Resources:
  * Clone Set: r0-clone [r0] (promotable):
    * Promoted: [ engine1-cr ]
    * Unpromoted: [ engine2-cr engine3-cr ]
  * drbd-fs1    (ocf:heartbeat:Filesystem):      Started engine1-cr
  * drbd-fs2    (ocf:heartbeat:Filesystem):      Started engine1-cr
  * drbd-fs3    (ocf:heartbeat:Filesystem):      Started engine1-cr
  * drbd-fs4    (ocf:heartbeat:Filesystem):      Started engine1-cr
  * drbd-fs5    (ocf:heartbeat:Filesystem):      Started engine1-cr
  * drbd-fs6    (ocf:heartbeat:Filesystem):      Started engine1-cr
  * drbd-fs7    (ocf:heartbeat:Filesystem):      Started engine1-cr
  * drbd-fs8    (ocf:heartbeat:Filesystem):      Started engine1-cr
  * drbd-fs9    (ocf:heartbeat:Filesystem):      Started engine1-cr
  * drbd-fs10   (ocf:heartbeat:Filesystem):      Started engine1-cr
  * drbd-fs11   (ocf:heartbeat:Filesystem):      Started engine1-cr
  * vip1        (ocf:heartbeat:IPaddr2):         Started engine1-cr

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
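
One thing that may be worth double-checking (an assumption on my part, not something shown in the output above): when Pacemaker manages DRBD through the r0-clone resource, the drbd systemd unit is normally left disabled, while corosync/pacemaker/pcsd must be enabled so the cluster can restore the resource after a reboot. A quick check would be:

# DRBD itself should usually be driven by the cluster, not by systemd
systemctl is-enabled drbd.service
# the cluster stack has to come back on its own after a reboot
systemctl is-enabled corosync pacemaker pcsd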

Currently, the three nodes are configured for high availability through Pacemaker.

Moving resources between nodes also works fine.
However, after a reboot the engine_vg1 volume group is not activated and DRBD does not come back up normally.

What could be the reason?
Is LVM auto-activation turned off automatically starting with Rocky 9.5, and do I need to edit the lvm.conf file?
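
For reference, this is the kind of check I have in mind (only a sketch; the setting names assume the stock /etc/lvm/lvm.conf shipped with Rocky 9, and engine_vg1 is the VG holding the DRBD backing LVs):

# see whether auto-activation is restricted or event activation is turned off
grep -E 'auto_activation_volume_list|event_activation' /etc/lvm/lvm.conf
# if the VG stays inactive after boot, activating it by hand and re-attaching
# DRBD should show whether LVM activation is really the culprit
vgchange -ay engine_vg1
drbdadm adjust r0
# newer lvm2 can also mark auto-activation per VG
vgchange --setautoactivation y engine_vg1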

Because DRBD is disabled by default in RHEL/Rocky Linux, you might not get an answer on this forum. I suggest trying the LINBIT community forum, where DRBD-related topics are discussed.