Hi experts,
   I am running into a very strange problem with virtual machine
installations attached to Open vSwitch. My operating system is SLES 15 SP4:

cluster12-b:~ # cat /etc/os-release
NAME="SLES"
VERSION="15-SP4"
VERSION_ID="15.4"
PRETTY_NAME="SUSE Linux Enterprise Server 15 SP4"
ID="sles"
ID_LIKE="suse"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:suse:sles:15:sp4"
DOCUMENTATION_URL="https://documentation.suse.com/"

cluster12-b:~ # rpm -qa |grep openvswitch
openvswitch-2.14.2-150400.22.23.x86_64

cluster12-b:~ # virsh net-list --all
Name               State    Autostart   Persistent
-----------------------------------------------------
2.11-ovs-network   active   yes         yes


bond1 was used by the VMs (excerpt from ovs-vsctl show):
...
   Bridge br-oam
        Port bond1
            trunks: [3932, 3933]
            Interface bond1
        Port "2.11-SC-2-eth1"
            tag: 3932
            Interface "2.11-SC-2-eth1"
        Port br-oam
            Interface br-oam
                type: internal
        Port "2.11-SC-2-eth2"
            tag: 3933
            Interface "2.11-SC-2-eth2"

     But when I restarted the network service with "# service network
restart", the port bond1 disappeared from the bridge br-oam, and some
abnormal entries showed up in the system log. The detailed operation logs
are attached below:
...
2023-10-16T13:07:12.708071+08:00 cluster12-b kernel: [340552.475586][ T2447] device eth1 left promiscuous mode
2023-10-16T13:07:12.824022+08:00 cluster12-b kernel: [340552.593298][ T2447] bonding: bond0 is being deleted...
2023-10-16T13:07:12.824045+08:00 cluster12-b kernel: [340552.593393][ T2447] bond0 (unregistering): Released all slaves
2023-10-16T13:07:12.881576+08:00 cluster12-b systemd[1]: Starting Generate issue file for login session...
2023-10-16T13:07:12.905589+08:00 cluster12-b systemd[1]: issue-generator.service: Deactivated successfully.
2023-10-16T13:07:12.905662+08:00 cluster12-b systemd[1]: Finished Generate issue file for login session.
2023-10-16T13:07:17.668420+08:00 cluster12-b ovs-vsctl: ovs|00001|vsctl|INFO|Called as /usr/bin/ovs-vsctl del-port br-oam bond1
2023-10-16T13:07:17.676015+08:00 cluster12-b kernel: [340557.444150][ T2261] device bond1 left promiscuous mode
2023-10-16T13:07:17.720080+08:00 cluster12-b kernel: [340557.486796][ T2447] bonding: bond1 is being deleted...
2023-10-16T13:07:17.720097+08:00 cluster12-b kernel: [340557.486891][ T2447] bond1 (unregistering): Released all slaves

It seems that restarting the host's network service automatically triggered
this call: /usr/bin/ovs-vsctl del-port br-oam bond1
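
For reference, the port can be restored by hand with something along these
lines (trunks copied from the ovs-vsctl show output above), though only
until the next restart:

# ovs-vsctl add-port br-oam bond1 trunks=3932,3933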

Rebooting the host causes the same issue as well. Would you please help
check and give some advice? Thanks!


//An

cluster12-b:~ # cat /etc/os-release 
NAME="SLES"
VERSION="15-SP4"
VERSION_ID="15.4"
PRETTY_NAME="SUSE Linux Enterprise Server 15 SP4"
ID="sles"
ID_LIKE="suse"
ANSI_COLOR="0;32"
CPE_NAME="cpe:/o:suse:sles:15:sp4"
DOCUMENTATION_URL="https://documentation.suse.com/";
cluster12-b:~ # rpm -qa |grep openvswitch
openvswitch-2.14.2-150400.22.23.x86_64
libopenvswitch-2_14-0-2.14.2-150400.22.23.x86_64
cluster12-b:~ # rpm -qa |grep libvirt
libvirt-client-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-nodedev-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-storage-iscsi-direct-8.0.0-150400.5.8.x86_64
libvirt-daemon-config-network-8.0.0-150400.5.8.x86_64
libvirt-libs-8.0.0-150400.5.8.x86_64
libvirt-glib-1_0-0-4.0.0-150400.1.10.x86_64
libvirt-daemon-driver-qemu-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-interface-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-storage-mpath-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-storage-disk-8.0.0-150400.5.8.x86_64
libvirt-daemon-qemu-8.0.0-150400.5.8.x86_64
python3-libvirt-python-8.0.0-150400.1.6.x86_64
system-group-libvirt-20170617-150400.22.33.noarch
libvirt-daemon-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-secret-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-network-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-storage-rbd-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-storage-iscsi-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-storage-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-storage-core-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-storage-scsi-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-nwfilter-8.0.0-150400.5.8.x86_64
libvirt-daemon-driver-storage-logical-8.0.0-150400.5.8.x86_64
cluster12-b:~ # rpm -qa |grep qemu
qemu-chardev-spice-6.2.0-150400.35.10.x86_64
qemu-sgabios-8-150400.35.10.noarch
qemu-hw-usb-host-6.2.0-150400.35.10.x86_64
qemu-ui-spice-core-6.2.0-150400.35.10.x86_64
qemu-block-rbd-6.2.0-150400.35.10.x86_64
qemu-ovmf-x86_64-202202-150400.3.3.noarch
qemu-accel-tcg-x86-6.2.0-150400.35.10.x86_64
qemu-hw-display-virtio-gpu-6.2.0-150400.35.10.x86_64
qemu-ipxe-1.0.0+-150400.35.10.noarch
qemu-hw-display-virtio-vga-6.2.0-150400.35.10.x86_64
qemu-ui-opengl-6.2.0-150400.35.10.x86_64
qemu-block-curl-6.2.0-150400.35.10.x86_64
qemu-audio-spice-6.2.0-150400.35.10.x86_64
qemu-tools-6.2.0-150400.35.10.x86_64
qemu-6.2.0-150400.35.10.x86_64
libvirt-daemon-driver-qemu-8.0.0-150400.5.8.x86_64
libvirt-daemon-qemu-8.0.0-150400.5.8.x86_64
qemu-kvm-6.2.0-150400.35.10.x86_64
qemu-vgabios-1.15.0_0_g2dd4b9b-150400.35.10.noarch
qemu-ksm-6.2.0-150400.35.10.x86_64
qemu-hw-display-qxl-6.2.0-150400.35.10.x86_64
qemu-ui-gtk-6.2.0-150400.35.10.x86_64
qemu-x86-6.2.0-150400.35.10.x86_64
system-user-qemu-20170617-150400.22.33.noarch
qemu-hw-usb-redirect-6.2.0-150400.35.10.x86_64
qemu-ui-curses-6.2.0-150400.35.10.x86_64
qemu-hw-display-virtio-gpu-pci-6.2.0-150400.35.10.x86_64
qemu-ui-spice-app-6.2.0-150400.35.10.x86_64
qemu-seabios-1.15.0_0_g2dd4b9b-150400.35.10.noarch
cluster12-b:~ # virsh net-list --all
 Name               State    Autostart   Persistent
-----------------------------------------------------
 2.11-ovs-network   active   yes         yes

cluster12-b:~ # virsh list --all
 Id   Name        State
---------------------------
 1    2.11-PL-4   running
 2    2.11-SC-2   running

cluster12-b:~ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group 
default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth4: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc mq master bond0 
state UP group default qlen 1000
    link/ether d4:f5:ef:a7:fd:bc brd ff:ff:ff:ff:ff:ff
    altname enp18s0f0
    altname ens3f0
3: eth5: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc mq master bond1 
state UP group default qlen 1000
    link/ether d4:f5:ef:a7:fd:bd brd ff:ff:ff:ff:ff:ff
    altname enp18s0f1
    altname ens3f1
6: eth0: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc mq master bond0 
state UP group default qlen 1000
    link/ether d4:f5:ef:a7:fd:bc brd ff:ff:ff:ff:ff:ff permaddr 
d4:f5:ef:a7:f3:88
    altname enp19s0f0
    altname ens1f0
7: eth12: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default 
qlen 1000
    link/ether 00:62:0b:04:44:94 brd ff:ff:ff:ff:ff:ff
    altname enp138s0f0
    altname ens5f0
8: eth8: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default 
qlen 1000
    link/ether 00:62:0b:04:44:95 brd ff:ff:ff:ff:ff:ff
    altname enp138s0f1
    altname ens5f1
9: eth1: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc mq master bond1 
state UP group default qlen 1000
    link/ether d4:f5:ef:a7:fd:bd brd ff:ff:ff:ff:ff:ff permaddr 
d4:f5:ef:a7:f3:89
    altname enp19s0f1
    altname ens1f1
10: eth10: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default 
qlen 1000
    link/ether 00:62:0b:04:44:96 brd ff:ff:ff:ff:ff:ff
    altname enp138s0f2
    altname ens5f2
13: eth14: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default 
qlen 1000
    link/ether 00:62:0b:04:44:97 brd ff:ff:ff:ff:ff:ff
    altname enp138s0f3
    altname ens5f3
14: eth13: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default 
qlen 1000
    link/ether 00:62:0b:9d:e8:46 brd ff:ff:ff:ff:ff:ff
    altname enp180s0f0
    altname ens15f0
15: eth9: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default 
qlen 1000
    link/ether 00:62:0b:9d:e8:47 brd ff:ff:ff:ff:ff:ff
    altname enp180s0f1
    altname ens15f1
16: eth11: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default 
qlen 1000
    link/ether 00:62:0b:9d:e8:48 brd ff:ff:ff:ff:ff:ff
    altname enp180s0f2
    altname ens15f2
17: eth15: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default 
qlen 1000
    link/ether 00:62:0b:9d:e8:49 brd ff:ff:ff:ff:ff:ff
    altname enp180s0f3
    altname ens15f3
18: ovs-netdev: <BROADCAST,MULTICAST,PROMISC> mtu 1500 qdisc noop state DOWN 
group default qlen 1000
    link/ether 5a:a4:15:3b:55:97 brd ff:ff:ff:ff:ff:ff
19: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group 
default qlen 1000
    link/ether 5a:3b:b3:e7:1f:c5 brd ff:ff:ff:ff:ff:ff
20: br-int: <BROADCAST,MULTICAST,PROMISC> mtu 1500 qdisc noop state DOWN group 
default qlen 1000
    link/ether d4:f5:ef:a7:fd:be brd ff:ff:ff:ff:ff:ff
21: br-oam: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default 
qlen 1000
    link/ether d4:f5:ef:a7:fd:bd brd ff:ff:ff:ff:ff:ff
22: br-trf: <BROADCAST,MULTICAST,PROMISC> mtu 1500 qdisc noop state DOWN group 
default qlen 1000
    link/ether d4:f5:ef:a7:fd:bf brd ff:ff:ff:ff:ff:ff
23: bond1: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue 
master ovs-system state UP group default qlen 1000
    link/ether d4:f5:ef:a7:fd:bd brd ff:ff:ff:ff:ff:ff
    inet 10.120.96.134/25 brd 10.120.96.255 scope global bond1
       valid_lft forever preferred_lft forever
    inet6 fe80::d6f5:efff:fea7:fdbd/64 scope link 
       valid_lft forever preferred_lft forever
24: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue 
state UP group default qlen 1000
    link/ether d4:f5:ef:a7:fd:bc brd ff:ff:ff:ff:ff:ff
    inet 10.120.96.133/25 brd 10.120.96.255 scope global bond0
       valid_lft forever preferred_lft forever
    inet6 fe80::d6f5:efff:fea7:fdbc/64 scope link 
       valid_lft forever preferred_lft forever
25: 2.11-SC-2-eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast 
master ovs-system state UNKNOWN group default qlen 1000
    link/ether fe:10:20:01:02:01 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::fc10:20ff:fe01:201/64 scope link 
       valid_lft forever preferred_lft forever
26: 2.11-SC-2-eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast 
master ovs-system state UNKNOWN group default qlen 1000
    link/ether fe:10:20:01:02:02 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::fc10:20ff:fe01:202/64 scope link 
       valid_lft forever preferred_lft forever
cluster12-b:~ # cd /etc/sysconfig/network/
cluster12-b:/etc/sysconfig/network # ll
total 84
-rw-r--r-- 1 root root  9691 Sep 26 18:04 config
-rw-r--r-- 1 root root 14771 Sep 26 16:48 dhcp
drwxr-xr-x 2 root root  4096 Sep 27 16:01 if-down.d
drwxr-xr-x 2 root root  4096 Mar 15  2022 if-up.d
-rw-r--r-- 1 root root   385 Sep 26 17:41 ifcfg-bond0
-rw-r--r-- 1 root root   405 Sep 27 16:03 ifcfg-bond1
-rw------- 1 root root   147 Sep 26 16:48 ifcfg-lo
-rw-r--r-- 1 root root 21738 Jul  7  2021 ifcfg.template
-rw-r--r-- 1 root root    31 Sep 26 17:57 ifroute-bond0
drwx------ 2 root root  4096 Mar 15  2022 providers
-rw-r--r-- 1 root root     0 Sep 26 17:57 routes
-rw-r--r-- 1 root root     0 Sep 26 17:57 routes.YaST2save
drwxr-xr-x 2 root root  4096 Sep 26 16:47 scripts
cluster12-b:/etc/sysconfig/network # cat ifcfg-bond1
DEVICE='bond1'
BONDING_MASTER='yes'
BONDING_MODULE_OPTS='mode=active-backup miimon=100 use_carrier=0'
BONDING_SLAVE0='eth1'
BONDING_SLAVE1='eth5'
BOOTPROTO='static'
BORADCAST=''
ETHTOOL_OPTIONS=''
IPADDR='10.120.96.134'
MTU=''
NAME=''
NETMASK='255.255.255.128'
NETWORK='10.120.96.128'
REMOTE_IPADDR=''
STARTMODE='auto'
USERCONTROL='no'
ZONE=public
OVS_BRIDGE=br-oam
DEVICETYPE=ovs
TYPE=OVSPort
ONBOOT=yes
cluster12-b:/etc/sysconfig/network # 
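
One thing I am not sure about: the OVS_BRIDGE / DEVICETYPE / TYPE keys in
ifcfg-bond1 above follow the Red Hat initscripts convention, and I do not
know whether wicked honors them at all. If I read the SUSE documentation
correctly, a wicked-native Open vSwitch setup would instead describe the
bridge in its own ifcfg file, roughly like the sketch below (file name and
values are only illustrative, with the IP moved onto the bridge):

# cat /etc/sysconfig/network/ifcfg-br-oam
STARTMODE='auto'
BOOTPROTO='static'
IPADDR='10.120.96.134'
NETMASK='255.255.255.128'
OVS_BRIDGE='yes'
OVS_BRIDGE_PORT_DEVICE_1='bond1'

Please correct me if that is the wrong direction.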
cluster12-b:/etc/sysconfig/network # ovs-vsctl show
2e9bf291-50ac-4c3a-ac55-2d590df1880d
    Bridge br-trf
        datapath_type: netdev
        Port br-trf
            Interface br-trf
                type: internal
        Port "2.11-PL-4-eth1"
            tag: 3935
            Interface "2.11-PL-4-eth1"
                type: dpdkvhostuser
        Port "2.11-PL-4-eth2"
            tag: 3934
            Interface "2.11-PL-4-eth2"
                type: dpdkvhostuser
        Port bond3
            trunks: [3934, 3935]
            Interface dpdk3
                type: dpdk
                options: {dpdk-devargs="0000:13:00.3"}
            Interface dpdk1
                type: dpdk
                options: {dpdk-devargs="0000:12:00.3"}
    Bridge br-oam
        Port "2.11-SC-2-eth1"
            tag: 3932
            Interface "2.11-SC-2-eth1"
        Port br-oam
            Interface br-oam
                type: internal
        Port bond1
            trunks: [3932, 3933]
            Interface bond1
        Port "2.11-SC-2-eth2"
            tag: 3933
            Interface "2.11-SC-2-eth2"
    Bridge br-int
        datapath_type: netdev
        Port br-int
            Interface br-int
                type: internal
        Port "2.11-PL-4-eth0"
            tag: 1012
            Interface "2.11-PL-4-eth0"
                type: dpdkvhostuser
        Port bond2
            trunks: [1012]
            Interface dpdk2
                type: dpdk
                options: {dpdk-devargs="0000:13:00.2"}
            Interface dpdk0
                type: dpdk
                options: {dpdk-devargs="0000:12:00.2"}
        Port "2.11-SC-2-eth0"
            tag: 1012
            Interface "2.11-SC-2-eth0"
                type: dpdkvhostuser
    ovs_version: "2.14.2"
cluster12-b:/etc/sysconfig/network # 
cluster12-b:/etc/sysconfig/network # date
Mon Oct 16 13:05:16 CST 2023
cluster12-b:/etc/sysconfig/network # tail -f /var/log/messages
2023-10-16T13:01:40.735629+08:00 cluster12-b sshd[6102]: Disconnected from user 
root 10.158.51.69 port 60794
2023-10-16T13:01:40.736160+08:00 cluster12-b sshd[6102]: 
pam_unix(sshd:session): session closed for user root
2023-10-16T13:01:40.736931+08:00 cluster12-b systemd[1]: session-335.scope: 
Deactivated successfully.
2023-10-16T13:01:40.737738+08:00 cluster12-b systemd-logind[1654]: Session 335 
logged out. Waiting for processes to exit.
2023-10-16T13:01:40.738187+08:00 cluster12-b systemd-logind[1654]: Removed 
session 335.
2023-10-16T13:05:05.543773+08:00 cluster12-b ovs-vsctl: 
ovs|00001|db_ctl_base|ERR|'show' command takes at most 0 arguments (note that 
options must precede command names and follow a "--" argument)
2023-10-16T13:05:06.030653+08:00 cluster12-b sshd[6139]: Accepted 
keyboard-interactive/pam for root from 10.158.51.69 port 34084 ssh2
2023-10-16T13:05:06.033619+08:00 cluster12-b systemd-logind[1654]: New session 
336 of user root.
2023-10-16T13:05:06.033887+08:00 cluster12-b systemd[1]: Started Session 336 of 
User root.
2023-10-16T13:05:06.035540+08:00 cluster12-b sshd[6139]: 
pam_unix(sshd:session): session opened for user root by (uid=0)
^C
cluster12-b:/etc/sysconfig/network # tail -f 
/var/log/openvswitch/ovs-vswitchd.log 
2023-10-16T05:01:10.925Z|09124|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|Dropped 10 
log messages in last 57 seconds (most recently, 13 seconds ago) due to 
excessive rate
2023-10-16T05:01:10.925Z|09125|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|dropping 
VLAN 0 packet received on port bond2 not configured for trunking VLAN 0 on 
bridge br-int while processing 
in_port=2,vlan_tci=0x0000,dl_src=20:4e:71:a7:f2:ec,dl_dst=01:80:c2:00:00:0e,dl_type=0x88cc
2023-10-16T05:02:07.144Z|09126|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|Dropped 9 
log messages in last 53 seconds (most recently, 14 seconds ago) due to 
excessive rate
2023-10-16T05:02:07.144Z|09127|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|dropping 
VLAN 0 packet received on port bond2 not configured for trunking VLAN 0 on 
bridge br-int while processing 
in_port=2,vlan_tci=0x0000,dl_src=20:4e:71:a7:f2:ec,dl_dst=01:80:c2:00:00:0e,dl_type=0x88cc
2023-10-16T05:03:01.203Z|09128|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|Dropped 9 
log messages in last 50 seconds (most recently, 13 seconds ago) due to 
excessive rate
2023-10-16T05:03:01.203Z|09129|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|dropping 
VLAN 0 packet received on port bond2 not configured for trunking VLAN 0 on 
bridge br-int while processing 
in_port=2,vlan_tci=0x0000,dl_src=20:4e:71:a7:f2:ec,dl_dst=01:80:c2:00:00:0e,dl_type=0x88cc
2023-10-16T05:04:04.204Z|09130|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|Dropped 10 
log messages in last 56 seconds (most recently, 11 seconds ago) due to 
excessive rate
2023-10-16T05:04:04.204Z|09131|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|dropping 
VLAN 0 packet received on port bond3 not configured for trunking VLAN 0 on 
bridge br-trf while processing 
in_port=1,vlan_tci=0x0000,dl_src=20:4e:71:a7:f2:ed,dl_dst=01:80:c2:00:00:0e,dl_type=0x88cc
2023-10-16T05:05:00.483Z|09132|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|Dropped 10 
log messages in last 55 seconds (most recently, 2 seconds ago) due to excessive 
rate
2023-10-16T05:05:00.483Z|09133|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|dropping 
VLAN 0 packet received on port bond3 not configured for trunking VLAN 0 on 
bridge br-trf while processing 
in_port=2,vlan_tci=0x0000,dl_src=20:4e:71:a7:f2:ee,dl_dst=01:80:c2:00:00:0e,dl_type=0x88cc
2023-10-16T05:06:03.556Z|09134|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|Dropped 11 
log messages in last 57 seconds (most recently, 4 seconds ago) due to excessive 
rate
2023-10-16T05:06:03.556Z|09135|ofproto_dpif_xlate(pmd-c24/id:9)|WARN|dropping 
VLAN 0 packet received on port bond2 not configured for trunking VLAN 0 on 
bridge br-int while processing 
in_port=1,vlan_tci=0x0000,dl_src=20:4e:71:a7:f2:eb,dl_dst=01:80:c2:00:00:0e,dl_type=0x88cc
^C^C
cluster12-b:/etc/sysconfig/network # 
cluster12-b:/etc/sysconfig/network # service network restart 
cluster12-b:/etc/sysconfig/network # ovs-vsctl show
2e9bf291-50ac-4c3a-ac55-2d590df1880d
    Bridge br-trf
        datapath_type: netdev
        Port br-trf
            Interface br-trf
                type: internal
        Port "2.11-PL-4-eth1"
            tag: 3935
            Interface "2.11-PL-4-eth1"
                type: dpdkvhostuser
        Port "2.11-PL-4-eth2"
            tag: 3934
            Interface "2.11-PL-4-eth2"
                type: dpdkvhostuser
        Port bond3
            trunks: [3934, 3935]
            Interface dpdk3
                type: dpdk
                options: {dpdk-devargs="0000:13:00.3"}
            Interface dpdk1
                type: dpdk
                options: {dpdk-devargs="0000:12:00.3"}
    Bridge br-oam
        Port "2.11-SC-2-eth1"
            tag: 3932
            Interface "2.11-SC-2-eth1"
        Port br-oam
            Interface br-oam
                type: internal
        Port "2.11-SC-2-eth2"
            tag: 3933
            Interface "2.11-SC-2-eth2"
    Bridge br-int
        datapath_type: netdev
        Port br-int
            Interface br-int
                type: internal
        Port "2.11-PL-4-eth0"
            tag: 1012
            Interface "2.11-PL-4-eth0"
                type: dpdkvhostuser
        Port bond2
            trunks: [1012]
            Interface dpdk2
                type: dpdk
                options: {dpdk-devargs="0000:13:00.2"}
            Interface dpdk0
                type: dpdk
                options: {dpdk-devargs="0000:12:00.2"}
        Port "2.11-SC-2-eth0"
            tag: 1012
            Interface "2.11-SC-2-eth0"
                type: dpdkvhostuser
    ovs_version: "2.14.2"
cluster12-b:/etc/sysconfig/network # ll
total 84
-rw-r--r-- 1 root root  9691 Sep 26 18:04 config
-rw-r--r-- 1 root root 14771 Sep 26 16:48 dhcp
drwxr-xr-x 2 root root  4096 Sep 27 16:01 if-down.d
drwxr-xr-x 2 root root  4096 Mar 15  2022 if-up.d
-rw-r--r-- 1 root root   385 Sep 26 17:41 ifcfg-bond0
-rw-r--r-- 1 root root   405 Sep 27 16:03 ifcfg-bond1
-rw------- 1 root root   147 Sep 26 16:48 ifcfg-lo
-rw-r--r-- 1 root root 21738 Jul  7  2021 ifcfg.template
-rw-r--r-- 1 root root    31 Sep 26 17:57 ifroute-bond0
drwx------ 2 root root  4096 Mar 15  2022 providers
-rw-r--r-- 1 root root     0 Sep 26 17:57 routes
-rw-r--r-- 1 root root     0 Sep 26 17:57 routes.YaST2save
drwxr-xr-x 2 root root  4096 Sep 26 16:47 scripts
cluster12-b:/etc/sysconfig/network # cat ifcfg-bond1
DEVICE='bond1'
BONDING_MASTER='yes'
BONDING_MODULE_OPTS='mode=active-backup miimon=100 use_carrier=0'
BONDING_SLAVE0='eth1'
BONDING_SLAVE1='eth5'
BOOTPROTO='static'
BORADCAST=''
ETHTOOL_OPTIONS=''
IPADDR='10.120.96.134'
MTU=''
NAME=''
NETMASK='255.255.255.128'
NETWORK='10.120.96.128'
REMOTE_IPADDR=''
STARTMODE='auto'
USERCONTROL='no'
ZONE=public
OVS_BRIDGE=br-oam
DEVICETYPE=ovs
TYPE=OVSPort
ONBOOT=yes
cluster12-b:/etc/sysconfig/network # 



cluster12-b:/etc/sysconfig/network # tail -f /var/log/messages
...
2023-10-16T13:07:12.272846+08:00 cluster12-b systemd[1]: Stopping wicked 
managed network interfaces...
2023-10-16T13:07:12.340029+08:00 cluster12-b kernel: [340552.109905][ T2447] 
bond0: (slave eth4): Releasing backup interface
2023-10-16T13:07:12.340046+08:00 cluster12-b kernel: [340552.109908][ T2447] 
bond0: (slave eth4): the permanent HWaddr of slave - d4:f5:ef:a7:fd:bc - is 
still in use by bond - set the HWaddr of slave to a different address to avoid 
conflicts
2023-10-16T13:07:12.340048+08:00 cluster12-b kernel: [340552.109912][ T2447] 
bond0: (slave eth0): making interface the new active one
2023-10-16T13:07:12.536056+08:00 cluster12-b kernel: [340552.303930][ T2447] 
bond1: (slave eth5): Releasing backup interface
2023-10-16T13:07:12.536088+08:00 cluster12-b kernel: [340552.303933][ T2447] 
bond1: (slave eth5): the permanent HWaddr of slave - d4:f5:ef:a7:fd:bd - is 
still in use by bond - set the HWaddr of slave to a different address to avoid 
conflicts
2023-10-16T13:07:12.536090+08:00 cluster12-b kernel: [340552.303935][ T2447] 
device eth5 left promiscuous mode
2023-10-16T13:07:12.536093+08:00 cluster12-b kernel: [340552.303950][ T2447] 
bond1: (slave eth1): making interface the new active one
2023-10-16T13:07:12.536094+08:00 cluster12-b kernel: [340552.303951][ T2447] 
device eth1 entered promiscuous mode
2023-10-16T13:07:12.620020+08:00 cluster12-b kernel: [340552.387473][ T2447] 
bond0: (slave eth0): Releasing backup interface
2023-10-16T13:07:12.708054+08:00 cluster12-b kernel: [340552.475583][ T2447] 
bond1: (slave eth1): Releasing backup interface
2023-10-16T13:07:12.708071+08:00 cluster12-b kernel: [340552.475586][ T2447] 
device eth1 left promiscuous mode
2023-10-16T13:07:12.824022+08:00 cluster12-b kernel: [340552.593298][ T2447] 
bonding: bond0 is being deleted...
2023-10-16T13:07:12.824045+08:00 cluster12-b kernel: [340552.593393][ T2447] 
bond0 (unregistering): Released all slaves
2023-10-16T13:07:12.881576+08:00 cluster12-b systemd[1]: Starting Generate 
issue file for login session...
2023-10-16T13:07:12.905589+08:00 cluster12-b systemd[1]: 
issue-generator.service: Deactivated successfully.
2023-10-16T13:07:12.905662+08:00 cluster12-b systemd[1]: Finished Generate 
issue file for login session.
2023-10-16T13:07:17.668420+08:00 cluster12-b ovs-vsctl: 
ovs|00001|vsctl|INFO|Called as /usr/bin/ovs-vsctl del-port br-oam bond1
2023-10-16T13:07:17.676015+08:00 cluster12-b kernel: [340557.444150][ T2261] 
device bond1 left promiscuous mode
2023-10-16T13:07:17.720080+08:00 cluster12-b kernel: [340557.486796][ T2447] 
bonding: bond1 is being deleted...
2023-10-16T13:07:17.720097+08:00 cluster12-b kernel: [340557.486891][ T2447] 
bond1 (unregistering): Released all slaves
2023-10-16T13:07:17.775434+08:00 cluster12-b systemd[1]: Starting Generate 
issue file for login session...
2023-10-16T13:07:17.775593+08:00 cluster12-b wicked[6219]: eth4            
device-ready
2023-10-16T13:07:17.775618+08:00 cluster12-b wicked[6219]: eth5            
device-ready
2023-10-16T13:07:17.775632+08:00 cluster12-b wicked[6219]: eth0            
device-ready
2023-10-16T13:07:17.775652+08:00 cluster12-b wicked[6219]: eth1            
device-ready
2023-10-16T13:07:17.776256+08:00 cluster12-b systemd[1]: wicked.service: 
Deactivated successfully.
2023-10-16T13:07:17.776421+08:00 cluster12-b systemd[1]: Stopped wicked managed 
network interfaces.
2023-10-16T13:07:17.777843+08:00 cluster12-b systemd[1]: Starting wicked 
managed network interfaces...
2023-10-16T13:07:17.813063+08:00 cluster12-b systemd-udevd[6652]: Using default 
interface naming scheme 'sle15-sp4'.
2023-10-16T13:07:17.813130+08:00 cluster12-b systemd-udevd[6652]: bond1: Could 
not generate persistent MAC: No data available
2023-10-16T13:07:17.819574+08:00 cluster12-b systemd[1]: 
issue-generator.service: Deactivated successfully.
2023-10-16T13:07:17.819641+08:00 cluster12-b systemd[1]: Finished Generate 
issue file for login session.
2023-10-16T13:07:17.819788+08:00 cluster12-b systemd-udevd[6656]: Using default 
interface naming scheme 'sle15-sp4'.
2023-10-16T13:07:17.819819+08:00 cluster12-b systemd-udevd[6656]: bond0: Could 
not generate persistent MAC: No data available
2023-10-16T13:07:17.893104+08:00 cluster12-b systemd[1]: Starting Generate 
issue file for login session...
2023-10-16T13:07:17.965492+08:00 cluster12-b systemd[1]: 
issue-generator.service: Deactivated successfully.
2023-10-16T13:07:17.966231+08:00 cluster12-b systemd[1]: Finished Generate 
issue file for login session.
2023-10-16T13:07:17.976144+08:00 cluster12-b kernel: [340557.743533][ T2447] 
8021q: adding VLAN 0 to HW filter on device eth4
2023-10-16T13:07:17.976155+08:00 cluster12-b kernel: [340557.743850][ T2447] 
bond0: (slave eth4): Enslaving as a backup interface with a down link
2023-10-16T13:07:18.068055+08:00 cluster12-b kernel: [340557.835476][ T2447] 
8021q: adding VLAN 0 to HW filter on device eth5
2023-10-16T13:07:18.068072+08:00 cluster12-b kernel: [340557.835773][ T2447] 
bond1: (slave eth5): Enslaving as a backup interface with a down link
2023-10-16T13:07:18.120238+08:00 cluster12-b kernel: [340557.887432][ T2447] 
8021q: adding VLAN 0 to HW filter on device eth0
2023-10-16T13:07:18.120360+08:00 cluster12-b kernel: [340557.887719][ T2447] 
bond0: (slave eth0): Enslaving as a backup interface with a down link
2023-10-16T13:07:18.188044+08:00 cluster12-b kernel: [340557.955512][ T2447] 
8021q: adding VLAN 0 to HW filter on device eth1
2023-10-16T13:07:18.188061+08:00 cluster12-b kernel: [340557.955824][ T2447] 
bond1: (slave eth1): Enslaving as a backup interface with a down link
2023-10-16T13:07:18.208017+08:00 cluster12-b kernel: [340557.978339][ T2447] 
8021q: adding VLAN 0 to HW filter on device bond1
2023-10-16T13:07:18.212014+08:00 cluster12-b kernel: [340557.979952][ T2447] 
8021q: adding VLAN 0 to HW filter on device bond0
2023-10-16T13:07:21.045759+08:00 cluster12-b smad[3506]: [INFO  ]: AgentX trap 
received
2023-10-16T13:07:21.045822+08:00 cluster12-b smad[3506]: [NOTICE]: AgentX trap 
CPQNIC (.1.3.6.1.6.3.1.1.4.1.0:.1.3.6.1.4.1.232.0.18011)
2023-10-16T13:07:21.048015+08:00 cluster12-b kernel: [340560.815492][ T6220] 
igb 0000:13:00.0 eth0: igb: eth0 NIC Link is Up 1000 Mbps Full Duplex, Flow 
Control: RX/TX
2023-10-16T13:07:21.124015+08:00 cluster12-b kernel: [340560.891442][ T6040] 
bond0: (slave eth0): link status definitely up, 1000 Mbps full duplex
2023-10-16T13:07:21.124024+08:00 cluster12-b kernel: [340560.891446][ T6040] 
bond0: (slave eth0): making interface the new active one
2023-10-16T13:07:21.124025+08:00 cluster12-b kernel: [340560.891653][ T6040] 
bond0: active interface up!
2023-10-16T13:07:21.124027+08:00 cluster12-b kernel: [340560.891662][ T6220] 
IPv6: ADDRCONF(NETDEV_CHANGE): bond0: link becomes ready
2023-10-16T13:07:21.441547+08:00 cluster12-b smad[3506]: [INFO  ]: AgentX trap 
received
2023-10-16T13:07:21.441591+08:00 cluster12-b smad[3506]: [NOTICE]: AgentX trap 
CPQNIC (.1.3.6.1.6.3.1.1.4.1.0:.1.3.6.1.4.1.232.0.18011)
2023-10-16T13:07:21.444013+08:00 cluster12-b kernel: [340561.211492][ T6220] 
igb 0000:12:00.0 eth4: igb: eth4 NIC Link is Up 1000 Mbps Full Duplex, Flow 
Control: RX/TX
2023-10-16T13:07:21.540015+08:00 cluster12-b kernel: [340561.307222][ T6040] 
bond0: (slave eth4): link status definitely up, 1000 Mbps full duplex
2023-10-16T13:07:21.569637+08:00 cluster12-b smad[3506]: [INFO  ]: AgentX trap 
received
2023-10-16T13:07:21.569688+08:00 cluster12-b smad[3506]: [NOTICE]: AgentX trap 
CPQNIC (.1.3.6.1.6.3.1.1.4.1.0:.1.3.6.1.4.1.232.0.18011)
2023-10-16T13:07:21.572011+08:00 cluster12-b kernel: [340561.339491][ T6220] 
igb 0000:12:00.1 eth5: igb: eth5 NIC Link is Up 1000 Mbps Full Duplex, Flow 
Control: RX/TX
2023-10-16T13:07:21.644014+08:00 cluster12-b kernel: [340561.411338][ T6593] 
bond1: (slave eth5): link status definitely up, 1000 Mbps full duplex
2023-10-16T13:07:21.644024+08:00 cluster12-b kernel: [340561.411342][ T6593] 
bond1: (slave eth5): making interface the new active one
2023-10-16T13:07:21.644025+08:00 cluster12-b kernel: [340561.411550][ T6593] 
bond1: active interface up!
2023-10-16T13:07:21.644026+08:00 cluster12-b kernel: [340561.411556][ T6201] 
IPv6: ADDRCONF(NETDEV_CHANGE): bond1: link becomes ready
2023-10-16T13:07:21.657509+08:00 cluster12-b smad[3506]: [INFO  ]: AgentX trap 
received
2023-10-16T13:07:21.657547+08:00 cluster12-b smad[3506]: [NOTICE]: AgentX trap 
CPQNIC (.1.3.6.1.6.3.1.1.4.1.0:.1.3.6.1.4.1.232.0.18011)
2023-10-16T13:07:21.660011+08:00 cluster12-b kernel: [340561.427533][ T6220] 
igb 0000:13:00.1 eth1: igb: eth1 NIC Link is Up 1000 Mbps Full Duplex, Flow 
Control: RX/TX
2023-10-16T13:07:21.748011+08:00 cluster12-b kernel: [340561.515218][ T6593] 
bond1: (slave eth1): link status definitely up, 1000 Mbps full duplex
2023-10-16T13:07:23.207968+08:00 cluster12-b wicked[6709]: lo              up
2023-10-16T13:07:23.208037+08:00 cluster12-b wicked[6709]: eth4            
enslaved
2023-10-16T13:07:23.208055+08:00 cluster12-b wicked[6709]: eth5            
enslaved
2023-10-16T13:07:23.208069+08:00 cluster12-b wicked[6709]: eth0            
enslaved
2023-10-16T13:07:23.208082+08:00 cluster12-b wicked[6709]: eth1            
enslaved
2023-10-16T13:07:23.208094+08:00 cluster12-b wicked[6709]: bond1           up
2023-10-16T13:07:23.208106+08:00 cluster12-b wicked[6709]: bond0           up
2023-10-16T13:07:23.209373+08:00 cluster12-b systemd[1]: Finished wicked 
managed network interfaces.
cluster12-b:/etc/sysconfig/network # tail -f 
/var/log/openvswitch/ovs-vswitchd.log
...
2023-10-16T05:07:12.341Z|00383|netdev_dpdk|WARN|Failed to enable flow control on device 3
2023-10-16T05:07:12.341Z|00384|bridge|WARN|Dropped 3 log messages in last 340337 seconds (most recently, 340337 seconds ago) due to excessive rate
2023-10-16T05:07:12.341Z|00385|bridge|WARN|port bond2: Using the default bond_mode active-backup. Note that in previous versions, the default bond_mode was balance-slb
2023-10-16T05:07:12.391Z|00386|netdev_dpdk|WARN|Failed to enable flow control on device 0
2023-10-16T05:07:12.391Z|00387|netdev_dpdk|WARN|Failed to enable flow control on device 2
2023-10-16T05:07:12.391Z|00388|netdev_dpdk|WARN|Failed to enable flow control on device 1
2023-10-16T05:07:12.391Z|00389|netdev_dpdk|WARN|Failed to enable flow control on device 3
2023-10-16T05:07:12.420Z|00390|netdev_dpdk|WARN|Failed to enable flow control on device 0
2023-10-16T05:07:12.420Z|00391|netdev_dpdk|WARN|Failed to enable flow control on device 2
2023-10-16T05:07:12.421Z|00392|netdev_dpdk|WARN|Failed to enable flow control on device 1
2023-10-16T05:07:12.421Z|00393|netdev_dpdk|WARN|Failed to enable flow control on device 3
2023-10-16T05:07:12.535Z|00394|netdev_dpdk|WARN|Failed to enable flow control on device 0
2023-10-16T05:07:12.535Z|00395|netdev_dpdk|WARN|Failed to enable flow control on device 2
2023-10-16T05:07:12.535Z|00396|netdev_dpdk|WARN|Failed to enable flow control on device 1
2023-10-16T05:07:12.535Z|00397|netdev_dpdk|WARN|Failed to enable flow control on device 3
2023-10-16T05:07:12.744Z|00398|netdev_dpdk|WARN|Failed to enable flow control on device 0
2023-10-16T05:07:12.744Z|00399|netdev_dpdk|WARN|Failed to enable flow control on device 2
2023-10-16T05:07:12.744Z|00400|netdev_dpdk|WARN|Failed to enable flow control on device 1
2023-10-16T05:07:12.744Z|00401|netdev_dpdk|WARN|Failed to enable flow control on device 3
2023-10-16T05:07:12.744Z|00402|bridge|INFO|bridge br-oam: using datapath ID 000026dad783914d
2023-10-16T05:07:12.755Z|00403|netdev_dpdk|WARN|Failed to enable flow control on device 0
2023-10-16T05:07:12.755Z|00404|netdev_dpdk|WARN|Failed to enable flow control on device 2
2023-10-16T05:07:12.755Z|00405|netdev_dpdk|WARN|Failed to enable flow control on device 1
2023-10-16T05:07:12.755Z|00406|netdev_dpdk|WARN|Failed to enable flow control on device 3
2023-10-16T05:07:12.791Z|00407|netdev_dpdk|WARN|Failed to enable flow control on device 0
2023-10-16T05:07:12.791Z|00408|netdev_dpdk|WARN|Failed to enable flow control on device 2
2023-10-16T05:07:12.791Z|00409|netdev_dpdk|WARN|Failed to enable flow control on device 1
2023-10-16T05:07:12.791Z|00410|netdev_dpdk|WARN|Failed to enable flow control on device 3
2023-10-16T05:07:12.834Z|00411|netdev_dpdk|WARN|Failed to enable flow control on device 0
2023-10-16T05:07:12.834Z|00412|netdev_dpdk|WARN|Failed to enable flow control on device 2
2023-10-16T05:07:12.834Z|00413|netdev_dpdk|WARN|Failed to enable flow control on device 1
2023-10-16T05:07:12.834Z|00414|netdev_dpdk|WARN|Failed to enable flow control on device 3
2023-10-16T05:07:17.672Z|00415|bridge|INFO|bridge br-oam: deleted interface bond1 on port 3
2023-10-16T05:07:17.672Z|00416|netdev_dpdk|WARN|Failed to enable flow control on device 0
2023-10-16T05:07:17.672Z|00417|netdev_dpdk|WARN|Failed to enable flow control on device 2
2023-10-16T05:07:17.673Z|00418|netdev_dpdk|WARN|Failed to enable flow control on device 1
2023-10-16T05:07:17.673Z|00419|netdev_dpdk|WARN|Failed to enable flow control on device 3
2023-10-16T05:07:17.693Z|00420|netdev_dpdk|WARN|Failed to enable flow control on device 0
2023-10-16T05:07:17.694Z|00421|netdev_dpdk|WARN|Failed to enable flow control on device 2
2023-10-16T05:07:17.694Z|00422|netdev_dpdk|WARN|Failed to enable flow control on device 1
2023-10-16T05:07:17.694Z|00423|netdev_dpdk|WARN|Failed to enable flow control on device 3