<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:16:44 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-8346] conf-sanity test_93: test failed to respond and timed out</title>
                <link>https://jira.whamcloud.com/browse/LU-8346</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for bfaccini &amp;lt;bruno.faccini@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/59eb46d2-3d9f-11e6-a0ce-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/59eb46d2-3d9f-11e6-a0ce-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_93 failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;test failed to respond and timed out
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Test log indicates onyx-32vm3 is not responding correctly after the parallel mount of MDS&amp;#91;2,4&amp;#93;:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== conf-sanity test 93: register mulitple MDT at the same time ======================================= 15:20:48 (1467152448)
Stopping clients: onyx-32vm1.onyx.hpdd.intel.com,onyx-32vm2 /mnt/lustre (opts:)
CMD: onyx-32vm1.onyx.hpdd.intel.com,onyx-32vm2 running=\$(grep -c /mnt/lustre&apos; &apos; /proc/mounts);
if [ \$running -ne 0 ] ; then
echo Stopping client \$(hostname) /mnt/lustre opts:;
lsof /mnt/lustre || need_kill=no;
if [ x != x -a x\$need_kill != xno ]; then
    pids=\$(lsof -t /mnt/lustre | sort -u);
    if [ -n \&quot;\$pids\&quot; ]; then
             kill -9 \$pids;
    fi
fi;
while umount  /mnt/lustre 2&amp;gt;&amp;amp;1 | grep -q busy; do
    echo /mnt/lustre is still busy, wait one second &amp;amp;&amp;amp; sleep 1;
done;
fi
Stopping clients: onyx-32vm1.onyx.hpdd.intel.com,onyx-32vm2 /mnt/lustre2 (opts:)
CMD: onyx-32vm1.onyx.hpdd.intel.com,onyx-32vm2 running=\$(grep -c /mnt/lustre2&apos; &apos; /proc/mounts);
if [ \$running -ne 0 ] ; then
echo Stopping client \$(hostname) /mnt/lustre2 opts:;
lsof /mnt/lustre2 || need_kill=no;
if [ x != x -a x\$need_kill != xno ]; then
    pids=\$(lsof -t /mnt/lustre2 | sort -u);
    if [ -n \&quot;\$pids\&quot; ]; then
             kill -9 \$pids;
    fi
fi;
while umount  /mnt/lustre2 2&amp;gt;&amp;amp;1 | grep -q busy; do
    echo /mnt/lustre2 is still busy, wait one second &amp;amp;&amp;amp; sleep 1;
done;
fi
CMD: onyx-32vm7 grep -c /mnt/lustre-mds1&apos; &apos; /proc/mounts
CMD: onyx-32vm7 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm3 grep -c /mnt/lustre-mds2&apos; &apos; /proc/mounts
CMD: onyx-32vm3 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm7 grep -c /mnt/lustre-mds3&apos; &apos; /proc/mounts
CMD: onyx-32vm7 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm3 grep -c /mnt/lustre-mds4&apos; &apos; /proc/mounts
CMD: onyx-32vm3 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 grep -c /mnt/lustre-ost1&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 grep -c /mnt/lustre-ost2&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 grep -c /mnt/lustre-ost3&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 grep -c /mnt/lustre-ost4&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 grep -c /mnt/lustre-ost5&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 grep -c /mnt/lustre-ost6&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 grep -c /mnt/lustre-ost7&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 grep -c /mnt/lustre-ost8&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm2,onyx-32vm3,onyx-32vm7,onyx-32vm8 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/compat-openmpi16/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh set_hostid 
Loading modules from /usr/lib64/lustre
detected 2 online CPUs by sysfs
Force libcfs to create 2 CPU partitions
debug=-1
subsystem_debug=all -lnet -lnd -pinger
Formatting mgs, mds, osts
Format mds1: /dev/lvm-Role_MDS/P1
CMD: onyx-32vm7 grep -c /mnt/lustre-mds1&apos; &apos; /proc/mounts
CMD: onyx-32vm7 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm7 mkfs.lustre --mgs --fsname=lustre --mdt --index=0 --param=sys.timeout=20 --param=lov.stripesize=1048576 --param=lov.stripecount=0 --param=mdt.identity_upcall=/usr/sbin/l_getidentity --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_MDS/P1

   Permanent disk data:
Target:     lustre:MDT0000
Index:      0
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x65
              (MDT MGS first_time update )
Persistent mount opts: user_xattr,errors=remount-ro
Parameters: sys.timeout=20 lov.stripesize=1048576 lov.stripecount=0 mdt.identity_upcall=/usr/sbin/l_getidentity

device size = 2048MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_MDS/P1
	target name   lustre:MDT0000
	4k blocks     50000
	options        -I 512 -i 2048 -q -O dirdata,uninit_bg,^extents,dir_nlink,quota,huge_file,flex_bg -E lazy_itable_init,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:MDT0000  -I 512 -i 2048 -q -O dirdata,uninit_bg,^extents,dir_nlink,quota,huge_file,flex_bg -E lazy_itable_init,lazy_journal_init -F /dev/lvm-Role_MDS/P1 50000
Writing CONFIGS/mountdata
Format mds2: /dev/lvm-Role_MDS/P2
CMD: onyx-32vm3 grep -c /mnt/lustre-mds2&apos; &apos; /proc/mounts
CMD: onyx-32vm3 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm3 mkfs.lustre --mgsnode=onyx-32vm7@tcp --fsname=lustre --mdt --index=1 --param=sys.timeout=20 --param=lov.stripesize=1048576 --param=lov.stripecount=0 --param=mdt.identity_upcall=/usr/sbin/l_getidentity --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_MDS/P2

   Permanent disk data:
Target:     lustre:MDT0001
Index:      1
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x61
              (MDT first_time update )
Persistent mount opts: user_xattr,errors=remount-ro
Parameters: mgsnode=10.2.4.117@tcp sys.timeout=20 lov.stripesize=1048576 lov.stripecount=0 mdt.identity_upcall=/usr/sbin/l_getidentity

device size = 2048MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_MDS/P2
	target name   lustre:MDT0001
	4k blocks     50000
	options        -I 512 -i 2048 -q -O dirdata,uninit_bg,^extents,dir_nlink,quota,huge_file,flex_bg -E lazy_itable_init,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:MDT0001  -I 512 -i 2048 -q -O dirdata,uninit_bg,^extents,dir_nlink,quota,huge_file,flex_bg -E lazy_itable_init,lazy_journal_init -F /dev/lvm-Role_MDS/P2 50000
Writing CONFIGS/mountdata
Format mds3: /dev/lvm-Role_MDS/P3
CMD: onyx-32vm7 grep -c /mnt/lustre-mds3&apos; &apos; /proc/mounts
CMD: onyx-32vm7 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm7 mkfs.lustre --mgsnode=onyx-32vm7@tcp --fsname=lustre --mdt --index=2 --param=sys.timeout=20 --param=lov.stripesize=1048576 --param=lov.stripecount=0 --param=mdt.identity_upcall=/usr/sbin/l_getidentity --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_MDS/P3

   Permanent disk data:
Target:     lustre:MDT0002
Index:      2
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x61
              (MDT first_time update )
Persistent mount opts: user_xattr,errors=remount-ro
Parameters: mgsnode=10.2.4.117@tcp sys.timeout=20 lov.stripesize=1048576 lov.stripecount=0 mdt.identity_upcall=/usr/sbin/l_getidentity

device size = 2048MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_MDS/P3
	target name   lustre:MDT0002
	4k blocks     50000
	options        -I 512 -i 2048 -q -O dirdata,uninit_bg,^extents,dir_nlink,quota,huge_file,flex_bg -E lazy_itable_init,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:MDT0002  -I 512 -i 2048 -q -O dirdata,uninit_bg,^extents,dir_nlink,quota,huge_file,flex_bg -E lazy_itable_init,lazy_journal_init -F /dev/lvm-Role_MDS/P3 50000
Writing CONFIGS/mountdata
Format mds4: /dev/lvm-Role_MDS/P4
CMD: onyx-32vm3 grep -c /mnt/lustre-mds4&apos; &apos; /proc/mounts
CMD: onyx-32vm3 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm3 mkfs.lustre --mgsnode=onyx-32vm7@tcp --fsname=lustre --mdt --index=3 --param=sys.timeout=20 --param=lov.stripesize=1048576 --param=lov.stripecount=0 --param=mdt.identity_upcall=/usr/sbin/l_getidentity --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_MDS/P4

   Permanent disk data:
Target:     lustre:MDT0003
Index:      3
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x61
              (MDT first_time update )
Persistent mount opts: user_xattr,errors=remount-ro
Parameters: mgsnode=10.2.4.117@tcp sys.timeout=20 lov.stripesize=1048576 lov.stripecount=0 mdt.identity_upcall=/usr/sbin/l_getidentity

device size = 2048MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_MDS/P4
	target name   lustre:MDT0003
	4k blocks     50000
	options        -I 512 -i 2048 -q -O dirdata,uninit_bg,^extents,dir_nlink,quota,huge_file,flex_bg -E lazy_itable_init,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:MDT0003  -I 512 -i 2048 -q -O dirdata,uninit_bg,^extents,dir_nlink,quota,huge_file,flex_bg -E lazy_itable_init,lazy_journal_init -F /dev/lvm-Role_MDS/P4 50000
Writing CONFIGS/mountdata
Format ost1: /dev/lvm-Role_OSS/P1
CMD: onyx-32vm8 grep -c /mnt/lustre-ost1&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 mkfs.lustre --mgsnode=onyx-32vm7@tcp --fsname=lustre --ost --index=0 --param=sys.timeout=20 --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_OSS/P1

   Permanent disk data:
Target:     lustre:OST0000
Index:      0
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x62
              (OST first_time update )
Persistent mount opts: ,errors=remount-ro
Parameters: mgsnode=10.2.4.117@tcp sys.timeout=20

device size = 9912MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_OSS/P1
	target name   lustre:OST0000
	4k blocks     50000
	options        -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:OST0000  -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F /dev/lvm-Role_OSS/P1 50000
Writing CONFIGS/mountdata
Format ost2: /dev/lvm-Role_OSS/P2
CMD: onyx-32vm8 grep -c /mnt/lustre-ost2&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 mkfs.lustre --mgsnode=onyx-32vm7@tcp --fsname=lustre --ost --index=1 --param=sys.timeout=20 --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_OSS/P2

   Permanent disk data:
Target:     lustre:OST0001
Index:      1
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x62
              (OST first_time update )
Persistent mount opts: ,errors=remount-ro
Parameters: mgsnode=10.2.4.117@tcp sys.timeout=20

device size = 9912MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_OSS/P2
	target name   lustre:OST0001
	4k blocks     50000
	options        -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:OST0001  -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F /dev/lvm-Role_OSS/P2 50000
Writing CONFIGS/mountdata
Format ost3: /dev/lvm-Role_OSS/P3
CMD: onyx-32vm8 grep -c /mnt/lustre-ost3&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 mkfs.lustre --mgsnode=onyx-32vm7@tcp --fsname=lustre --ost --index=2 --param=sys.timeout=20 --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_OSS/P3

   Permanent disk data:
Target:     lustre:OST0002
Index:      2
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x62
              (OST first_time update )
Persistent mount opts: ,errors=remount-ro
Parameters: mgsnode=10.2.4.117@tcp sys.timeout=20

device size = 9912MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_OSS/P3
	target name   lustre:OST0002
	4k blocks     50000
	options        -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:OST0002  -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F /dev/lvm-Role_OSS/P3 50000
Writing CONFIGS/mountdata
Format ost4: /dev/lvm-Role_OSS/P4
CMD: onyx-32vm8 grep -c /mnt/lustre-ost4&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 mkfs.lustre --mgsnode=onyx-32vm7@tcp --fsname=lustre --ost --index=3 --param=sys.timeout=20 --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_OSS/P4

   Permanent disk data:
Target:     lustre:OST0003
Index:      3
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x62
              (OST first_time update )
Persistent mount opts: ,errors=remount-ro
Parameters: mgsnode=10.2.4.117@tcp sys.timeout=20

device size = 9912MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_OSS/P4
	target name   lustre:OST0003
	4k blocks     50000
	options        -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:OST0003  -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F /dev/lvm-Role_OSS/P4 50000
Writing CONFIGS/mountdata
Format ost5: /dev/lvm-Role_OSS/P5
CMD: onyx-32vm8 grep -c /mnt/lustre-ost5&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 mkfs.lustre --mgsnode=onyx-32vm7@tcp --fsname=lustre --ost --index=4 --param=sys.timeout=20 --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_OSS/P5

   Permanent disk data:
Target:     lustre:OST0004
Index:      4
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x62
              (OST first_time update )
Persistent mount opts: ,errors=remount-ro
Parameters: mgsnode=10.2.4.117@tcp sys.timeout=20

device size = 9912MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_OSS/P5
	target name   lustre:OST0004
	4k blocks     50000
	options        -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:OST0004  -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F /dev/lvm-Role_OSS/P5 50000
Writing CONFIGS/mountdata
Format ost6: /dev/lvm-Role_OSS/P6
CMD: onyx-32vm8 grep -c /mnt/lustre-ost6&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 mkfs.lustre --mgsnode=onyx-32vm7@tcp --fsname=lustre --ost --index=5 --param=sys.timeout=20 --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_OSS/P6

   Permanent disk data:
Target:     lustre:OST0005
Index:      5
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x62
              (OST first_time update )
Persistent mount opts: ,errors=remount-ro
Parameters: mgsnode=10.2.4.117@tcp sys.timeout=20

device size = 9912MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_OSS/P6
	target name   lustre:OST0005
	4k blocks     50000
	options        -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:OST0005  -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F /dev/lvm-Role_OSS/P6 50000
Writing CONFIGS/mountdata
Format ost7: /dev/lvm-Role_OSS/P7
CMD: onyx-32vm8 grep -c /mnt/lustre-ost7&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 mkfs.lustre --mgsnode=onyx-32vm7@tcp --fsname=lustre --ost --index=6 --param=sys.timeout=20 --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_OSS/P7

   Permanent disk data:
Target:     lustre:OST0006
Index:      6
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x62
              (OST first_time update )
Persistent mount opts: ,errors=remount-ro
Parameters: mgsnode=10.2.4.117@tcp sys.timeout=20

device size = 9912MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_OSS/P7
	target name   lustre:OST0006
	4k blocks     50000
	options        -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:OST0006  -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F /dev/lvm-Role_OSS/P7 50000
Writing CONFIGS/mountdata
Format ost8: /dev/lvm-Role_OSS/P8
CMD: onyx-32vm8 grep -c /mnt/lustre-ost8&apos; &apos; /proc/mounts
CMD: onyx-32vm8 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: onyx-32vm8 mkfs.lustre --mgsnode=onyx-32vm7@tcp --fsname=lustre --ost --index=7 --param=sys.timeout=20 --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat /dev/lvm-Role_OSS/P8

   Permanent disk data:
Target:     lustre:OST0007
Index:      7
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x62
              (OST first_time update )
Persistent mount opts: ,errors=remount-ro
Parameters: mgsnode=10.2.4.117@tcp sys.timeout=20

device size = 9912MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_OSS/P8
	target name   lustre:OST0007
	4k blocks     50000
	options        -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre:OST0007  -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=&quot;4290772992&quot;,lazy_journal_init -F /dev/lvm-Role_OSS/P8 50000
Writing CONFIGS/mountdata
start mds service on onyx-32vm7
CMD: onyx-32vm7 mkdir -p /mnt/lustre-mds1
CMD: onyx-32vm7 test -b /dev/lvm-Role_MDS/P1
CMD: onyx-32vm7 e2label /dev/lvm-Role_MDS/P1
Starting mds1:   /dev/lvm-Role_MDS/P1 /mnt/lustre-mds1
CMD: onyx-32vm7 mkdir -p /mnt/lustre-mds1; mount -t lustre   		                   /dev/lvm-Role_MDS/P1 /mnt/lustre-mds1
CMD: onyx-32vm7 /usr/sbin/lctl get_param -n health_check
CMD: onyx-32vm7 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/compat-openmpi16/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh set_default_debug \&quot;-1\&quot; \&quot;all -lnet -lnd -pinger\&quot; 4 
CMD: onyx-32vm7 e2label /dev/lvm-Role_MDS/P1 				2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: onyx-32vm7 e2label /dev/lvm-Role_MDS/P1 				2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: onyx-32vm7 sync; sync; sync
CMD: onyx-32vm7 e2label /dev/lvm-Role_MDS/P1 2&amp;gt;/dev/null
Started lustre-MDT0000
start ost1 service on onyx-32vm8
CMD: onyx-32vm8 mkdir -p /mnt/lustre-ost1
CMD: onyx-32vm8 test -b /dev/lvm-Role_OSS/P1
CMD: onyx-32vm8 e2label /dev/lvm-Role_OSS/P1
Starting ost1:   /dev/lvm-Role_OSS/P1 /mnt/lustre-ost1
CMD: onyx-32vm8 mkdir -p /mnt/lustre-ost1; mount -t lustre   		                   /dev/lvm-Role_OSS/P1 /mnt/lustre-ost1
CMD: onyx-32vm8 /usr/sbin/lctl get_param -n health_check
CMD: onyx-32vm8 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/compat-openmpi16/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh set_default_debug \&quot;-1\&quot; \&quot;all -lnet -lnd -pinger\&quot; 4 
CMD: onyx-32vm8 e2label /dev/lvm-Role_OSS/P1 				2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: onyx-32vm8 e2label /dev/lvm-Role_OSS/P1 				2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: onyx-32vm8 sync; sync; sync
CMD: onyx-32vm8 e2label /dev/lvm-Role_OSS/P1 2&amp;gt;/dev/null
Started lustre-OST0000
CMD: onyx-32vm7 /usr/sbin/lctl set_param fail_val = 10 fail_loc=0x8000090e
onyx-32vm7: error: set_param: setting /proc/sys/lnet/fail_val==: Invalid argument
onyx-32vm7: error: set_param: param_path &apos;10&apos;: No such file or directory
mount lustre on /mnt/lustre.....
Starting client: onyx-32vm1.onyx.hpdd.intel.com:  -o user_xattr,flock onyx-32vm7@tcp:/lustre /mnt/lustre
CMD: onyx-32vm1.onyx.hpdd.intel.com mkdir -p /mnt/lustre
start mds service on onyx-32vm7
start mds service on onyx-32vm3
start mds service on onyx-32vm3
CMD: onyx-32vm1.onyx.hpdd.intel.com mount -t lustre -o user_xattr,flock onyx-32vm7@tcp:/lustre /mnt/lustre
CMD: onyx-32vm7 mkdir -p /mnt/lustre-mds3
CMD: onyx-32vm3 mkdir -p /mnt/lustre-mds2
CMD: onyx-32vm3 mkdir -p /mnt/lustre-mds4
CMD: onyx-32vm3 test -b /dev/lvm-Role_MDS/P2
CMD: onyx-32vm3 test -b /dev/lvm-Role_MDS/P4
CMD: onyx-32vm7 test -b /dev/lvm-Role_MDS/P3
CMD: onyx-32vm3 e2label /dev/lvm-Role_MDS/P4
CMD: onyx-32vm7 e2label /dev/lvm-Role_MDS/P3
CMD: onyx-32vm3 e2label /dev/lvm-Role_MDS/P2
Starting mds3:   /dev/lvm-Role_MDS/P3 /mnt/lustre-mds3
CMD: onyx-32vm7 mkdir -p /mnt/lustre-mds3; mount -t lustre   		                   /dev/lvm-Role_MDS/P3 /mnt/lustre-mds3
Starting mds4:   /dev/lvm-Role_MDS/P4 /mnt/lustre-mds4
CMD: onyx-32vm3 mkdir -p /mnt/lustre-mds4; mount -t lustre   		                   /dev/lvm-Role_MDS/P4 /mnt/lustre-mds4
Starting mds2:   /dev/lvm-Role_MDS/P2 /mnt/lustre-mds2
CMD: onyx-32vm3 mkdir -p /mnt/lustre-mds2; mount -t lustre   		                   /dev/lvm-Role_MDS/P2 /mnt/lustre-mds2
CMD: onyx-32vm7 /usr/sbin/lctl get_param -n health_check
CMD: onyx-32vm7 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/compat-openmpi16/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh set_default_debug \&quot;-1\&quot; \&quot;all -lnet -lnd -pinger\&quot; 4 
CMD: onyx-32vm7 e2label /dev/lvm-Role_MDS/P3 				2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: onyx-32vm7 e2label /dev/lvm-Role_MDS/P3 				2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: onyx-32vm7 sync; sync; sync
CMD: onyx-32vm7 e2label /dev/lvm-Role_MDS/P3 2&amp;gt;/dev/null
Started lustre-MDT0002
CMD: onyx-32vm7 lctl list_param osc.lustre-OST*-osc             &amp;gt; /dev/null 2&amp;gt;&amp;amp;1
CMD: onyx-32vm7 lctl get_param -n at_min
CMD: onyx-32vm7 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/compat-openmpi16/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh wait_import_state FULL osc.lustre-OST0000-osc-MDT0000.ost_server_uuid 40 
onyx-32vm7: osc.lustre-OST0000-osc-MDT0000.ost_server_uuid in FULL state after 0 sec
CMD: onyx-32vm3 /usr/sbin/lctl lustre_build_version
pdsh@onyx-32vm1: onyx-32vm3: mcmd: connect failed: Connection refused
/usr/lib64/lustre/tests/test-framework.sh: line 382: ( &amp;lt;&amp;lt; 16) | ( &amp;lt;&amp;lt; 8) | : syntax error: operand expected (error token is &quot;&amp;lt;&amp;lt; 16) | ( &amp;lt;&amp;lt; 8) | &quot;)
/usr/lib64/lustre/tests/test-framework.sh: line 5818: [: -le: unary operator expected
CMD: onyx-32vm3 /usr/sbin/lctl lustre_build_version
pdsh@onyx-32vm1: onyx-32vm3: mcmd: connect failed: Connection refused
/usr/lib64/lustre/tests/test-framework.sh: line 382: ( &amp;lt;&amp;lt; 16) | ( &amp;lt;&amp;lt; 8) | : syntax error: operand expected (error token is &quot;&amp;lt;&amp;lt; 16) | ( &amp;lt;&amp;lt; 8) | &quot;)
/usr/lib64/lustre/tests/test-framework.sh: line 5803: [: -gt: unary operator expected
CMD: onyx-32vm3 lctl get_param -n at_min
pdsh@onyx-32vm1: onyx-32vm3: mcmd: connect failed: Connection refused
CMD: onyx-32vm3 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/compat-openmpi16/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh wait_import_state FULL osc.lustre-OST0000-osc-MDT0001.ost_server_uuid 40 
pdsh@onyx-32vm1: onyx-32vm3: mcmd: connect failed: Connection refused
 conf-sanity test_93: @@@@@@ FAIL: import is not in FULL state 
  Trace dump:
  = /usr/lib64/lustre/tests/test-framework.sh:4785:error()
  = /usr/lib64/lustre/tests/test-framework.sh:5976:_wait_osc_import_state()
  = /usr/lib64/lustre/tests/test-framework.sh:5991:wait_osc_import_state()
  = /usr/lib64/lustre/tests/conf-sanity.sh:6467:test_93()
  = /usr/lib64/lustre/tests/test-framework.sh:5049:run_one()
  = /usr/lib64/lustre/tests/test-framework.sh:5088:run_one_logged()
  = /usr/lib64/lustre/tests/test-framework.sh:4935:run_test()
  = /usr/lib64/lustre/tests/conf-sanity.sh:6473:main()
Dumping lctl log to /logdir/test_logs/2016-06-28/lustre-reviews-el7-x86_64--review-dne-part-1--1_6_1__40104__-69939819083780-054108/conf-sanity.test_93.*.1467152498.log
CMD: onyx-32vm1.onyx.hpdd.intel.com,onyx-32vm2,onyx-32vm3,onyx-32vm7,onyx-32vm8 /usr/sbin/lctl dk &amp;gt; /logdir/test_logs/2016-06-28/lustre-reviews-el7-x86_64--review-dne-part-1--1_6_1__40104__-69939819083780-054108/conf-sanity.test_93.debug_log.\$(hostname -s).1467152498.log;
         dmesg &amp;gt; /logdir/test_logs/2016-06-28/lustre-reviews-el7-x86_64--review-dne-part-1--1_6_1__40104__-69939819083780-054108/conf-sanity.test_93.dmesg.\$(hostname -s).1467152498.log
pdsh@onyx-32vm1: onyx-32vm3: mcmd: connect failed: Connection refused
Resetting fail_loc on all nodes...CMD: onyx-32vm1.onyx.hpdd.intel.com,onyx-32vm2,onyx-32vm3,onyx-32vm7,onyx-32vm8 lctl set_param -n fail_loc=0 	    fail_val=0 2&amp;gt;/dev/null || true
pdsh@onyx-32vm1: onyx-32vm3: mcmd: connect failed: Connection refused
done.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This occurs during conf-sanity/test_93:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;test_93() {
        [ $MDSCOUNT -lt 3 ] &amp;amp;&amp;amp; skip &quot;needs &amp;gt;= 3 MDTs&quot; &amp;amp;&amp;amp; return

        reformat
        #start mgs or mgs/mdt0
        if ! combined_mgs_mds ; then
                start_mgs
                start_mdt 1
        else
                start_mdt 1
        fi

        start_ost || error &quot;OST0 start fail&quot;

        #define OBD_FAIL_MGS_WRITE_TARGET_DELAY  0x90e
        do_facet mgs &quot;$LCTL set_param fail_val = 10 fail_loc=0x8000090e&quot;
        for num in $(seq 2 $MDSCOUNT); do
                start_mdt $num &amp;amp;    &amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;&amp;lt;
        done

        mount_client $MOUNT || error &quot;mount client fails&quot;
        wait_osc_import_state mds ost FULL
        wait_osc_import_state client ost FULL
        check_mount || error &quot;check_mount failed&quot;

        cleanup || error &quot;cleanup failed with $?&quot;
}
run_test 93 &quot;register mulitple MDT at the same time&quot;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;and the reason for the failure is the following crash/LBUG found in the onyx-32vm3/MDS console log:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;15:21:36:[29395.748697] Lustre: DEBUG MARKER: mkdir -p /mnt/lustre-mds4
15:21:36:[29395.753612] Lustre: DEBUG MARKER: mkdir -p /mnt/lustre-mds2
15:21:36:[29396.019926] Lustre: DEBUG MARKER: test -b /dev/lvm-Role_MDS/P4
15:21:36:[29396.024718] Lustre: DEBUG MARKER: test -b /dev/lvm-Role_MDS/P2
15:21:36:[29396.306479] Lustre: DEBUG MARKER: e2label /dev/lvm-Role_MDS/P2
15:21:36:[29396.311613] Lustre: DEBUG MARKER: e2label /dev/lvm-Role_MDS/P4
15:21:36:[29396.577947] Lustre: DEBUG MARKER: mkdir -p /mnt/lustre-mds2; mount -t lustre   		                   /dev/lvm-Role_MDS/P2 /mnt/lustre-mds2
15:21:36:[29396.594860] Lustre: DEBUG MARKER: mkdir -p /mnt/lustre-mds4; mount -t lustre   		                   /dev/lvm-Role_MDS/P4 /mnt/lustre-mds4
15:21:36:[29396.743622] LDISKFS-fs (dm-1): mounted filesystem with ordered data mode. Opts: errors=remount-ro
15:21:36:[29396.750879] LDISKFS-fs (dm-3): mounted filesystem with ordered data mode. Opts: errors=remount-ro
15:21:36:[29396.909772] LustreError: 26347:0:(osd_handler.c:6468:osd_device_init0()) ASSERTION( info ) failed: 
15:21:36:[29396.912150] LustreError: 26347:0:(osd_handler.c:6468:osd_device_init0()) LBUG
15:21:36:[29396.915016] Pid: 26347, comm: mount.lustre
15:21:36:[29396.919614] 
15:21:36:[29396.919614] Call Trace:
15:21:36:[29396.922958]  [&amp;lt;ffffffffa05e67d3&amp;gt;] libcfs_debug_dumpstack+0x53/0x80 [libcfs]
15:21:36:[29396.925401]  [&amp;lt;ffffffffa05e6d75&amp;gt;] lbug_with_loc+0x45/0xc0 [libcfs]
15:21:36:[29396.927434]  [&amp;lt;ffffffffa0c24ccf&amp;gt;] osd_device_alloc+0x70f/0x880 [osd_ldiskfs]
15:21:36:[29396.929611]  [&amp;lt;ffffffffa07cd104&amp;gt;] obd_setup+0x114/0x2a0 [obdclass]
15:21:36:[29396.931618]  [&amp;lt;ffffffffa07cfb54&amp;gt;] class_setup+0x2f4/0x8d0 [obdclass]
15:21:36:[29396.933586]  [&amp;lt;ffffffffa07d3ee7&amp;gt;] class_process_config+0x1de7/0x2f70 [obdclass]
15:21:36:[29396.935800]  [&amp;lt;ffffffffa05f1957&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
15:21:36:[29396.937938] LDISKFS-fs (dm-1): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro,no_mbcache
15:21:36:[29396.937939]  [&amp;lt;ffffffffa07dcb69&amp;gt;] do_lcfg+0x159/0x5d0 [obdclass]
15:21:36:[29396.937954]  [&amp;lt;ffffffffa07dd928&amp;gt;] lustre_start_simple+0x88/0x210 [obdclass]
15:21:36:[29396.937972]  [&amp;lt;ffffffffa0808ac4&amp;gt;] server_fill_super+0xf24/0x184c [obdclass]
15:21:36:[29396.937977]  [&amp;lt;ffffffffa05f1957&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
15:21:36:[29396.937991]  [&amp;lt;ffffffffa07e09e8&amp;gt;] lustre_fill_super+0x328/0x950 [obdclass]
15:21:36:[29396.938013]  [&amp;lt;ffffffffa07e06c0&amp;gt;] ? lustre_fill_super+0x0/0x950 [obdclass]
15:21:36:[29396.938019]  [&amp;lt;ffffffff811e1f2d&amp;gt;] mount_nodev+0x4d/0xb0
15:21:36:[29396.938033]  [&amp;lt;ffffffffa07d8918&amp;gt;] lustre_mount+0x38/0x60 [obdclass]
15:21:36:[29396.938034]  [&amp;lt;ffffffff811e28d9&amp;gt;] mount_fs+0x39/0x1b0
15:21:36:[29396.938038]  [&amp;lt;ffffffff811fe1af&amp;gt;] vfs_kern_mount+0x5f/0xf0
15:21:36:[29396.938039]  [&amp;lt;ffffffff812006fe&amp;gt;] do_mount+0x24e/0xa40
15:21:36:[29396.938043]  [&amp;lt;ffffffff8116e15e&amp;gt;] ? __get_free_pages+0xe/0x50
15:21:36:[29396.938044]  [&amp;lt;ffffffff81200f86&amp;gt;] SyS_mount+0x96/0xf0
15:21:36:[29396.938048]  [&amp;lt;ffffffff816463c9&amp;gt;] system_call_fastpath+0x16/0x1b
15:21:36:[29396.938048] 
15:21:36:[29396.968797] Kernel panic - not syncing: LBUG
15:21:36:[29396.969781] CPU: 0 PID: 26347 Comm: mount.lustre Tainted: G           OE  ------------   3.10.0-327.18.2.el7_lustre.x86_64 #1
15:21:36:[29396.969781] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
15:21:36:[29396.969781]  ffffffffa0603def 0000000048fb9a4f ffff880039073950 ffffffff81635c14
15:21:36:[29396.969781]  ffff8800390739d0 ffffffff8162f48a ffffffff00000008 ffff8800390739e0
15:21:36:[29396.969781]  ffff880039073980 0000000048fb9a4f ffffffffa0c511a0 0000000000000246
15:21:36:[29396.969781] Call Trace:
15:21:36:[29396.969781]  [&amp;lt;ffffffff81635c14&amp;gt;] dump_stack+0x19/0x1b
15:21:36:[29396.969781]  [&amp;lt;ffffffff8162f48a&amp;gt;] panic+0xd8/0x1e7
15:21:36:[29396.969781]  [&amp;lt;ffffffffa05e6ddb&amp;gt;] lbug_with_loc+0xab/0xc0 [libcfs]
15:21:36:[29396.969781]  [&amp;lt;ffffffffa0c24ccf&amp;gt;] osd_device_alloc+0x70f/0x880 [osd_ldiskfs]
15:21:36:[29396.969781]  [&amp;lt;ffffffffa07cd104&amp;gt;] obd_setup+0x114/0x2a0 [obdclass]
15:21:36:[29396.969781]  [&amp;lt;ffffffffa07cfb54&amp;gt;] class_setup+0x2f4/0x8d0 [obdclass]
15:21:36:[29396.969781]  [&amp;lt;ffffffffa07d3ee7&amp;gt;] class_process_config+0x1de7/0x2f70 [obdclass]
15:21:36:[29396.969781]  [&amp;lt;ffffffffa05f1957&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
15:21:36:[29396.969781]  [&amp;lt;ffffffffa07dcb69&amp;gt;] do_lcfg+0x159/0x5d0 [obdclass]
15:21:36:[29396.969781]  [&amp;lt;ffffffffa07dd928&amp;gt;] lustre_start_simple+0x88/0x210 [obdclass]
15:21:36:[29396.969781]  [&amp;lt;ffffffffa0808ac4&amp;gt;] server_fill_super+0xf24/0x184c [obdclass]
15:21:36:[29396.969781]  [&amp;lt;ffffffffa05f1957&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
15:21:36:[29396.969781]  [&amp;lt;ffffffffa07e09e8&amp;gt;] lustre_fill_super+0x328/0x950 [obdclass]
15:21:36:[29396.969781]  [&amp;lt;ffffffffa07e06c0&amp;gt;] ? lustre_common_put_super+0x270/0x270 [obdclass]
15:21:36:[29396.969781]  [&amp;lt;ffffffff811e1f2d&amp;gt;] mount_nodev+0x4d/0xb0
15:21:36:[29396.969781]  [&amp;lt;ffffffffa07d8918&amp;gt;] lustre_mount+0x38/0x60 [obdclass]
15:21:36:[29396.969781]  [&amp;lt;ffffffff811e28d9&amp;gt;] mount_fs+0x39/0x1b0
15:21:36:[29396.969781]  [&amp;lt;ffffffff811fe1af&amp;gt;] vfs_kern_mount+0x5f/0xf0
15:21:36:[29396.969781]  [&amp;lt;ffffffff812006fe&amp;gt;] do_mount+0x24e/0xa40
15:21:36:[29396.969781]  [&amp;lt;ffffffff8116e15e&amp;gt;] ? __get_free_pages+0xe/0x50
15:21:36:[29396.969781]  [&amp;lt;ffffffff81200f86&amp;gt;] SyS_mount+0x96/0xf0
15:21:36:[29396.969781]  [&amp;lt;ffffffff816463c9&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Info required for matching: conf-sanity 93&lt;/p&gt;</description>
                <environment></environment>
        <key id="37888">LU-8346</key>
            <summary>conf-sanity test_93: test failed to respond and timed out</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="hongchao.zhang">Hongchao Zhang</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Wed, 29 Jun 2016 09:03:22 +0000</created>
                <updated>Tue, 3 Nov 2020 11:06:56 +0000</updated>
                            <resolved>Fri, 10 Jul 2020 20:14:08 +0000</resolved>
                                    <version>Lustre 2.10.0</version>
                                    <fixVersion>Lustre 2.10.0</fixVersion>
                    <fixVersion>Lustre 2.10.1</fixVersion>
                    <fixVersion>Lustre 2.11.0</fixVersion>
                    <fixVersion>Lustre 2.13.0</fixVersion>
                    <fixVersion>Lustre 2.12.1</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>18</watches>
                                                                            <comments>
                            <comment id="157237" author="bfaccini" created="Wed, 29 Jun 2016 09:14:23 +0000"  >&lt;p&gt;Looks like a new race scenario during parallel mount, but at feature/service level/layer (ie, not at target layer like &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5299&quot; title=&quot;osd_start() LBUG when doing parallel mount of the same target&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5299&quot;&gt;&lt;del&gt;LU-5299&lt;/del&gt;&lt;/a&gt;/&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5573&quot; title=&quot;Test timeout conf-sanity test_41c&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5573&quot;&gt;&lt;del&gt;LU-5573&lt;/del&gt;&lt;/a&gt;/&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6553&quot; title=&quot;Recurrence of LU-5299: obd_mount_server.c:1690:osd_start()) ASSERTION( obd ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6553&quot;&gt;&lt;del&gt;LU-6553&lt;/del&gt;&lt;/a&gt;).&lt;br/&gt;
Will have a look at the crash-dump to see how this happens and how it can be fixed.&lt;/p&gt;</comment>
                            <comment id="159916" author="yujian" created="Tue, 26 Jul 2016 16:04:30 +0000"  >&lt;p&gt;More failure instances on master branch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/bdb7e32e-533d-11e6-b2ba-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/bdb7e32e-533d-11e6-b2ba-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/b7749b9e-52dc-11e6-8968-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/b7749b9e-52dc-11e6-8968-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;This is affecting patch review testing on master branch.&lt;/p&gt;</comment>
                            <comment id="177400" author="yong.fan" created="Mon, 12 Dec 2016 02:34:48 +0000"  >&lt;p&gt;+1 on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/1ba56f60-bfa4-11e6-bedd-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/1ba56f60-bfa4-11e6-bedd-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="188351" author="yong.fan" created="Wed, 15 Mar 2017 01:17:55 +0000"  >&lt;p&gt;+1 on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/171abdb8-090c-11e7-9053-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/171abdb8-090c-11e7-9053-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="189082" author="gerrit" created="Tue, 21 Mar 2017 13:13:33 +0000"  >&lt;p&gt;Hongchao Zhang (hongchao.zhang@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/26099&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/26099&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obdclass: guarantee all keys filled&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 12223b3133d3651dee29dfd940ca3c4f0e256cfd&lt;/p&gt;</comment>
                            <comment id="194588" author="yujian" created="Fri, 5 May 2017 00:16:17 +0000"  >&lt;p&gt;+1 on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/a7d8dad6-30a5-11e7-8847-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/a7d8dad6-30a5-11e7-8847-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="195621" author="gerrit" created="Fri, 12 May 2017 05:06:43 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/26099/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/26099/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obdclass: guarantee all keys filled&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: e58f8d609a81576eaf5bc9d0fa53bef274a01bfc&lt;/p&gt;</comment>
                            <comment id="195654" author="pjones" created="Fri, 12 May 2017 12:36:25 +0000"  >&lt;p&gt;Landed for 2.10&lt;/p&gt;</comment>
                            <comment id="197787" author="bfaccini" created="Thu, 1 Jun 2017 10:06:45 +0000"  >&lt;p&gt;Well, too bad but looks like I have triggered a new occurrence (&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6cf13ba8-46a7-11e7-bc6c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6cf13ba8-46a7-11e7-bc6c-5254006e85c2&lt;/a&gt;), even with &lt;a href=&quot;https://review.whamcloud.com/26099&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/26099&lt;/a&gt; applied.&lt;/p&gt;
</comment>
                            <comment id="198104" author="pjones" created="Mon, 5 Jun 2017 03:56:31 +0000"  >&lt;p&gt;It seems that the attempt to fix this issue has not been successful as the frequency of occurance is similar to before the fix. This frequency is not often enough so that we need to address this for 2.10 at this late stage. &lt;/p&gt;</comment>
                            <comment id="198108" author="gerrit" created="Mon, 5 Jun 2017 07:12:11 +0000"  >&lt;p&gt;Hongchao Zhang (hongchao.zhang@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/27426&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27426&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obd: debug patch&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 40d89c405dd84bf4f36553ef72bedf49c18da956&lt;/p&gt;</comment>
                            <comment id="198260" author="gerrit" created="Tue, 6 Jun 2017 09:48:28 +0000"  >&lt;p&gt;Hongchao Zhang (hongchao.zhang@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/27448&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27448&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obdclass: protect key_set_version&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 725fcbcaa1da4ffa554764a19c467394e5f71024&lt;/p&gt;</comment>
                            <comment id="201716" author="gerrit" created="Tue, 11 Jul 2017 19:39:22 +0000"  >&lt;p&gt;Patrick Farrell (paf@cray.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/27994&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27994&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obdclass: Set lc_version&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b432e08231b73c7b6c3a5e6fb5ab03a8de1e1778&lt;/p&gt;</comment>
                            <comment id="203864" author="gerrit" created="Sat, 29 Jul 2017 00:03:08 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/27994/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27994/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obdclass: Set lc_version&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 96f3fb788c230872e6d31185367a55ec3c4fedbc&lt;/p&gt;</comment>
                            <comment id="204671" author="gerrit" created="Mon, 7 Aug 2017 15:42:50 +0000"  >&lt;p&gt;Patrick Farrell (paf@cray.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/28405&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28405&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obdclass: Set lc_version&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 6225c3de5efeab340bba61895682923193c75821&lt;/p&gt;</comment>
                            <comment id="205914" author="gerrit" created="Mon, 21 Aug 2017 20:26:53 +0000"  >&lt;p&gt;John L. Hammond (john.hammond@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/28405/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28405/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obdclass: Set lc_version&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 6aabd4a2760f1d42a788f6ad8712abdece7d1159&lt;/p&gt;</comment>
                            <comment id="205924" author="pjones" created="Mon, 21 Aug 2017 21:09:44 +0000"  >&lt;p&gt;Hongchao&lt;/p&gt;

&lt;p&gt;Does &lt;a href=&quot;https://review.whamcloud.com/#/c/27448/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/27448/&lt;/a&gt; still need to land or can it be abandoned?&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="205950" author="hongchao.zhang" created="Tue, 22 Aug 2017 02:44:21 +0000"  >&lt;p&gt;Hi Peter,&lt;/p&gt;

&lt;p&gt;I think &lt;a href=&quot;https://review.whamcloud.com/#/c/27448/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/27448/&lt;/a&gt; is still needed.&lt;/p&gt;</comment>
                            <comment id="212041" author="adilger" created="Thu, 26 Oct 2017 07:49:03 +0000"  >&lt;p&gt;Hit this on master: &lt;a href=&quot;https://testing.hpdd.intel.com/sub_tests/c3aecd26-b96a-11e7-8afb-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/sub_tests/c3aecd26-b96a-11e7-8afb-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="214367" author="yujian" created="Tue, 21 Nov 2017 19:44:58 +0000"  >&lt;p&gt;More failure instances on master branch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/25246a58-ce73-11e7-a066-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/25246a58-ce73-11e7-a066-52540065bddc&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/b7d3e026-cbcc-11e7-8027-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/b7d3e026-cbcc-11e7-8027-52540065bddc&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/be17cd80-cb13-11e7-9840-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/be17cd80-cb13-11e7-9840-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="219082" author="gerrit" created="Thu, 25 Jan 2018 04:47:16 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/27448/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27448/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obdclass: protect key_set_version&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 4538de675cc1ee05408fa912e71c65d9109d7027&lt;/p&gt;</comment>
                            <comment id="219107" author="pjones" created="Thu, 25 Jan 2018 05:04:00 +0000"  >&lt;p&gt;Landed for 2.11&lt;/p&gt;</comment>
                            <comment id="219162" author="gerrit" created="Thu, 25 Jan 2018 17:42:14 +0000"  >&lt;p&gt;Minh Diep (minh.diep@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/31017&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31017&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obdclass: protect key_set_version&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 69c783d5c9934f9e5e0f59dee0ab9445bd2e6e3e&lt;/p&gt;</comment>
                            <comment id="219411" author="hongchao.zhang" created="Tue, 30 Jan 2018 08:45:08 +0000"  >&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/80fd399a-e6ad-40b4-8624-6ea2b73c1fd6&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/80fd399a-e6ad-40b4-8624-6ea2b73c1fd6&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="219412" author="gerrit" created="Tue, 30 Jan 2018 08:47:32 +0000"  >&lt;p&gt;Hongchao Zhang (hongchao.zhang@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/31084&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31084&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obdclass: debug patch&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 3d738f569ac354563116f91f6c32f611ae6b1b54&lt;/p&gt;</comment>
                            <comment id="220620" author="gerrit" created="Fri, 9 Feb 2018 18:13:12 +0000"  >&lt;p&gt;John L. Hammond (john.hammond@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/31017/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31017/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; obdclass: protect key_set_version&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 823e1549f109412db4a8cb31e648819660c5f7b8&lt;/p&gt;</comment>
                            <comment id="225846" author="hongchao.zhang" created="Thu, 12 Apr 2018 10:09:02 +0000"  >&lt;p&gt;This issue can be reproduced if using &quot;rmmod -w osd-ldiskfs&quot; while mounting the MDT or OST, and the osd-ldiskfs module&lt;br/&gt;
is marked as &quot;MODULE_STATE_GOING&quot; and will be skipped in &quot;keys_fill&quot; called by &quot;lu_env_refill&quot;.&lt;/p&gt;</comment>
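                            &lt;p&gt;A minimal reproducer sketch of the race described above; the device and mount point are placeholders (not taken from the test logs), and &quot;rmmod -w&quot; is the wait option from module-init-tools:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# Hypothetical reproducer; MDT_DEV and MDT_MOUNT are placeholders.
MDT_DEV=/dev/sdb
MDT_MOUNT=/mnt/lustre-mds1

# Start the MDT mount in the background so it races with the module removal.
mount -t lustre $MDT_DEV $MDT_MOUNT &amp;

# rmmod -w (module-init-tools) waits for the module to become unused; while it
# waits, osd-ldiskfs is marked MODULE_STATE_GOING, so keys_fill() called from
# lu_env_refill() skips the module&apos;s context keys during the racing mount.
rmmod -w osd-ldiskfs

wait
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;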
                            <comment id="225847" author="gerrit" created="Thu, 12 Apr 2018 10:15:06 +0000"  >&lt;p&gt;Hongchao Zhang (hongchao.zhang@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/31971&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31971&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; osd-ldiskfs: don&apos;t assert if module is going&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b6bbcddb2dec31dc6019fc8177c35424a957ed22&lt;/p&gt;</comment>
                            <comment id="241146" author="gerrit" created="Fri, 1 Feb 2019 03:47:32 +0000"  >&lt;p&gt;James Nunez (jnunez@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/34155&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34155&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; tests: remove spaces around fail_val&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 8d2909dab2e8af0d3301db14dec175a498d5f63b&lt;/p&gt;</comment>
                            <comment id="241167" author="jamesanunez" created="Fri, 1 Feb 2019 14:36:38 +0000"  >&lt;p&gt;The patch &lt;a href=&quot;https://review.whamcloud.com/34155&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34155&lt;/a&gt; fixes the problem with setting fail_val in conf-sanity test 93:&lt;br/&gt;
do_facet mgs &quot;$LCTL set_param fail_val = 10 fail_loc=0x8000090e&quot;&lt;/p&gt;</comment>
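                            &lt;p&gt;Per the patch subject (&quot;tests: remove spaces around fail_val&quot;), the fix is to drop the spaces so the assignment reaches lctl as a single fail_val=10 parameter; a sketch of the corrected line:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# Corrected form: no spaces around &apos;=&apos;, so fail_val=10 is parsed as one argument.
do_facet mgs &quot;$LCTL set_param fail_val=10 fail_loc=0x8000090e&quot;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;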
                            <comment id="241731" author="gerrit" created="Mon, 11 Feb 2019 18:15:03 +0000"  >&lt;p&gt;James Nunez (jnunez@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/34226&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34226&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; tests: remove spaces around fail_val&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: dbb3a02fef7332b126668bb5b2d3066d77243f90&lt;/p&gt;</comment>
                            <comment id="242169" author="gerrit" created="Mon, 18 Feb 2019 06:38:41 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/34155/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34155/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; tests: remove spaces around fail_val&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 59cb4a5c39e2c85a89be2863a73899c02c9a89c3&lt;/p&gt;</comment>
                            <comment id="244195" author="gerrit" created="Tue, 19 Mar 2019 06:00:15 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/34226/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34226/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8346&quot; title=&quot;conf-sanity test_93: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8346&quot;&gt;&lt;del&gt;LU-8346&lt;/del&gt;&lt;/a&gt; tests: remove spaces around fail_val&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 430b20be17645989a51fb586824f7637535ff24e&lt;/p&gt;</comment>
                            <comment id="266090" author="simmonsja" created="Wed, 25 Mar 2020 14:14:03 +0000"  >&lt;p&gt;Is this work done.&lt;/p&gt;</comment>
                            <comment id="266828" author="adilger" created="Sat, 4 Apr 2020 01:11:54 +0000"  >&lt;p&gt;I searched back to the start of the year, and there were two timeouts for this test in the past 3 months, so this isn&apos;t really a high priority to fix:&lt;br/&gt;
2020-03-02 &lt;a href=&quot;https://testing.whamcloud.com/test_sets/ddf5643a-c7b9-4b0b-9b86-2adfe74817d3&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/ddf5643a-c7b9-4b0b-9b86-2adfe74817d3&lt;/a&gt;&lt;br/&gt;
2020-02-24 &lt;a href=&quot;https://testing.whamcloud.com/test_sets/b0e66f0e-c3d4-49e9-9024-9e7910dd3d12&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/b0e66f0e-c3d4-49e9-9024-9e7910dd3d12&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="283380" author="apercher" created="Tue, 27 Oct 2020 14:11:40 +0000"  >&lt;p&gt;It seems that the patch 26099 has bad effect on parallel mounts on lustre client&lt;/p&gt;

&lt;p&gt;On a client node with Lustre 2.12.5, after mounting the same filesystem twice&lt;br/&gt;
 in parallel and then unmounting both mounts, it is impossible to remove the&lt;br/&gt;
 lustre module from the kernel.&lt;/p&gt;

&lt;p&gt;fstab:&lt;br/&gt;
 &amp;lt;serv1@ib1&amp;gt;:&amp;lt;serv2@ib1&amp;gt;:/fs1 /mnt/fs1 lustre defaults,_netdev,noauto,x-systemd.requires=lnet.service,flock,user_xattr,nosuid 0 0&lt;br/&gt;
 &amp;lt;serv1@ib1&amp;gt;:&amp;lt;serv2@ib1&amp;gt;:/fs1/home /mnt/home lustre defaults,_netdev,noauto,x-systemd.requires=lnet.service,flock,user_xattr,nosuid 0 0&lt;/p&gt;

&lt;p&gt;{{ systemctl start lnet&lt;br/&gt;
 modprobe lustre&lt;br/&gt;
 mount /mnt/home &amp;amp; mount /mnt/fs1&lt;br/&gt;
 umount /mnt/home&lt;br/&gt;
 umount /mnt/fs1&lt;br/&gt;
 rmmod lustre &amp;lt;- hang}}&lt;/p&gt;

&lt;p&gt;The rmmod stack in kernel is&lt;/p&gt;

&lt;p&gt;{{#0 __schedule&lt;br/&gt;
 #1 schedule&lt;br/&gt;
 #2 lu_context_key_degister &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
 #3 lu_context_key_degister_many &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
 #4 vvp_global_fini &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
 #5 lustre_exit &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
 #6 __x64_sys_delete_module&lt;br/&gt;
 #7 do_syscall&lt;br/&gt;
 #8 entry_SYSCALL_64_after_hwframe&lt;br/&gt;
 }}&lt;br/&gt;
 crash&amp;gt; p vvp_thread_key.lct_used.counter&lt;br/&gt;
 $1 = 105&lt;br/&gt;
 crash&amp;gt; p vvp_session_key.lct_used.counter&lt;br/&gt;
 $2 = 51&lt;/p&gt;</comment>
                            <comment id="284116" author="eaujames" created="Tue, 3 Nov 2020 11:06:25 +0000"  >&lt;p&gt;@Antoine Percher I have created the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14110&quot; title=&quot;Race during several client mount instances (--&amp;gt; rmmod lustre hang)&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14110&quot;&gt;&lt;del&gt;LU-14110&lt;/del&gt;&lt;/a&gt; to follow your issue. This affects also the master branch (with less recurrences).&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="54228">LU-11814</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="55638">LU-12300</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="52559">LU-11089</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="58225">LU-13313</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzyg4v:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>