<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:40:29 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4190] LustreError: 18166:0:(genops.c:1570:obd_exports_barrier()) ASSERTION( list_empty(&amp;obd-&gt;obd_exports) ) failed: </title>
                <link>https://jira.whamcloud.com/browse/LU-4190</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;2 servers work normally at active-active status&lt;br/&gt;
1&#12289;Mount Lustre FS on the client and write and read data;&lt;br/&gt;
2&#12289;Umount the MDT on the Failnode; &lt;br/&gt;
3&#12289;Read data on the client from Lustre FS,successfully;&lt;br/&gt;
4&#12289;Mount the MDT on the Failnode;&lt;br/&gt;
5&#12289;Umount the MDT on the MGSnode;&lt;br/&gt;
6&#12289;Read data on the client from Lustre FS,failed;&lt;br/&gt;
7&#12289;Mount the MDT on the MGSnode,then the MGSnode crash,print information as follow:&lt;/p&gt;

&lt;p&gt;LustreError: 18166:0:(genops.c:320:class_newdev()) Device MGC192.168.22.50@tcp already exists at 2, won&apos;t add &lt;br/&gt;
LustreError: 18166:0:(obd_config.c:374:class_attach()) Cannot create device MGC192.168.22.50@tcp of type mgc : -17 &lt;br/&gt;
LustreError: 18166:0:(obd_mount.c:196:lustre_start_simple()) MGC192.168.22.50@tcp attach error -17 &lt;br/&gt;
LustreError: 18166:0:(obd_mount_server.c:844:lustre_disconnect_lwp()) lustre-MDT0000-lwp-MDT0000: Can&apos;t end config log lustre-client. &lt;br/&gt;
LustreError: 18166:0:(obd_mount_server.c:1426:server_put_super()) lustre-MDT0000: failed to disconnect lwp. (rc=-2) &lt;br/&gt;
LustreError: 18166:0:(obd_mount_server.c:1456:server_put_super()) no obd lustre-MDT0000 &lt;br/&gt;
LustreError: 18166:0:(obd_mount_server.c:135:server_deregister_mount()) lustre-MDT0000 not registered &lt;br/&gt;
LustreError: 18166:0:(genops.c:1570:obd_exports_barrier()) ASSERTION( list_empty(&amp;amp;obd-&amp;gt;obd_exports) ) failed: &lt;br/&gt;
LustreError: 18166:0:(genops.c:1570:obd_exports_barrier()) LBUG &lt;br/&gt;
Pid: 18166, comm: mount.lustre &lt;/p&gt;

&lt;p&gt;Call Trace: &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa070a8a5&amp;gt;&amp;#93;&lt;/span&gt; libcfs_debug_dumpstack+0x55/0x80 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa070aeb7&amp;gt;&amp;#93;&lt;/span&gt; lbug_with_loc+0x47/0xb0 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0813d91&amp;gt;&amp;#93;&lt;/span&gt; obd_exports_barrier+0x181/0x190 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0f1e886&amp;gt;&amp;#93;&lt;/span&gt; mgs_device_fini+0xf6/0x5c0 &lt;span class=&quot;error&quot;&gt;&amp;#91;mgs&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa083e837&amp;gt;&amp;#93;&lt;/span&gt; class_cleanup+0x817/0xe00 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0817e2c&amp;gt;&amp;#93;&lt;/span&gt; ? class_name2dev+0x7c/0xe0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0842e9b&amp;gt;&amp;#93;&lt;/span&gt; class_process_config+0x1b6b/0x2f60 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa070bb90&amp;gt;&amp;#93;&lt;/span&gt; ? cfs_alloc+0x30/0x60 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0844723&amp;gt;&amp;#93;&lt;/span&gt; class_manual_cleanup+0x493/0xe80 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8147a1fe&amp;gt;&amp;#93;&lt;/span&gt; ? _read_unlock+0xe/0x10 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0817e2c&amp;gt;&amp;#93;&lt;/span&gt; ? class_name2dev+0x7c/0xe0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa087fb9d&amp;gt;&amp;#93;&lt;/span&gt; server_put_super+0x42d/0x2580 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0882440&amp;gt;&amp;#93;&lt;/span&gt; server_fill_super+0x750/0x1580 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa084fc98&amp;gt;&amp;#93;&lt;/span&gt; lustre_fill_super+0x1d8/0x530 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa084fac0&amp;gt;&amp;#93;&lt;/span&gt; ? lustre_fill_super+0x0/0x530 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8114d21f&amp;gt;&amp;#93;&lt;/span&gt; get_sb_nodev+0x5f/0xa0 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa08473f5&amp;gt;&amp;#93;&lt;/span&gt; lustre_get_sb+0x25/0x30 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8114c74b&amp;gt;&amp;#93;&lt;/span&gt; vfs_kern_mount+0x7b/0x1b0 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8114c8f2&amp;gt;&amp;#93;&lt;/span&gt; do_kern_mount+0x52/0x130 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81168912&amp;gt;&amp;#93;&lt;/span&gt; do_mount+0x2d2/0x8c0 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81168f90&amp;gt;&amp;#93;&lt;/span&gt; sys_mount+0x90/0xe0 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81002f5b&amp;gt;&amp;#93;&lt;/span&gt; system_call_fastpath+0x16/0x1b &lt;/p&gt;



&lt;p&gt;Message from Kernel panic - not syncing: LBUG &lt;br/&gt;
Pid: 18166, comm: mount.lustre Tainted: GF --------------- 2.6.32-358.6.2.l2.08 #2 &lt;br/&gt;
Call Trace: &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81476fa7&amp;gt;&amp;#93;&lt;/span&gt; ? panic+0xa1/0x163 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa070af0b&amp;gt;&amp;#93;&lt;/span&gt; ? lbug_with_loc+0x9b/0xb0 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0813d91&amp;gt;&amp;#93;&lt;/span&gt; ? obd_exports_barrier+0x181/0x190 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0f1e886&amp;gt;&amp;#93;&lt;/span&gt; ? mgs_device_fini+0xf6/0x5c0 &lt;span class=&quot;error&quot;&gt;&amp;#91;mgs&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa083e837&amp;gt;&amp;#93;&lt;/span&gt; ? class_cleanup+0x817/0xe00 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0817e2c&amp;gt;&amp;#93;&lt;/span&gt; ? class_name2dev+0x7c/0xe0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0842e9b&amp;gt;&amp;#93;&lt;/span&gt; ? class_process_config+0x1b6b/0x2f60 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa070bb90&amp;gt;&amp;#93;&lt;/span&gt; ? cfs_alloc+0x30/0x60 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0844723&amp;gt;&amp;#93;&lt;/span&gt; ? class_manual_cleanup+0x493/0xe80 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8147a1fe&amp;gt;&amp;#93;&lt;/span&gt; ? _read_unlock+0xe/0x10 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0817e2c&amp;gt;&amp;#93;&lt;/span&gt; ? class_name2dev+0x7c/0xe0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa087fb9d&amp;gt;&amp;#93;&lt;/span&gt; ? server_put_super+0x42d/0x2580 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0882440&amp;gt;&amp;#93;&lt;/span&gt; ? server_fill_super+0x750/0x1580 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa084fc98&amp;gt;&amp;#93;&lt;/span&gt; ? lustre_fill_super+0x1d8/0x530 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa084fac0&amp;gt;&amp;#93;&lt;/span&gt; ? lustre_fill_super+0x0/0x530 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8114d21f&amp;gt;&amp;#93;&lt;/span&gt; ? get_sb_nodev+0x5f/0xa0 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa08473f5&amp;gt;&amp;#93;&lt;/span&gt; ? lustre_get_sb+0x25/0x30 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt; &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8114c74b&amp;gt;&amp;#93;&lt;/span&gt; ? vfs_kern_mount+0x7b/0x1b0 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8114c8f2&amp;gt;&amp;#93;&lt;/span&gt; ? do_kern_mount+0x52/0x130 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81168912&amp;gt;&amp;#93;&lt;/span&gt; ? do_mount+0x2d2/0x8c0 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81168f90&amp;gt;&amp;#93;&lt;/span&gt; ? sys_mount+0x90/0xe0 &lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81002f5b&amp;gt;&amp;#93;&lt;/span&gt; ? system_call_fastpath+0x16/0x1b &lt;br/&gt;
*******&lt;b&gt;show para for nt_memcpy16&lt;/b&gt;******* &lt;br/&gt;
src: ffff8802e118fc40, dst: ffffc901125a8d70, len: 80 &lt;br/&gt;
*******&lt;b&gt;show para for panic done&lt;/b&gt;******* &lt;/p&gt;
</description>
                <environment>Lustre2.4.0&#65292;with 2 servers and 1 client, kernel version:2.6.32-358.6.2.l2.08&lt;br/&gt;
MGSnode: mgs, 1 mdt and 4osts&lt;br/&gt;
Failnode:  1mdt and 4 osts</environment>
        <key id="21726">LU-4190</key>
            <summary>LustreError: 18166:0:(genops.c:1570:obd_exports_barrier()) ASSERTION( list_empty(&amp;obd-&gt;obd_exports) ) failed: </summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="tappro">Mikhail Pershin</assignee>
                                    <reporter username="yueyuling">yueyuling</reporter>
                        <labels>
                    </labels>
                <created>Wed, 30 Oct 2013 08:29:16 +0000</created>
                <updated>Sun, 15 Dec 2019 08:27:19 +0000</updated>
                            <resolved>Sun, 15 Dec 2019 08:27:18 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>10</watches>
                                                                            <comments>
                            <comment id="70267" author="adilger" created="Wed, 30 Oct 2013 16:17:15 +0000"  >&lt;blockquote&gt;
&lt;p&gt;2 servers work normally at active-active status&lt;br/&gt;
1&#12289;Mount Lustre FS on the client and write and read data;&lt;br/&gt;
2&#12289;Umount the MDT on the Failnode;&lt;br/&gt;
3&#12289;Read data on the client from Lustre FS,successfully;&lt;br/&gt;
4&#12289;Mount the MDT on the Failnode;&lt;br/&gt;
5&#12289;Umount the MDT on the MGSnode;&lt;br/&gt;
6&#12289;Read data on the client from Lustre FS,failed;&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Are you mounting the same MDT device (lustre-MDT0000) on both nodes?  That is bad and will lead to filesystem corruption.  You should only mount it on one MDS node at a time.  I suggest you enable &quot;MMP&quot; on your devices with &quot;tune2fs -O mmp /dev/&amp;lt;mdt_or_ost_device&amp;gt;&quot; (this happens automatically if you format the filesystem with --failnode).&lt;/p&gt;</comment>
                            <comment id="70325" author="yueyuling" created="Thu, 31 Oct 2013 01:23:54 +0000"  >&lt;p&gt;Thank you for your response! But I didn&apos;t mount the same MDT device on both nodes. There are two MDS devices in my Lustre FS. I mount one MDT device on each node.&lt;br/&gt;
So, I modify my descriptions as follow:&lt;br/&gt;
2 servers work normally at active-active status&lt;br/&gt;
1&#12289;Mount MGS, MDT0000 and 4 OSTs at MGSnode, mount MDT0001 and other 4 OSTs at Failnode, and Mount Lustre FS on the client and write and read data;&lt;br/&gt;
2&#12289;Umount the MDT0001 at the Failnode;&lt;br/&gt;
3&#12289;Read data on the client from Lustre FS,successfully;&lt;br/&gt;
4&#12289;Mount the MDT0001 on the Failnode;&lt;br/&gt;
5&#12289;Umount the MDT0000 on the MGSnode;&lt;br/&gt;
6&#12289;Read data on the client from Lustre FS,failed;&lt;br/&gt;
7&#12289;Mount the MDT0000 on the MGSnode, the MGSnode crash and output as follow:&lt;/p&gt;</comment>
                            <comment id="81098" author="di.wang" created="Sat, 5 Apr 2014 04:18:31 +0000"  >&lt;p&gt;I tried this test on current master. &lt;/p&gt;

&lt;p&gt;MDT1&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@client-2 ~]# mkfs.lustre --reformat --mgs --mdt --index=0 --fsname lustre --failnode=10.10.4.3@tcp /dev/disk/by-id/scsi-1IET_00040001
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;MDT2&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@client-3 ~]#  mkfs.lustre --reformat --mgsnode=10.10.4.2@tcp --mgsnode=10.10.4.3@tcp --mdt --index=1 --fsname lustre  --failnode=10.10.4.2@tcp /dev/disk/by-id/scsi-1IET_00020001 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;But unfortunately it failed when I tried to mount mdt2&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@client-3 ~]# mount -t lustre /dev/disk/by-id/scsi-1IET_00020001 /mnt/mds2/
mount.lustre: mount /dev/sdj at /mnt/mds2 failed: No such file or directory
Is the MGS specification correct?
Is the filesystem name correct?
If upgrading, is the copied client log valid? (see upgrade docs)
[root@client-3 ~]# 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;...
LDISKFS-fs (sdj): mounted filesystem with ordered data mode. quota=on. Opts: 
Lustre: srv-lustre-MDT0001: No data found on store. Initialize space
Lustre: lustre-MDT0001: new disk, initializing
LustreError: 11-0: lustre-MDT0000-osp-MDT0001: Communicating with 10.10.4.2@tcp, operation mds_connect failed with -11.
LustreError: 13a-8: Failed to get MGS log params and no local copy.
LustreError: 2354:0:(obd_mount_server.c:699:lustre_lwp_add_conn()) lustre-MDT0001: can&apos;t find lwp device.
LustreError: 15c-8: MGC10.10.4.2@tcp: The configuration from log &apos;lustre-client&apos; failed (-2). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. See the syslog for more information.
LustreError: 2242:0:(obd_mount_server.c:1321:server_start_targets()) lustre-MDT0001: failed to start LWP: -2
LustreError: 2242:0:(obd_mount_server.c:1776:server_fill_super()) Unable to start targets: -2
Lustre: Failing over lustre-MDT0001
Lustre: server umount lustre-MDT0001 complete
LustreError: 2242:0:(obd_mount.c:1338:lustre_fill_super()) Unable to mount  (-2)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;


&lt;p&gt;config log&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@client-2 ~]# llog_reader /mnt/mds1/CONFIGS/lustre-client 
Header size : 8192
Time : Fri Apr  4 20:36:36 2014
Number of records: 30
Target uuid : config_uuid 
-----------------------
#01 (224)marker   4 (flags=0x01, v2.5.57.0) lustre-clilov   &apos;lov setup&apos; Fri Apr  4 20:36:36 2014-
#02 (120)attach    0:lustre-clilov  1:lov  2:lustre-clilov_UUID  
#03 (168)lov_setup 0:lustre-clilov  1:(struct lov_desc)
		uuid=lustre-clilov_UUID  stripe:cnt=1 size=1048576 offset=18446744073709551615 pattern=0x1
#04 (224)marker   4 (flags=0x02, v2.5.57.0) lustre-clilov   &apos;lov setup&apos; Fri Apr  4 20:36:36 2014-
#05 (224)marker   5 (flags=0x01, v2.5.57.0) lustre-clilmv   &apos;lmv setup&apos; Fri Apr  4 20:36:36 2014-
#06 (120)attach    0:lustre-clilmv  1:lmv  2:lustre-clilmv_UUID  
#07 (168)lov_setup 0:lustre-clilmv  1:(struct lov_desc)
		uuid=lustre-clilmv_UUID  stripe:cnt=0 size=0 offset=0 pattern=0
#08 (224)marker   5 (flags=0x02, v2.5.57.0) lustre-clilmv   &apos;lmv setup&apos; Fri Apr  4 20:36:36 2014-
#09 (224)marker   6 (flags=0x01, v2.5.57.0) lustre-MDT0000  &apos;add mdc&apos; Fri Apr  4 20:36:36 2014-
#10 (080)add_uuid  nid=10.10.4.2@tcp(0x200000a0a0402)  0:  1:10.10.4.2@tcp  
#11 (128)attach    0:lustre-MDT0000-mdc  1:mdc  2:lustre-clilmv_UUID  
#12 (136)setup     0:lustre-MDT0000-mdc  1:lustre-MDT0000_UUID  2:10.10.4.2@tcp  
#13 (080)add_uuid  nid=10.10.4.3@tcp(0x200000a0a0403)  0:  1:10.10.4.3@tcp  
#14 (104)add_conn  0:lustre-MDT0000-mdc  1:10.10.4.3@tcp  
#15 (160)modify_mdc_tgts add 0:lustre-clilmv  1:lustre-MDT0000_UUID  2:0  3:1  4:lustre-MDT0000-mdc_UUID  
#16 (224)marker   6 (flags=0x02, v2.5.57.0) lustre-MDT0000  &apos;add mdc&apos; Fri Apr  4 20:36:36 2014-
#17 (224)marker   7 (flags=0x01, v2.5.57.0) lustre-client   &apos;mount opts&apos; Fri Apr  4 20:36:36 2014-
#18 (120)mount_option 0:  1:lustre-client  2:lustre-clilov  3:lustre-clilmv  
#19 (224)marker   7 (flags=0x02, v2.5.57.0) lustre-client   &apos;mount opts&apos; Fri Apr  4 20:36:36 2014-
#20 (224)marker  11 (flags=0x01, v2.5.57.0) lustre-MDT0001  &apos;add mdc&apos; Fri Apr  4 20:50:05 2014-
#21 (080)add_uuid  nid=10.10.4.3@tcp(0x200000a0a0403)  0:  1:10.10.4.3@tcp  
#22 (128)attach    0:lustre-MDT0001-mdc  1:mdc  2:lustre-clilmv_UUID  
#23 (136)setup     0:lustre-MDT0001-mdc  1:lustre-MDT0001_UUID  2:10.10.4.3@tcp  
#24 (080)add_uuid  nid=10.10.4.2@tcp(0x200000a0a0402)  0:  1:10.10.4.2@tcp  
#25 (104)add_conn  0:lustre-MDT0001-mdc  1:10.10.4.2@tcp  
#26 (160)modify_mdc_tgts add 0:lustre-clilmv  1:lustre-MDT0001_UUID  2:1  3:1  4:lustre-MDT0001-mdc_UUID  
#27 (224)marker  11 (flags=0x02, v2.5.57.0) lustre-MDT0001  &apos;add mdc&apos; Fri Apr  4 20:50:05 2014-
#28 (224)marker  12 (flags=0x01, v2.5.57.0) lustre-client   &apos;mount opts&apos; Fri Apr  4 20:50:05 2014-
#29 (120)mount_option 0:  1:lustre-client  2:lustre-clilov  3:lustre-clilmv  
#30 (224)marker  12 (flags=0x02, v2.5.57.0) lustre-client   &apos;mount opts&apos; Fri Apr  4 20:50:05 2014-
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;It might be related with the change &lt;a href=&quot;http://review.whamcloud.com/7666&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/7666&lt;/a&gt;  Fan Yong, could you please comment here. Thanks!&lt;/p&gt;





</comment>
                            <comment id="81375" author="yong.fan" created="Thu, 10 Apr 2014 15:05:25 +0000"  >&lt;p&gt;The original issue happened on Lustre-2.4, but the patch &lt;a href=&quot;http://review.whamcloud.com/#/c/7666/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/7666/&lt;/a&gt; only has been applied to Lustre-2.6, then even though such patch has some issues, it should not affect Lustre-2.4, right?&lt;/p&gt;</comment>
                            <comment id="81748" author="di.wang" created="Wed, 16 Apr 2014 16:48:44 +0000"  >&lt;p&gt;Oh, I am not asking the original issue shown in this ticket, but the failure I met in my test, which stops me continue the test on 2.6. Hmm, I will create a new ticket then. &lt;/p&gt;</comment>
                            <comment id="81796" author="yueyuling" created="Thu, 17 Apr 2014 01:31:15 +0000"  >&lt;p&gt;In addition, I&apos;ve created the MGS, MDT0000 and MDT0001 separately with different device. So, the MGS and MDT0000 are in different devices.&lt;/p&gt;</comment>
                            <comment id="83087" author="adilger" created="Fri, 2 May 2014 17:16:13 +0000"  >&lt;p&gt;Mike, could you please try configuring a test system as described here to see if a similar problem still exists in master?  This seems similar to the failure in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4916&quot; title=&quot;mount failure when adding failover node to mkfs.lustre&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4916&quot;&gt;&lt;del&gt;LU-4916&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="83700" author="tappro" created="Sat, 10 May 2014 06:28:45 +0000"  >&lt;p&gt;I&apos;ve tried to repeat those steps after &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4916&quot; title=&quot;mount failure when adding failover node to mkfs.lustre&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4916&quot;&gt;&lt;del&gt;LU-4916&lt;/del&gt;&lt;/a&gt; fix and everything works, please clarify how did you write/read data from client? I&apos;d try to repeat all steps as close as possible.&lt;/p&gt;</comment>
                            <comment id="83880" author="jlevi" created="Mon, 12 May 2014 17:24:58 +0000"  >&lt;p&gt;Duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4916&quot; title=&quot;mount failure when adding failover node to mkfs.lustre&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4916&quot;&gt;&lt;del&gt;LU-4916&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="83990" author="tappro" created="Tue, 13 May 2014 15:00:08 +0000"  >&lt;p&gt;Jodi, this is not duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4916&quot; title=&quot;mount failure when adding failover node to mkfs.lustre&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4916&quot;&gt;&lt;del&gt;LU-4916&lt;/del&gt;&lt;/a&gt;, this was just blocked by &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4916&quot; title=&quot;mount failure when adding failover node to mkfs.lustre&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4916&quot;&gt;&lt;del&gt;LU-4916&lt;/del&gt;&lt;/a&gt;. Reported bug happened in Lustre 2.4 and &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4916&quot; title=&quot;mount failure when adding failover node to mkfs.lustre&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4916&quot;&gt;&lt;del&gt;LU-4916&lt;/del&gt;&lt;/a&gt; doesn&apos;t even exist there. It looks like this issue doesn&apos;t exist in current master and is not blocker for 2.6, but it exists in 2.4 as reported.&lt;/p&gt;</comment>
                            <comment id="84028" author="doug" created="Tue, 13 May 2014 17:46:44 +0000"  >&lt;p&gt;This is not a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4916&quot; title=&quot;mount failure when adding failover node to mkfs.lustre&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4916&quot;&gt;&lt;del&gt;LU-4916&lt;/del&gt;&lt;/a&gt;.  &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4916&quot; title=&quot;mount failure when adding failover node to mkfs.lustre&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4916&quot;&gt;&lt;del&gt;LU-4916&lt;/del&gt;&lt;/a&gt; blocks the ability to reproduce this issue, but does not resolve it.  As such, I am reopening and giving it a lower priority (since it cannot be reproduced thanks to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4916&quot; title=&quot;mount failure when adding failover node to mkfs.lustre&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4916&quot;&gt;&lt;del&gt;LU-4916&lt;/del&gt;&lt;/a&gt;).&lt;/p&gt;</comment>
                            <comment id="84068" author="yueyuling" created="Wed, 14 May 2014 03:20:05 +0000"  >&lt;p&gt;The steps of write/read data :&lt;br/&gt;
1&#12289;Mount Lustre FS on the client and write and read data&#65306;&lt;br/&gt;
     Repeat 100 times as follow&#65306; &lt;br/&gt;
              a&#12289;Create a new directory;&lt;br/&gt;
              b&#12289;Copy data from client to Lustre FS, 5 files per directory, each file is 1.2GB;&lt;br/&gt;
              c&#12289;Use MD5 to read the files in the directory and record the md5 value;&lt;br/&gt;
3&#12289;Read data on the client from Lustre FS,successfully;&lt;br/&gt;
      Use MD5 to read the files which are written at step 1.&lt;/p&gt;

</comment>
                            <comment id="259904" author="tappro" created="Sun, 15 Dec 2019 08:27:19 +0000"  >&lt;p&gt;Outdated&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="24242">LU-4916</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzw7bj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>11330</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>