<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:46:57 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4916] mount failure when adding failover node to mkfs.lustre</title>
                <link>https://jira.whamcloud.com/browse/LU-4916</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;I tried this test on current master.&lt;br/&gt;
MDT1&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@client-2 ~]# mkfs.lustre --reformat --mgs --mdt --index=0 --fsname lustre --failnode=10.10.4.3@tcp /dev/disk/by-id/scsi-1IET_00040001
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;MDT2&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@client-3 ~]#  mkfs.lustre --reformat --mgsnode=10.10.4.2@tcp --mgsnode=10.10.4.3@tcp --mdt --index=1 --fsname lustre  --failnode=10.10.4.2@tcp /dev/disk/by-id/scsi-1IET_00020001
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;But unfortunately it failed when I tried to mount mdt2&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@client-3 ~]# mount -t lustre /dev/disk/by-id/scsi-1IET_00020001 /mnt/mds2/
mount.lustre: mount /dev/sdj at /mnt/mds2 failed: No such file or directory
Is the MGS specification correct?
Is the filesystem name correct?
If upgrading, is the copied client log valid? (see upgrade docs)
[root@client-3 ~]# 
...
LDISKFS-fs (sdj): mounted filesystem with ordered data mode. quota=on. Opts: 
Lustre: srv-lustre-MDT0001: No data found on store. Initialize space
Lustre: lustre-MDT0001: new disk, initializing
LustreError: 11-0: lustre-MDT0000-osp-MDT0001: Communicating with 10.10.4.2@tcp, operation mds_connect failed with -11.
LustreError: 13a-8: Failed to get MGS log params and no local copy.
LustreError: 2354:0:(obd_mount_server.c:699:lustre_lwp_add_conn()) lustre-MDT0001: can&apos;t find lwp device.
LustreError: 15c-8: MGC10.10.4.2@tcp: The configuration from log &apos;lustre-client&apos; failed (-2). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. See the syslog for more information.
LustreError: 2242:0:(obd_mount_server.c:1321:server_start_targets()) lustre-MDT0001: failed to start LWP: -2
LustreError: 2242:0:(obd_mount_server.c:1776:server_fill_super()) Unable to start targets: -2
Lustre: Failing over lustre-MDT0001
Lustre: server umount lustre-MDT0001 complete
LustreError: 2242:0:(obd_mount.c:1338:lustre_fill_super()) Unable to mount  (-2)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;config log&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@client-2 ~]# llog_reader /mnt/mds1/CONFIGS/lustre-client 
Header size : 8192
Time : Fri Apr  4 20:36:36 2014
Number of records: 30
Target uuid : config_uuid 
-----------------------
#01 (224)marker   4 (flags=0x01, v2.5.57.0) lustre-clilov   &apos;lov setup&apos; Fri Apr  4 20:36:36 2014-
#02 (120)attach    0:lustre-clilov  1:lov  2:lustre-clilov_UUID  
#03 (168)lov_setup 0:lustre-clilov  1:(struct lov_desc)
		uuid=lustre-clilov_UUID  stripe:cnt=1 size=1048576 offset=18446744073709551615 pattern=0x1
#04 (224)marker   4 (flags=0x02, v2.5.57.0) lustre-clilov   &apos;lov setup&apos; Fri Apr  4 20:36:36 2014-
#05 (224)marker   5 (flags=0x01, v2.5.57.0) lustre-clilmv   &apos;lmv setup&apos; Fri Apr  4 20:36:36 2014-
#06 (120)attach    0:lustre-clilmv  1:lmv  2:lustre-clilmv_UUID  
#07 (168)lov_setup 0:lustre-clilmv  1:(struct lov_desc)
		uuid=lustre-clilmv_UUID  stripe:cnt=0 size=0 offset=0 pattern=0
#08 (224)marker   5 (flags=0x02, v2.5.57.0) lustre-clilmv   &apos;lmv setup&apos; Fri Apr  4 20:36:36 2014-
#09 (224)marker   6 (flags=0x01, v2.5.57.0) lustre-MDT0000  &apos;add mdc&apos; Fri Apr  4 20:36:36 2014-
#10 (080)add_uuid  nid=10.10.4.2@tcp(0x200000a0a0402)  0:  1:10.10.4.2@tcp  
#11 (128)attach    0:lustre-MDT0000-mdc  1:mdc  2:lustre-clilmv_UUID  
#12 (136)setup     0:lustre-MDT0000-mdc  1:lustre-MDT0000_UUID  2:10.10.4.2@tcp  
#13 (080)add_uuid  nid=10.10.4.3@tcp(0x200000a0a0403)  0:  1:10.10.4.3@tcp  
#14 (104)add_conn  0:lustre-MDT0000-mdc  1:10.10.4.3@tcp  
#15 (160)modify_mdc_tgts add 0:lustre-clilmv  1:lustre-MDT0000_UUID  2:0  3:1  4:lustre-MDT0000-mdc_UUID  
#16 (224)marker   6 (flags=0x02, v2.5.57.0) lustre-MDT0000  &apos;add mdc&apos; Fri Apr  4 20:36:36 2014-
#17 (224)marker   7 (flags=0x01, v2.5.57.0) lustre-client   &apos;mount opts&apos; Fri Apr  4 20:36:36 2014-
#18 (120)mount_option 0:  1:lustre-client  2:lustre-clilov  3:lustre-clilmv  
#19 (224)marker   7 (flags=0x02, v2.5.57.0) lustre-client   &apos;mount opts&apos; Fri Apr  4 20:36:36 2014-
#20 (224)marker  11 (flags=0x01, v2.5.57.0) lustre-MDT0001  &apos;add mdc&apos; Fri Apr  4 20:50:05 2014-
#21 (080)add_uuid  nid=10.10.4.3@tcp(0x200000a0a0403)  0:  1:10.10.4.3@tcp  
#22 (128)attach    0:lustre-MDT0001-mdc  1:mdc  2:lustre-clilmv_UUID  
#23 (136)setup     0:lustre-MDT0001-mdc  1:lustre-MDT0001_UUID  2:10.10.4.3@tcp  
#24 (080)add_uuid  nid=10.10.4.2@tcp(0x200000a0a0402)  0:  1:10.10.4.2@tcp  
#25 (104)add_conn  0:lustre-MDT0001-mdc  1:10.10.4.2@tcp  
#26 (160)modify_mdc_tgts add 0:lustre-clilmv  1:lustre-MDT0001_UUID  2:1  3:1  4:lustre-MDT0001-mdc_UUID  
#27 (224)marker  11 (flags=0x02, v2.5.57.0) lustre-MDT0001  &apos;add mdc&apos; Fri Apr  4 20:50:05 2014-
#28 (224)marker  12 (flags=0x01, v2.5.57.0) lustre-client   &apos;mount opts&apos; Fri Apr  4 20:50:05 2014-
#29 (120)mount_option 0:  1:lustre-client  2:lustre-clilov  3:lustre-clilmv  
#30 (224)marker  12 (flags=0x02, v2.5.57.0) lustre-client   &apos;mount opts&apos; Fri Apr  4 20:50:05 2014-
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;It might be related with the change &lt;a href=&quot;http://review.whamcloud.com/7666&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/7666&lt;/a&gt; Fan Yong, could you please comment here. Thanks!&lt;/p&gt;</description>
                <environment></environment>
        <key id="24242">LU-4916</key>
            <summary>mount failure when adding failover node to mkfs.lustre</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="tappro">Mikhail Pershin</assignee>
                                    <reporter username="di.wang">Di Wang</reporter>
                        <labels>
                            <label>dne</label>
                    </labels>
                <created>Wed, 16 Apr 2014 16:52:31 +0000</created>
                <updated>Thu, 14 Aug 2014 22:24:10 +0000</updated>
                            <resolved>Tue, 13 May 2014 15:42:11 +0000</resolved>
                                    <version>Lustre 2.6.0</version>
                                    <fixVersion>Lustre 2.6.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>8</watches>
                                                                            <comments>
                            <comment id="81858" author="jlevi" created="Thu, 17 Apr 2014 17:50:27 +0000"  >&lt;p&gt;Mike,&lt;br/&gt;
Can you please take this one?&lt;br/&gt;
Thank you!&lt;/p&gt;</comment>
                            <comment id="82234" author="tappro" created="Wed, 23 Apr 2014 03:14:51 +0000"  >&lt;p&gt;OK, I am looking at this&lt;/p&gt;</comment>
                            <comment id="83077" author="adilger" created="Fri, 2 May 2014 16:02:49 +0000"  >&lt;p&gt;Mike, have you had a chance to try the above steps?&lt;/p&gt;</comment>
                            <comment id="83394" author="tappro" created="Wed, 7 May 2014 14:52:36 +0000"  >&lt;p&gt;Andreas, I can reproduce that with the same error message about LWP. I am checking Di proposal to check commit &lt;a href=&quot;http://review.whamcloud.com/7666&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/7666&lt;/a&gt; now&lt;/p&gt;</comment>
                            <comment id="83556" author="tappro" created="Thu, 8 May 2014 18:48:27 +0000"  >&lt;p&gt;this issue was introduced in &lt;a href=&quot;http://review.whamcloud.com/7666&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/7666&lt;/a&gt; as Di supposed. The lwp device is not set up for every MDT-MDT pair, but the lwp connection is added even for the missing lwp device.&lt;/p&gt;

&lt;p&gt;Patch is here: &lt;a href=&quot;http://review.whamcloud.com/10272&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/10272&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="84003" author="jlevi" created="Tue, 13 May 2014 15:42:11 +0000"  >&lt;p&gt;Patch landed to Master.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="20963">LU-3951</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="21726">LU-4190</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwka7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>13580</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>