<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:17:52 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-8473] conf-sanity test_41a with separate MGS stuck on starting client and timed out</title>
                <link>https://jira.whamcloud.com/browse/LU-8473</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;While running conf-sanity test 41a with separate MGT and MDT0000 devices, it failed as follows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== conf-sanity test 41a: mount mds with --nosvc and --nomgs == 23:48:16 (1470181696)
../libcfs/libcfs/libcfs options: &apos;cpu_npartitions=2&apos;
CMD: eagle-38vm3 test -b /dev/vda6
start mds service on eagle-38vm3
CMD: eagle-38vm3 mkdir -p /mnt/lustre-mds1
Loading modules from /usr/lib64/lustre
detected 2 online CPUs by sysfs
Force libcfs to create 2 CPU partitions
debug=-1
subsystem_debug=all -lnet -lnd -pinger
CMD: eagle-38vm3 test -b /dev/vda6
CMD: eagle-38vm3 e2label /dev/vda6
Starting mds1: -o nosvc -n  /dev/vda6 /mnt/lustre-mds1
CMD: eagle-38vm3 mkdir -p /mnt/lustre-mds1; mount -t lustre -o nosvc -n  		                   /dev/vda6 /mnt/lustre-mds1
nomtab: 1
CMD: eagle-38vm3 /usr/sbin/lctl get_param -n health_check
CMD: eagle-38vm3 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/openmpi/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin::/sbin:/bin:/usr/sbin: NAME=ncli sh rpc.sh set_default_debug \&quot;-1\&quot; \&quot;all -lnet -lnd -pinger\&quot; 24 
Start /dev/vda6 without service
CMD: eagle-38vm3 e2label /dev/vda6 2&amp;gt;/dev/null
Started lustre-MDT0000
CMD: eagle-38vm4 mkdir -p /mnt/lustre-ost1
CMD: eagle-38vm4 test -b /dev/vda5
CMD: eagle-38vm4 e2label /dev/vda5
Starting ost1:   /dev/vda5 /mnt/lustre-ost1
CMD: eagle-38vm4 mkdir -p /mnt/lustre-ost1; mount -t lustre   		                   /dev/vda5 /mnt/lustre-ost1
CMD: eagle-38vm4 /usr/sbin/lctl get_param -n health_check
CMD: eagle-38vm4 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/openmpi/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin::/sbin:/bin:/usr/sbin: NAME=ncli sh rpc.sh set_default_debug \&quot;-1\&quot; \&quot;all -lnet -lnd -pinger\&quot; 24 
CMD: eagle-38vm4 e2label /dev/vda5 				2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: eagle-38vm4 e2label /dev/vda5 				2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: eagle-38vm4 e2label /dev/vda5 2&amp;gt;/dev/null
Started lustre-OST0000
start mds service on eagle-38vm3
CMD: eagle-38vm3 mkdir -p /mnt/lustre-mds1
CMD: eagle-38vm3 test -b /dev/vda6
CMD: eagle-38vm3 e2label /dev/vda6
Starting mds1: -o nomgs,force  /dev/vda6 /mnt/lustre-mds1
CMD: eagle-38vm3 mkdir -p /mnt/lustre-mds1; mount -t lustre -o nomgs,force  		                   /dev/vda6 /mnt/lustre-mds1
eagle-38vm3: mount.lustre: mount /dev/vda6 at /mnt/lustre-mds1 failed: Operation already in progress
eagle-38vm3: The target service is already running. (/dev/vda6)
force: 1
Start of /dev/vda6 on mds1 failed 114
mount lustre on /mnt/lustre.....
Starting client: eagle-38vm1:  -o user_xattr,flock eagle-38vm3@tcp:/lustre /mnt/lustre
CMD: eagle-38vm1 mkdir -p /mnt/lustre
CMD: eagle-38vm1 mount -t lustre -o user_xattr,flock eagle-38vm3@tcp:/lustre /mnt/lustre
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/25ba3a02-590c-11e6-b2e2-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/25ba3a02-590c-11e6-b2e2-5254006e85c2&lt;/a&gt;&lt;/p&gt;</description>
                <environment></environment>
        <key id="38583">LU-8473</key>
            <summary>conf-sanity test_41a with separate MGS stuck on starting client and timed out</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="yujian">Jian Yu</assignee>
                                    <reporter username="yujian">Jian Yu</reporter>
                        <labels>
                    </labels>
                <created>Tue, 2 Aug 2016 23:57:31 +0000</created>
                <updated>Mon, 10 Oct 2016 16:32:17 +0000</updated>
                            <resolved>Mon, 15 Aug 2016 22:27:15 +0000</resolved>
                                    <version>Lustre 2.9.0</version>
                                    <fixVersion>Lustre 2.9.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>3</watches>
                                                                            <comments>
                            <comment id="160631" author="gerrit" created="Wed, 3 Aug 2016 01:12:34 +0000"  >&lt;p&gt;Jian Yu (jian.yu@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/21651&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21651&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8473&quot; title=&quot;conf-sanity test_41a with separate MGS stuck on starting client and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8473&quot;&gt;&lt;del&gt;LU-8473&lt;/del&gt;&lt;/a&gt; tests: skip conf-sanity test 41a with separate MGT and MDT&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b902e6977fd81a2b961e6cec36f45bef6a10990a&lt;/p&gt;</comment>
                            <comment id="161941" author="gerrit" created="Mon, 15 Aug 2016 21:11:17 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/21651/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21651/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8473&quot; title=&quot;conf-sanity test_41a with separate MGS stuck on starting client and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8473&quot;&gt;&lt;del&gt;LU-8473&lt;/del&gt;&lt;/a&gt; tests: skip conf-sanity test 41a with separate MGT and MDT&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 14b23d67a71cf2aa0b571553171a0894c73f11e6&lt;/p&gt;</comment>
                            <comment id="161961" author="pjones" created="Mon, 15 Aug 2016 22:27:15 +0000"  >&lt;p&gt;Landed for 2.9&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                                        </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="40392">LU-8688</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzyjdb:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>