<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:20:02 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-8728] Fix conf-sanity:88 for the multiple MDS case</title>
                <link>https://jira.whamcloud.com/browse/LU-8728</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;The start_mds call starts all MDSs. lctl clear_conf fails because it expects only a single MDS, the one combined with the MGS, to be started with the nosvc option. Only the start_mdt call should be used to start the required MDS.&lt;/p&gt;
</description>
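<!--
Editor's note: a minimal bash sketch of the fix the description proposes, for
conf-sanity.sh test_88. It assumes the stock lustre/tests/test-framework.sh
helpers (start_mds, start_mdt, stop_mdt, do_facet) and the standard variables
$LCTL and $FSNAME; the exact call sites are illustrative, not quoted from the
patch:

    # Before: start_mds starts every configured MDT, so clear_conf
    # fails, since it expects a single MDS (the one combined with
    # the MGS) mounted with the nosvc option.
    start_mds "-o nosvc"

    # After: start only mds1 (the MGS/MDT0000 node) without the MDT
    # service, run clear_conf on the MGS, then stop it again.
    start_mdt 1 "-o nosvc" || error "start mds1 with nosvc failed"
    do_facet mgs "$LCTL clear_conf $FSNAME" || error "clear_conf failed"
    stop_mdt 1 || error "stop mds1 failed"
-->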
                <environment></environment>
        <key id="40797">LU-8728</key>
            <summary>Fix conf-sanity:88 for the multiple MDS case</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="arshad512">Arshad Hussain</reporter>
                        <labels>
                    </labels>
                <created>Wed, 19 Oct 2016 03:26:03 +0000</created>
                <updated>Fri, 8 Dec 2017 21:35:44 +0000</updated>
                            <resolved>Fri, 8 Dec 2017 21:35:44 +0000</resolved>
                                                                        <due></due>
                            <votes>0</votes>
                                    <watches>3</watches>
                                                                            <comments>
                            <comment id="170270" author="gerrit" created="Wed, 19 Oct 2016 04:28:42 +0000"  >&lt;p&gt;Arshad Hussain (arshad.hussain@seagate.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/23246&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/23246&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8728&quot; title=&quot;Fix conf-sanity:88 for the multiple MDS case&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8728&quot;&gt;&lt;del&gt;LU-8728&lt;/del&gt;&lt;/a&gt; tests: fix conf-sanity:88 for the multiple MDS case&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 1f9c29af71015534d3c5c608de7b50b81fdad634&lt;/p&gt;</comment>
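<!--
Editor's note: the Gerrit comment above gives everything needed to fetch the
patch locally. A minimal sketch, assuming the standard Gerrit refs/changes
layout (refs/changes/<NN>/<change>/<patchset>, where NN is the last two digits
of the change number) and the commonly published Whamcloud clone URL:

    git clone git://git.whamcloud.com/fs/lustre-release.git
    cd lustre-release
    git fetch http://review.whamcloud.com/fs/lustre-release refs/changes/46/23246/1
    git checkout FETCH_HEAD   # commit 1f9c29af71015534d3c5c608de7b50b81fdad634
-->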
                            <comment id="170272" author="arshad512" created="Wed, 19 Oct 2016 05:02:42 +0000"  >&lt;p&gt;Test result on local: 88a&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;== conf-sanity test 88a: test lctl clear_conf fsname == 22:03:06 (1475944386)
Stopping clients: node1.domain /mnt/lustre (opts:)
Stopping clients: node1.domain /mnt/lustre2 (opts:)
Loading modules from /root/hpdd/lustre-wc/lustre/tests/..
detected 1 online CPUs by sysfs
libcfs will create CPU partition based on online CPUs
debug=-1
subsystem_debug=all
../lnet/lnet/lnet options: 'networks=tcp0(eth1) accept=all'
gss/krb5 is not supported
quota/lquota options: 'hash_lqs_cur_bits=3'
Formatting mgs, mds, osts
Format mds1: /tmp/lustre-mdt1
Format ost1: /tmp/lustre-ost1
Format ost2: /tmp/lustre-ost2
start mds service on node1.domain
Starting mds1:   -o loop /tmp/lustre-mdt1 /mnt/lustre-mds1
Commit the device label on /tmp/lustre-mdt1
Started lustre-MDT0000
start ost1 service on node1.domain
Starting ost1:   -o loop /tmp/lustre-ost1 /mnt/lustre-ost1
Commit the device label on /tmp/lustre-ost1
Started lustre-OST0000
mount lustre on /mnt/lustre.....
Starting client: node1.domain:  -o user_xattr,flock node1.domain@tcp:/lustre /mnt/lustre
Setting lustre-MDT0000.mdd.atime_diff from 60 to 62
Waiting 90 secs for update
Updated after 2s: wanted '62' got '62'
Setting lustre-MDT0000.mdd.atime_diff from 62 to 63
Waiting 90 secs for update
Updated after 5s: wanted '63' got '63'
Setting lustre.llite.max_read_ahead_mb from 27.13 to 32
Waiting 90 secs for update
Updated after 8s: wanted '32' got '32'
Setting lustre.llite.max_read_ahead_mb from 32 to 64
Waiting 90 secs for update
Updated after 6s: wanted '64' got '64'
Pool lustre.pool1 created
OST lustre-OST0000_UUID added to pool lustre.pool1
OST lustre-OST0000_UUID removed from pool lustre.pool1
OST lustre-OST0000_UUID added to pool lustre.pool1
umount lustre on /mnt/lustre.....
Stopping client node1.domain /mnt/lustre (opts:)
stop ost1 service on node1.domain
Stopping /mnt/lustre-ost1 (opts:-f) on node1.domain
stop mds service on node1.domain
Stopping /mnt/lustre-mds1 (opts:-f) on node1.domain
start mds service on node1.domain
Starting mds1: -o nosvc,loop  /tmp/lustre-mdt1 /mnt/lustre-mds1
Start /tmp/lustre-mdt1 without service
Started lustre-MDT0000
debugfs 1.42.13.wc3 (28-Aug-2015)
/tmp/lustre-mdt1: catastrophic mode - not reading inode or group bitmaps
stop mds service on node1.domain
Stopping /mnt/lustre-mds1 (opts:-f) on node1.domain
debugfs 1.42.13.wc3 (28-Aug-2015)
/tmp/lustre-mdt1: catastrophic mode - not reading inode or group bitmaps
start mds service on node1.domain
Starting mds1:   -o loop /tmp/lustre-mdt1 /mnt/lustre-mds1
Started lustre-MDT0000
start ost1 service on node1.domain
Starting ost1:   -o loop /tmp/lustre-ost1 /mnt/lustre-ost1
Started lustre-OST0000
mount lustre on /mnt/lustre.....
Starting client: node1.domain:  -o user_xattr,flock node1.domain@tcp:/lustre /mnt/lustre
umount lustre on /mnt/lustre.....
Stopping client node1.domain /mnt/lustre (opts:)
stop ost1 service on node1.domain
Stopping /mnt/lustre-ost1 (opts:-f) on node1.domain
stop mds service on node1.domain
Stopping /mnt/lustre-mds1 (opts:-f) on node1.domain
modules unloaded.
Stopping clients: node1.domain /mnt/lustre (opts:)
Stopping clients: node1.domain /mnt/lustre2 (opts:)
Loading modules from /root/hpdd/lustre-wc/lustre/tests/..
detected 1 online CPUs by sysfs
libcfs will create CPU partition based on online CPUs
debug=-1
subsystem_debug=all
../lnet/lnet/lnet options: 'networks=tcp0(eth1) accept=all'
gss/krb5 is not supported
quota/lquota options: 'hash_lqs_cur_bits=3'
Formatting mgs, mds, osts
Format mds1: /tmp/lustre-mdt1
Format ost1: /tmp/lustre-ost1
Format ost2: /tmp/lustre-ost2
Resetting fail_loc on all nodes...done.
22:05:36 (1475944536) waiting for node1.domain network 5 secs ...
22:05:36 (1475944536) network interface is UP
PASS 88a (150s)
Stopping clients: node1.domain /mnt/lustre (opts:)
Stopping clients: node1.domain /mnt/lustre2 (opts:)
Loading modules from /root/hpdd/lustre-wc/lustre/tests/..
detected 1 online CPUs by sysfs
libcfs will create CPU partition based on online CPUs
debug=-1
subsystem_debug=all
gss/krb5 is not supported
Formatting mgs, mds, osts
Format mds1: /tmp/lustre-mdt1
Format ost1: /tmp/lustre-ost1
Format ost2: /tmp/lustre-ost2
== conf-sanity test complete, duration 198 sec == 22:05:39 (1475944539)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Test result on local: 88b&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;== conf-sanity test 88b: test lctl clear_conf one config == 22:07:00 (1475944620)
Stopping clients: node1.domain /mnt/lustre (opts:)
Stopping clients: node1.domain /mnt/lustre2 (opts:)
Loading modules from /root/hpdd/lustre-wc/lustre/tests/..
detected 1 online CPUs by sysfs
libcfs will create CPU partition based on online CPUs
debug=-1
subsystem_debug=all
../lnet/lnet/lnet options: 'networks=tcp0(eth1) accept=all'
gss/krb5 is not supported
quota/lquota options: 'hash_lqs_cur_bits=3'
Formatting mgs, mds, osts
Format mds1: /tmp/lustre-mdt1
Format ost1: /tmp/lustre-ost1
Format ost2: /tmp/lustre-ost2
start mds service on node1.domain
Starting mds1:   -o loop /tmp/lustre-mdt1 /mnt/lustre-mds1
Commit the device label on /tmp/lustre-mdt1
Started lustre-MDT0000
start ost1 service on node1.domain
Starting ost1:   -o loop /tmp/lustre-ost1 /mnt/lustre-ost1
Commit the device label on /tmp/lustre-ost1
Started lustre-OST0000
mount lustre on /mnt/lustre.....
Starting client: node1.domain:  -o user_xattr,flock node1.domain@tcp:/lustre /mnt/lustre
Setting lustre-MDT0000.mdd.atime_diff from 60 to 62
Waiting 90 secs for update
Updated after 6s: wanted '62' got '62'
Setting lustre-MDT0000.mdd.atime_diff from 62 to 63
Waiting 90 secs for update
Updated after 7s: wanted '63' got '63'
Setting lustre.llite.max_read_ahead_mb from 27.13 to 32
Waiting 90 secs for update
Updated after 7s: wanted '32' got '32'
Setting lustre.llite.max_read_ahead_mb from 32 to 64
Waiting 90 secs for update
Updated after 6s: wanted '64' got '64'
Pool lustre.pool1 created
OST lustre-OST0000_UUID added to pool lustre.pool1
OST lustre-OST0000_UUID removed from pool lustre.pool1
OST lustre-OST0000_UUID added to pool lustre.pool1
umount lustre on /mnt/lustre.....
Stopping client node1.domain /mnt/lustre (opts:)
stop ost1 service on node1.domain
Stopping /mnt/lustre-ost1 (opts:-f) on node1.domain
stop mds service on node1.domain
Stopping /mnt/lustre-mds1 (opts:-f) on node1.domain
start mds service on node1.domain
Starting mds1: -o nosvc,loop  /tmp/lustre-mdt1 /mnt/lustre-mds1
Start /tmp/lustre-mdt1 without service
Started lustre-MDT0000
debugfs 1.42.13.wc3 (28-Aug-2015)
/tmp/lustre-mdt1: catastrophic mode - not reading inode or group bitmaps
stop mds service on node1.domain
Stopping /mnt/lustre-mds1 (opts:-f) on node1.domain
debugfs 1.42.13.wc3 (28-Aug-2015)
/tmp/lustre-mdt1: catastrophic mode - not reading inode or group bitmaps
start mds service on node1.domain
Starting mds1:   -o loop /tmp/lustre-mdt1 /mnt/lustre-mds1
Started lustre-MDT0000
start ost1 service on node1.domain
Starting ost1:   -o loop /tmp/lustre-ost1 /mnt/lustre-ost1
Started lustre-OST0000
mount lustre on /mnt/lustre.....
Starting client: node1.domain:  -o user_xattr,flock node1.domain@tcp:/lustre /mnt/lustre
umount lustre on /mnt/lustre.....
Stopping client node1.domain /mnt/lustre (opts:)
stop ost1 service on node1.domain
Stopping /mnt/lustre-ost1 (opts:-f) on node1.domain
stop mds service on node1.domain
Stopping /mnt/lustre-mds1 (opts:-f) on node1.domain
modules unloaded.
Stopping clients: node1.domain /mnt/lustre (opts:)
Stopping clients: node1.domain /mnt/lustre2 (opts:)
Loading modules from /root/hpdd/lustre-wc/lustre/tests/..
detected 1 online CPUs by sysfs
libcfs will create CPU partition based on online CPUs
debug=-1
subsystem_debug=all
../lnet/lnet/lnet options: 'networks=tcp0(eth1) accept=all'
gss/krb5 is not supported
quota/lquota options: 'hash_lqs_cur_bits=3'
Formatting mgs, mds, osts
Format mds1: /tmp/lustre-mdt1
Format ost1: /tmp/lustre-ost1
Format ost2: /tmp/lustre-ost2
Resetting fail_loc on all nodes...done.
22:09:30 (1475944770) waiting for node1.domain network 5 secs ...
22:09:30 (1475944770) network interface is UP
PASS 88b (151s)
Stopping clients: node1.domain /mnt/lustre (opts:)
Stopping clients: node1.domain /mnt/lustre2 (opts:)
Loading modules from /root/hpdd/lustre-wc/lustre/tests/..
detected 1 online CPUs by sysfs
libcfs will create CPU partition based on online CPUs
debug=-1
subsystem_debug=all
gss/krb5 is not supported
Formatting mgs, mds, osts
Format mds1: /tmp/lustre-mdt1
Format ost1: /tmp/lustre-ost1
Format ost2: /tmp/lustre-ost2
== conf-sanity test complete, duration 190 sec == 22:09:33 (1475944773)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
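<!--
Editor's note: the 88a/88b runs above exercise the documented clear_conf flow:
mount the MGS target with nosvc so no MDT service starts, erase the stale
configuration logs, then remount normally. A hedged sketch of the equivalent
manual steps (device paths are taken from the log; the 88b argument is an
illustrative single-target config name):

    # Mount the combined MGS/MDT0000 without starting the MDT service
    mount -t lustre -o nosvc,loop /tmp/lustre-mdt1 /mnt/lustre-mds1

    lctl clear_conf lustre            # 88a: clear all configs for the fsname
    lctl clear_conf lustre-MDT0000    # 88b: clear one config log only

    # Unmount, then restart the target normally
    umount /mnt/lustre-mds1
    mount -t lustre -o loop /tmp/lustre-mdt1 /mnt/lustre-mds1
-->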
                            <comment id="215842" author="adilger" created="Fri, 8 Dec 2017 21:35:44 +0000"  >&lt;p&gt;Closing this as a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8727&quot; title=&quot;Remove skip records from config file&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8727&quot;&gt;&lt;del&gt;LU-8727&lt;/del&gt;&lt;/a&gt;, since that patch introduces this problem and it should be fixed in that patch before it is landed rather than having a separate ticket for it.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10322">
                    <name>Gantt End to Start</name>
                                                                <inwardlinks description="has to be done after">
                                        <issuelink>
            <issuekey id="40796">LU-8727</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzysg7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>