<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:25:45 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2502] Test failure on test suite ost-pools, subtest test_23a</title>
                <link>https://jira.whamcloud.com/browse/LU-2502</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for liuying &amp;lt;emoly.liu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/71f17b88-4826-11e2-8cdc-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/71f17b88-4826-11e2-8cdc-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_23a failed with the following error:&lt;/p&gt;

&lt;blockquote&gt;&lt;p&gt;test failed to respond and timed out&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Client-1 console log showed&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;23:05:49:Lustre: DEBUG MARKER: == ost-pools test 23a: OST pools and quota =========================================================== 23:05:40 (1355727940)
23:05:49:Lustre: DEBUG MARKER: lctl get_param -n lov.lustre-*.pools.testpool 2&amp;gt;/dev/null || echo foo
23:05:49:Lustre: DEBUG MARKER: lctl get_param -n lov.lustre-*.pools.testpool | sort -u | tr &apos;\n&apos; &apos; &apos; 
23:06:01:Lustre: DEBUG MARKER: lctl get_param -n lov.lustre-*.pools.testpool | sort -u | tr &apos;\n&apos; &apos; &apos; 
23:06:01:LustreError: 19086:0:(quota_ctl.c:328:client_quota_ctl()) ptlrpc_queue_wait failed, rc: -114
23:06:01:LustreError: 19087:0:(lmv_obd.c:855:lmv_iocontrol()) error: iocontrol MDC lustre-MDT0000_UUID on MDTidx 0 cmd 800866a1: err = -22
23:06:01:LustreError: 19087:0:(lmv_obd.c:855:lmv_iocontrol()) error: iocontrol MDC lustre-MDT0000_UUID on MDTidx 0 cmd 800866a1: err = -22
23:06:12:LustreError: 19087:0:(lmv_obd.c:855:lmv_iocontrol()) error: iocontrol MDC lustre-MDT0000_UUID on MDTidx 0 cmd 800866a1: err = -22
...
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;MDS dmesg log showed&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: == ost-pools test 23a: OST pools and quota =========================================================== 23:05:40 (1355727940)
Lustre: DEBUG MARKER: lctl pool_new lustre.testpool
Lustre: DEBUG MARKER: lctl pool_add lustre.testpool lustre-OST[0000-0006/3]
Lustre: 4861:0:(quota_master.c:793:close_quota_files()) quota[0] is off already
Lustre: 4861:0:(quota_master.c:793:close_quota_files()) Skipped 1 previous similar message
LustreError: 12634:0:(fsfilt-ldiskfs.c:2181:fsfilt_ldiskfs_quotacheck()) quotacheck failed: rc = -22
LustreError: 12634:0:(quota_check.c:112:target_quotacheck_thread()) mdd_obd-lustre-MDT0000: fsfilt_quotacheck: -22
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="16946">LU-2502</key>
            <summary>Test failure on test suite ost-pools, subtest test_23a</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="2">Won&apos;t Fix</resolution>
                                        <assignee username="niu">Niu Yawei</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>quota</label>
                            <label>test</label>
                            <label>yuc2</label>
                    </labels>
                <created>Mon, 17 Dec 2012 08:02:27 +0000</created>
                <updated>Fri, 18 Nov 2016 04:24:35 +0000</updated>
                            <resolved>Fri, 18 Nov 2016 04:24:35 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                    <version>Lustre 2.1.4</version>
                    <version>Lustre 2.4.1</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>9</watches>
                                                                            <comments>
                            <comment id="49304" author="johann" created="Mon, 17 Dec 2012 09:19:10 +0000"  >&lt;p&gt;Was this interop tests or pure 2.1 testing?&lt;/p&gt;</comment>
                            <comment id="49305" author="johann" created="Mon, 17 Dec 2012 09:19:51 +0000"  >&lt;p&gt;cc Emoly.&lt;/p&gt;</comment>
                            <comment id="49307" author="pjones" created="Mon, 17 Dec 2012 09:28:43 +0000"  >&lt;p&gt;Johann&lt;/p&gt;

&lt;p&gt;I believe that this issue hit on a review run for this patch to b2_1 &lt;a href=&quot;http://review.whamcloud.com/#change,4831&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,4831&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="49357" author="niu" created="Tue, 18 Dec 2012 01:59:41 +0000"  >&lt;p&gt;maybe it&apos;s caused by some changes in the new kernel? (2.6.32-279.14.1.el6_lustre.g9963a82.x86_64)&lt;/p&gt;</comment>
                            <comment id="49359" author="yujian" created="Tue, 18 Dec 2012 02:30:35 +0000"  >&lt;p&gt;Searching the historical reports on Maloo showed that this issue did not occur on b2_1 branch before, neither on 2.1.4 RC1:&lt;br/&gt;
&lt;a href=&quot;http://tinyurl.com/cjevqxt&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://tinyurl.com/cjevqxt&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="49419" author="niu" created="Tue, 18 Dec 2012 23:50:42 +0000"  >&lt;blockquote&gt;
&lt;p&gt;Searching the historical reports on Maloo showed that this issue did not occur on b2_1 branch before, neither on 2.1.4 RC1:&lt;br/&gt;
&lt;a href=&quot;http://tinyurl.com/cjevqxt&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://tinyurl.com/cjevqxt&lt;/a&gt;&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Yes, then it seems not a new kernel issue (there were some tests with new kernel passed).&lt;/p&gt;</comment>
                            <comment id="49674" author="niu" created="Wed, 26 Dec 2012 02:15:41 +0000"  >&lt;p&gt;Looks it was not reproduced in b2_1 tests anymore.&lt;/p&gt;</comment>
                            <comment id="52560" author="niu" created="Sun, 17 Feb 2013 02:04:33 +0000"  >&lt;p&gt;can&apos;t be reproduced.&lt;/p&gt;</comment>
                            <comment id="53029" author="sarah" created="Tue, 26 Feb 2013 04:38:49 +0000"  >&lt;p&gt;I think the following error found in interop between 2.1.4 server and 2.4 client is the same one, so reopen it.&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/3539abfa-7d84-11e2-85d0-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/3539abfa-7d84-11e2-85d0-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;MDS console shows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;10:46:09:Lustre: DEBUG MARKER: == ost-pools test 23a: OST pools and quota == 10:46:09 (1361558769)
10:46:09:Lustre: DEBUG MARKER: lctl pool_new lustre.testpool
10:46:20:Lustre: DEBUG MARKER: lctl pool_add lustre.testpool lustre-OST[0000-0006/3]
10:46:31:Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n version
10:46:31:Lustre: 3818:0:(quota_master.c:795:close_quota_files()) quota[0] is off already
10:46:31:Lustre: 3818:0:(quota_master.c:795:close_quota_files()) quota[1] is off already
10:46:31:LustreError: 13718:0:(fsfilt-ldiskfs.c:2181:fsfilt_ldiskfs_quotacheck()) quotacheck failed: rc = -22
10:46:31:LustreError: 13718:0:(quota_check.c:114:target_quotacheck_thread()) mdd_obd-lustre-MDT0000: fsfilt_quotacheck: -22
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="55174" author="niu" created="Mon, 1 Apr 2013 08:22:04 +0000"  >&lt;p&gt;Hi, Sarah, is it reproducible? Could you try to catch the log on MDS with D_TRACE enabled? So we can see how fsfilt_ldiskfs_quotacheck() failed with -22. Thank you.&lt;/p&gt;</comment>
                            <comment id="55204" author="sarah" created="Mon, 1 Apr 2013 17:27:24 +0000"  >&lt;p&gt;ok, will let you know when I get the results.&lt;/p&gt;</comment>
                            <comment id="56034" author="sarah" created="Wed, 10 Apr 2013 17:56:04 +0000"  >&lt;p&gt;I reran the test 3 times and cannot reproduce it with server is running 2.1.5 and client is running 2.4&lt;/p&gt;</comment>
                            <comment id="66329" author="yujian" created="Wed, 11 Sep 2013 11:39:28 +0000"  >&lt;p&gt;Lustre client: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_4/45/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_4/45/&lt;/a&gt;  (2.4.1 RC2)&lt;br/&gt;
Lustre server: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/215/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/215/&lt;/a&gt; (2.1.6)&lt;/p&gt;

&lt;p&gt;ost-pools test 23a hit the same failure:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/2f0438ae-1abe-11e3-bf23-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/2f0438ae-1abe-11e3-bf23-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="110583" author="yong.fan" created="Wed, 25 Mar 2015 04:17:08 +0000"  >&lt;p&gt;Another failure instance on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/314d4b36-d288-11e4-a0e2-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/314d4b36-d288-11e4-a0e2-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="174181" author="niu" created="Fri, 18 Nov 2016 04:24:24 +0000"  >&lt;p&gt;This was an interop issue between 2.1 client with 2.4 servers. I think it&apos;s not relevant anymore. The last instance on master reported by nasf is a different issue.&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvdxb:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>5864</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>