<?xml version="1.0" encoding="UTF-8"?>
<!--
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:42:33 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4419] Test failure on test suite recovery-small, subtest test_110a</title>
                <link>https://jira.whamcloud.com/browse/LU-4419</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for nasf &amp;lt;fan.yong@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;http://maloo.whamcloud.com/test_sets/e0c22550-6db7-11e3-a191-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://maloo.whamcloud.com/test_sets/e0c22550-6db7-11e3-a191-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;Slave MDT cannot allocate super-sequence:&lt;/p&gt;

&lt;p&gt;07:41:38:Lustre: DEBUG MARKER: == recovery-small test 110a: create remote directory: drop client req == 07:40:37 (1387986037)&lt;br/&gt;
07:41:38:LustreError: 31316:0:(fid_handler.c:285:__seq_server_alloc_meta()) srv-lustre-MDT0001: Can&apos;t allocate super-sequence, rc -5&lt;br/&gt;
07:41:38:Lustre: DEBUG MARKER: /usr/sbin/lctl mark  recovery-small test_110a: @@@@@@ FAIL: lfs mkdir failed &lt;br/&gt;
07:41:38:Lustre: DEBUG MARKER: recovery-small test_110a: @@@@@@ FAIL: lfs mkdir failed&lt;/p&gt;</description>
                <environment></environment>
        <key id="22588">LU-4419</key>
            <summary>Test failure on test suite recovery-small, subtest test_110a</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="tappro">Mikhail Pershin</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Sun, 29 Dec 2013 06:51:21 +0000</created>
                <updated>Sun, 22 Jul 2018 09:01:06 +0000</updated>
                            <resolved>Sun, 22 Jul 2018 09:01:06 +0000</resolved>
                                                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="74146" author="yong.fan" created="Sun, 29 Dec 2013 06:55:04 +0000"  >&lt;p&gt;We have hit the failure several times:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/e0c22550-6db7-11e3-a191-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/e0c22550-6db7-11e3-a191-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/e553b5f8-6f2d-11e3-ad93-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/e553b5f8-6f2d-11e3-ad93-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/fe06d80c-6ebd-11e3-ad93-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/fe06d80c-6ebd-11e3-ad93-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="74149" author="di.wang" created="Sun, 29 Dec 2013 17:12:58 +0000"  >&lt;p&gt;Hmm, I am not sure this is a new bug or only exists in your or Mike&apos;s patch series, since I do not see it exists in other&apos;s patch. Please correct me, if I am wrong. Are your patches still dependent?&lt;/p&gt;</comment>
                            <comment id="74151" author="yong.fan" created="Sun, 29 Dec 2013 23:54:06 +0000"  >&lt;p&gt;I cannot search the test results history because of Maloo issues. The first known failure instance was found in Mike&apos;s patch. But I do not think it is special issue in such patch, but more like general master bug. Because his patch does not touch MDT/FID stack. Current, LFSCK patches still depends on Mike&apos;s patch.&lt;/p&gt;</comment>
                            <comment id="74153" author="di.wang" created="Mon, 30 Dec 2013 02:33:58 +0000"  >&lt;p&gt;oh, it is not about MDT/FID stack. The failure is because the connection is somehow broken between other MDTs/OSTs to MDT0, which cause these target can not allocate the new FID sequence from MDT0. Hmm if your patch still depends on Mike&apos;s patch, it is probably Mike&apos;s patch problem, since I never saw this problem in current master and even in the run of other people&apos;s patch.&lt;/p&gt;</comment>
                            <comment id="74177" author="sarah" created="Mon, 30 Dec 2013 22:48:49 +0000"  >&lt;p&gt;another instance:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/1e071d22-706e-11e3-9fe0-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/1e071d22-706e-11e3-9fe0-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="74365" author="tappro" created="Sun, 5 Jan 2014 18:51:10 +0000"  >&lt;p&gt;probably I&apos;ve found the source of problem, let&apos;s wait for the latest patch test results, &lt;a href=&quot;http://review.whamcloud.com/#/c/7383/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/7383/&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="74466" author="tappro" created="Tue, 7 Jan 2014 06:40:02 +0000"  >&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sessions/ca3dea64-7751-11e3-943d-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sessions/ca3dea64-7751-11e3-943d-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Now it works as expected. The problem was the lost chunk of code with OBD_FAIL_CHECK needed for tests.&lt;/p&gt;</comment>
                            <comment id="90629" author="jhammond" created="Fri, 1 Aug 2014 13:09:41 +0000"  >&lt;p&gt;Another instance &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/2e799af4-1942-11e4-8c4a-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/2e799af4-1942-11e4-8c4a-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;22:16:47:Lustre: DEBUG MARKER: == recovery-small test 110a: create remote directory: drop client req == 22:14:17 (1406870057)&lt;br/&gt;
22:16:47:Lustre: DEBUG MARKER: lctl set_param -n mdt.lustre*.enable_remote_dir=1&lt;br/&gt;
22:16:47:Lustre: DEBUG MARKER: lctl set_param -n mdt.lustre*.enable_remote_dir=1&lt;br/&gt;
22:16:47:Lustre: DEBUG MARKER: lctl set_param -n mdt.lustre*.enable_remote_dir=1&lt;br/&gt;
22:16:47:LustreError: 167-0: lustre-MDT0000-lwp-MDT0001: This client was evicted by lustre-MDT0000; in progress operations using this service will fail.&lt;br/&gt;
22:16:47:LustreError: Skipped 2 previous similar messages&lt;br/&gt;
22:16:47:LustreError: 16796:0:(fid_handler.c:284:__seq_server_alloc_meta()) srv-lustre-MDT0001: Can&apos;t allocate super-sequence, rc -5&lt;br/&gt;
22:16:47:Lustre: DEBUG MARKER: /usr/sbin/lctl mark  recovery-small test_110a: @@@@@@ FAIL: lfs mkdir failed &lt;br/&gt;
22:16:47:Lustre: DEBUG MARKER: recovery-small test_110a: @@@@@@ FAIL: lfs mkdir failed&lt;br/&gt;
22:16:47:Lustre: DEBUG MARKER: /usr/sbin/lctl dk &amp;gt; /logdir/test_logs/2014-07-31/lustre-reviews-el6-x86_64-&lt;del&gt;review-dne-part-1&lt;/del&gt;-2_7_1_&lt;em&gt;25543&lt;/em&gt;_-70280407083100-154359/recovery-small.test_110a.debug_log.$(hostname -s).1406870060.log;&lt;br/&gt;
22:16:47:         dmesg &amp;gt; /logdir/test_logs/2014-07-31/lustre-revi&lt;br/&gt;
22:16:47:Lustre: DEBUG MARKER: /usr/sbin/lctl mark == recovery-small test 110b: create remote directory: drop Master rep == 22:14:24 &amp;#40;1406870064&amp;#41;&lt;/p&gt;</comment>
                            <comment id="108409" author="jamesanunez" created="Mon, 2 Mar 2015 15:30:55 +0000"  >&lt;p&gt;Another instance on 2.7.0-RC2. Logs at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/20cb4134-bf80-11e4-881f-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/20cb4134-bf80-11e4-881f-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Client test log:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== recovery-small test 110a: create remote directory: drop client req == 08:02:47 (1425139367)
CMD: onyx-41vm3 lctl set_param fail_loc=0x123
fail_loc=0x123
CMD: onyx-41vm6.onyx.hpdd.intel.com /usr/bin/lfs mkdir -i 1 -c2 /mnt/lustre/d110a.recovery-small/remote_dir
error on LL_IOC_LMV_SETSTRIPE &apos;/mnt/lustre/d110a.recovery-small/remote_dir&apos; (3): Input/output error
error: mkdir: create stripe dir &apos;/mnt/lustre/d110a.recovery-small/remote_dir&apos; failed
CMD: onyx-41vm3 lctl set_param fail_loc=0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;MDT same as the log John posted above.&lt;/p&gt;</comment>
                            <comment id="116400" author="jamesanunez" created="Tue, 26 May 2015 16:11:36 +0000"  >&lt;p&gt;This test is still failing occasionally. Recent failures are:&lt;br/&gt;
2015-04-27 23:46:54  - &lt;a href=&quot;https://testing.hpdd.intel.com/sub_tests/3da2d940-ed41-11e4-b3fc-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/sub_tests/3da2d940-ed41-11e4-b3fc-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-05-06 15:04:26 - &lt;a href=&quot;https://testing.hpdd.intel.com/sub_tests/73c503ac-f40a-11e4-b108-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/sub_tests/73c503ac-f40a-11e4-b108-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-05-14 09:21:11 - &lt;a href=&quot;https://testing.hpdd.intel.com/sub_tests/77b99d6e-fa23-11e4-8c8b-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/sub_tests/77b99d6e-fa23-11e4-8c8b-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-05-17 00:07:52 - &lt;a href=&quot;https://testing.hpdd.intel.com/sub_tests/fcaf8172-fc31-11e4-a658-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/sub_tests/fcaf8172-fc31-11e4-a658-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-05-26 10:10:02 - &lt;a href=&quot;https://testing.hpdd.intel.com/sub_tests/283e0a20-0399-11e5-a102-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/sub_tests/283e0a20-0399-11e5-a102-5254006e85c2&lt;/a&gt;&lt;/p&gt;
</comment>
                            <comment id="116433" author="jamesanunez" created="Tue, 26 May 2015 18:02:07 +0000"  >&lt;p&gt;Mike, &lt;br/&gt;
Would you please look at these recent test failures?&lt;br/&gt;
Thank you.&lt;/p&gt;</comment>
                            <comment id="230709" author="tappro" created="Sun, 22 Jul 2018 09:01:06 +0000"  >&lt;p&gt;the related test issue was fixed and remaining problems are &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7612&quot; title=&quot;recovery-small tests 110a, 110b, 110c, 110d, 110e, 110f fail with  &amp;#39;lfs mkdir failed&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7612&quot;&gt;LU-7612&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwbxr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>12134</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>