<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:26:45 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2620] Failure on test suite replay-ost-single test_6: test_6 failed with 1</title>
                <link>https://jira.whamcloud.com/browse/LU-2620</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/478c299a-5ef8-11e2-b507-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/478c299a-5ef8-11e2-b507-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_6 failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;test_6 failed with 1&lt;/p&gt;&lt;/blockquote&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== replay-ost-single test 6: Fail OST before obd_destroy == 17:27:58 (1358126878)
Waiting for orphan cleanup...
CMD: client-32vm3 /usr/sbin/lctl get_param -n osp.*osc*.old_sync_processed
Waiting for local destroys to complete
1280+0 records in
1280+0 records out
5242880 bytes (5.2 MB) copied, 0.970226 s, 5.4 MB/s
/mnt/lustre/d0.replay-ost-single/f.replay-ost-single.6
lmm_stripe_count:   1
lmm_stripe_size:    1048576
lmm_layout_gen:     0
lmm_stripe_offset:  0
	obdidx		 objid		objid		 group
	     0	           193	         0xc1	             0

CMD: client-32vm3 lctl set_param fail_loc=0x80000119
fail_loc=0x80000119
before: 12650184 after_dd: 13693644
 replay-ost-single test_6: @@@@@@ FAIL: test_6 failed with 1 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="17176">LU-2620</key>
            <summary>Failure on test suite replay-ost-single test_6: test_6 failed with 1</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bogl">Bob Glossman</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>HB</label>
                    </labels>
                <created>Tue, 15 Jan 2013 14:07:55 +0000</created>
                <updated>Thu, 5 Sep 2013 07:24:25 +0000</updated>
                            <resolved>Thu, 7 Mar 2013 18:50:55 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                    <version>Lustre 2.1.4</version>
                    <version>Lustre 1.8.9</version>
                                    <fixVersion>Lustre 2.4.0</fixVersion>
                    <fixVersion>Lustre 2.1.5</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>10</watches>
                                                                            <comments>
                            <comment id="50507" author="adilger" created="Tue, 15 Jan 2013 17:27:11 +0000"  >&lt;p&gt;I think this problem may have been induced by the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2494&quot; title=&quot;error: get_param: /proc/{fs,sys}/{lnet,lustre}/obdfilter/*/mds_sync: Found no match&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2494&quot;&gt;&lt;del&gt;LU-2494&lt;/del&gt;&lt;/a&gt; landing of &lt;a href=&quot;http://review.whamcloud.com/4885&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/4885&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="50521" author="bogl" created="Tue, 15 Jan 2013 20:34:17 +0000"  >&lt;p&gt;So far haven&apos;t been able to reproduce this failure locally.&lt;/p&gt;

&lt;p&gt;If indeed it is due to some problem with &lt;a href=&quot;http://review.whamcloud.com/4885&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/4885&lt;/a&gt;, for example returning from the function wait_mds_ost_sync() too quickly, shouldn&apos;t that be causing failures in all tests that use this function?  This seems the only failure reported.&lt;/p&gt;</comment>
                            <comment id="50558" author="bogl" created="Wed, 16 Jan 2013 13:37:35 +0000"  >&lt;p&gt;Seems like the preceding test 5 must run in order to trigger the problem.  I note that it did run in the failing test set.  With the SLOW=no default it is skipped.  I think that&apos;s why I was having difficulty reproducing the problem.&lt;/p&gt;</comment>
                            <comment id="50561" author="bogl" created="Wed, 16 Jan 2013 14:03:12 +0000"  >&lt;p&gt;bzzz, in comments in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2494&quot; title=&quot;error: get_param: /proc/{fs,sys}/{lnet,lustre}/obdfilter/*/mds_sync: Found no match&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2494&quot;&gt;&lt;del&gt;LU-2494&lt;/del&gt;&lt;/a&gt; you said that old_sync_processed was the correct new flag to use.  However I can&apos;t see it ever going to 0 when I do a sync command on the client.  Seems like once it gets set to 1 near the beginning of time it stays 1 forever.  If so this makes it not a good analog for the mds_sync variable in older versions.  If this is true then Andreas is correct and my fix for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2494&quot; title=&quot;error: get_param: /proc/{fs,sys}/{lnet,lustre}/obdfilter/*/mds_sync: Found no match&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2494&quot;&gt;&lt;del&gt;LU-2494&lt;/del&gt;&lt;/a&gt; probably led to this problem.&lt;/p&gt;</comment>
                            <comment id="50563" author="bzzz" created="Wed, 16 Jan 2013 14:25:20 +0000"  >&lt;p&gt;yes, this represents only old requests (left from the previous boot).  mds_sync (repesenting u.filter.fo_mds_ost_sync in obdfilter) can&apos;t go to 0 as well, as MDS-OST recovery happens once at the startup.&lt;/p&gt;</comment>
                            <comment id="50569" author="bogl" created="Wed, 16 Jan 2013 14:45:33 +0000"  >&lt;p&gt;In discussion with bzzz he suggests using the existing function wait_delete_completed_mds() to force the previous rm to finish before moving on.  Apparently this kind of failure isn&apos;t new and not due to the fix for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2494&quot; title=&quot;error: get_param: /proc/{fs,sys}/{lnet,lustre}/obdfilter/*/mds_sync: Found no match&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2494&quot;&gt;&lt;del&gt;LU-2494&lt;/del&gt;&lt;/a&gt;.  The existing calls in test_6 of wait_mds_ost_sync and wait_destroy_complete and sync aren&apos;t sufficient to guarantee that kbytesavail is stable before starting the test.   Investigating this solution.  If it works in local test I will work up a patch presently.&lt;/p&gt;</comment>
                            <comment id="50576" author="bogl" created="Wed, 16 Jan 2013 15:12:37 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/5042&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/5042&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="52663" author="sarah" created="Mon, 18 Feb 2013 20:34:45 +0000"  >&lt;p&gt;another failure seen in zfs: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/c34fa2a2-7788-11e2-987d-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/c34fa2a2-7788-11e2-987d-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="52998" author="yujian" created="Mon, 25 Feb 2013 22:22:10 +0000"  >&lt;p&gt;Lustre b2_1 client build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/176&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/176&lt;/a&gt;&lt;br/&gt;
Lustre master server build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-master/1269&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-master/1269&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.3/x86_64&lt;/p&gt;

&lt;p&gt;The same failure occurred: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/93369ad0-7d78-11e2-85d0-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/93369ad0-7d78-11e2-85d0-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="53366" author="adilger" created="Tue, 5 Mar 2013 14:46:00 +0000"  >&lt;p&gt;This is still failing in master several times every day:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/7b9df70e-8531-11e2-bfd3-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/7b9df70e-8531-11e2-bfd3-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/3d973ca8-8532-11e2-9ab1-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/3d973ca8-8532-11e2-9ab1-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/35cfc004-8513-11e2-9ab1-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/35cfc004-8513-11e2-9ab1-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/7e5e9684-8512-11e2-bfd3-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/7e5e9684-8512-11e2-bfd3-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="53372" author="bogl" created="Tue, 5 Mar 2013 14:59:24 +0000"  >&lt;p&gt;I think the failures being seen now look different.  The original reported bug showed only test_6 failing, and that only after running test_5 due to SLOW=yes.  New failures now show test_6 and following tests all failing. Possibly an entirely new underlying cause.&lt;/p&gt;</comment>
                            <comment id="53373" author="adilger" created="Tue, 5 Mar 2013 15:03:50 +0000"  >&lt;p&gt;Looks like all of the new failures are on review-zfs test runs, so it may be due to a different cause?&lt;/p&gt;</comment>
                            <comment id="53374" author="bogl" created="Tue, 5 Mar 2013 15:05:44 +0000"  >&lt;p&gt;Also original failure was seen regardless of fstype.  New ones appear to be only on zfs, if I&apos;m not mistaken.&lt;/p&gt;</comment>
                            <comment id="53375" author="adilger" created="Tue, 5 Mar 2013 15:06:34 +0000"  >&lt;p&gt;Let&apos;s task &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2903&quot; title=&quot;replay-ost-single test_6 and test_7 failure&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2903&quot;&gt;&lt;del&gt;LU-2903&lt;/del&gt;&lt;/a&gt; for this new failure, since it does appear that this one was only hit when test_5 was being run.&lt;/p&gt;</comment>
                            <comment id="53376" author="bogl" created="Tue, 5 Mar 2013 15:06:48 +0000"  >&lt;p&gt;Andreas, Looks like we are on the same page suspecting a different cause.&lt;/p&gt;</comment>
                            <comment id="53527" author="yujian" created="Thu, 7 Mar 2013 07:11:03 +0000"  >&lt;p&gt;Hello Oleg,&lt;/p&gt;

&lt;p&gt;Could you please cherry-pick the patch of &lt;a href=&quot;http://review.whamcloud.com/5042&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/5042&lt;/a&gt; to Lustre b2_1 branch since the failure occurs in 2.1.4&amp;lt;-&amp;gt;2.4.0 interop testing? Thanks.&lt;/p&gt;</comment>
                            <comment id="53558" author="pjones" created="Thu, 7 Mar 2013 18:50:55 +0000"  >&lt;p&gt;closing again as the new issue is being tracked under &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2903&quot; title=&quot;replay-ost-single test_6 and test_7 failure&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2903&quot;&gt;&lt;del&gt;LU-2903&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="54874" author="sarah" created="Tue, 26 Mar 2013 22:39:11 +0000"  >&lt;p&gt;Hit this bug in interop between 1.8.9 client and 2.4 server, the server build is #1338 which should include the fix of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2903&quot; title=&quot;replay-ost-single test_6 and test_7 failure&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2903&quot;&gt;&lt;del&gt;LU-2903&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/b683542a-948d-11e2-93c6-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/b683542a-948d-11e2-93c6-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="54876" author="bogl" created="Tue, 26 Mar 2013 23:12:11 +0000"  >&lt;p&gt;I think this failure is expected.  The patch of &lt;a href=&quot;http://review.whamcloud.com/5042&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/5042&lt;/a&gt; was cherry picked to b2_1, which fixed the problem for 2.1/2.4 interop.  This was never done for b1_8 as far as I can see so the problem was never fixed for 1.8.9/2.4 interop.&lt;/p&gt;</comment>
                            <comment id="65804" author="yujian" created="Thu, 5 Sep 2013 07:24:25 +0000"  >&lt;p&gt;Lustre client: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b1_8/258/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b1_8/258/&lt;/a&gt; (1.8.9-wc1)&lt;br/&gt;
Lustre server: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_4/44/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_4/44/&lt;/a&gt; (2.4.1 RC1)&lt;/p&gt;

&lt;p&gt;replay-ost-single test 6 hit the same failure:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/6c0c8652-15c3-11e3-87cb-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/6c0c8652-15c3-11e3-87cb-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="16012">LU-1966</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="16931">LU-2494</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="17711">LU-2874</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="16085">LU-2011</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvfbz:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>6130</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>