<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:24:24 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
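A full request URL would then look like this (hypothetical illustration; the path assumes
JIRA's standard XML issue-view endpoint, which is not shown elsewhere in this file):
https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-2342/LU-2342.xml?field=key&field=summary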
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2342] replay-single test_20b: @@@@@@ FAIL: after 6912 &gt; before 6784</title>
                <link>https://jira.whamcloud.com/browse/LU-2342</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Commit: 1c0dfc1ae9637cbfe5dabc0ea67c29633b5b04ec (Dec 17, 2011)&lt;br/&gt;
Maloo: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/7762668c-318d-11e1-9c6d-5254004bbbd3&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/7762668c-318d-11e1-9c6d-5254004bbbd3&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== replay-single test 20b: write, unlink, eviction, replay, (test mds_cleanup_orphans) == 19:32:44 (1325043164)
/mnt/lustre/f20b
lmm_stripe_count:   1
lmm_stripe_size:    1048576
lmm_stripe_offset:  0
	obdidx		 objid		objid		 group
	     0	          1150	        0x47e	             0

stat: cannot read file system information for `/mnt/lustre&apos;: Interrupted system call
10000+0 records in
10000+0 records out
40960000 bytes (41 MB) copied, 2.43149 s, 16.8 MB/s
Failing mds1 on node fat-intel-3vm3
Stopping /mnt/mds1 (opts:)
affected facets: mds1
Failover mds1 to fat-intel-3vm3
19:33:10 (1325043190) waiting for fat-intel-3vm3 network 900 secs ...
19:33:10 (1325043190) network interface is UP
Starting mds1: -o user_xattr,acl  lustre-mdt1/mdt1 /mnt/mds1
fat-intel-3vm3: debug=0x33f0404
fat-intel-3vm3: subsystem_debug=0xffb7e3ff
fat-intel-3vm3: debug_mb=32
Started lustre-MDT0000
affected facets: mds1
fat-intel-3vm3: *.lustre-MDT0000.recovery_status status: COMPLETE
Waiting for orphan cleanup...
before 6784, after 6912
UUID                   1K-blocks        Used   Available Use% Mounted on
lustre-MDT0000_UUID     29443456        5760    29435648   0% /mnt/lustre[MDT:0]
lustre-OST0000_UUID      2031872         896     1994112   0% /mnt/lustre[OST:0]
lustre-OST0001_UUID      2031872         896     2028032   0% /mnt/lustre[OST:1]
lustre-OST0002_UUID      2031872         896     1823872   0% /mnt/lustre[OST:2]
lustre-OST0003_UUID      2031872        1024     1799424   0% /mnt/lustre[OST:3]
lustre-OST0004_UUID      2031872         896     1996160   0% /mnt/lustre[OST:4]
lustre-OST0005_UUID      2032000        1024     2027904   0% /mnt/lustre[OST:5]
lustre-OST0006_UUID      2032000        1280     2027776   0% /mnt/lustre[OST:6]

filesystem summary:     14223360        6912    13697280   0% /mnt/lustre

osp.lustre-OST0000-osp-MDT0000.sync_changes=0
osp.lustre-OST0000-osp-MDT0000.sync_in_flight=0
osp.lustre-OST0000-osp-MDT0000.sync_in_progress=0
osp.lustre-OST0001-osp-MDT0000.sync_changes=0
osp.lustre-OST0001-osp-MDT0000.sync_in_flight=0
osp.lustre-OST0001-osp-MDT0000.sync_in_progress=0
osp.lustre-OST0002-osp-MDT0000.sync_changes=0
osp.lustre-OST0002-osp-MDT0000.sync_in_flight=0
osp.lustre-OST0002-osp-MDT0000.sync_in_progress=0
osp.lustre-OST0003-osp-MDT0000.sync_changes=0
osp.lustre-OST0003-osp-MDT0000.sync_in_flight=0
osp.lustre-OST0003-osp-MDT0000.sync_in_progress=0
osp.lustre-OST0004-osp-MDT0000.sync_changes=0
osp.lustre-OST0004-osp-MDT0000.sync_in_flight=0
osp.lustre-OST0004-osp-MDT0000.sync_in_progress=0
osp.lustre-OST0005-osp-MDT0000.sync_changes=0
osp.lustre-OST0005-osp-MDT0000.sync_in_flight=0
osp.lustre-OST0005-osp-MDT0000.sync_in_progress=0
osp.lustre-OST0006-osp-MDT0000.sync_changes=0
osp.lustre-OST0006-osp-MDT0000.sync_in_flight=0
osp.lustre-OST0006-osp-MDT0000.sync_in_progress=0
osp.lustre-OST0000-osp-MDT0000.prealloc_status=0
osp.lustre-OST0001-osp-MDT0000.prealloc_status=0
osp.lustre-OST0002-osp-MDT0000.prealloc_status=0
osp.lustre-OST0003-osp-MDT0000.prealloc_status=0
osp.lustre-OST0004-osp-MDT0000.prealloc_status=0
osp.lustre-OST0005-osp-MDT0000.prealloc_status=0
osp.lustre-OST0006-osp-MDT0000.prealloc_status=0
 replay-single test_20b: @@@@@@ FAIL: after 6912 &amp;gt; before 6784 
Dumping lctl log to /logdir/test_logs/2011-12-27/lustre-dev-el6-x86_64-zfs__277__-7fa5674d2740/replay-single.test_20b.*.1325043197.log
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="12825">LU-2342</key>
            <summary>replay-single test_20b: @@@@@@ FAIL: after 6912 &gt; before 6784</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="utopiabound">Nathaniel Clark</assignee>
                                    <reporter username="liwei">Li Wei</reporter>
                        <labels>
                            <label>HB</label>
                            <label>zfs</label>
                    </labels>
                <created>Sun, 8 Jan 2012 23:41:37 +0000</created>
                <updated>Wed, 1 Oct 2014 08:17:40 +0000</updated>
                            <resolved>Thu, 21 Mar 2013 12:00:59 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                                    <fixVersion>Lustre 2.4.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                <comments>
                            <comment id="26676" author="liwei" created="Mon, 16 Jan 2012 21:32:06 +0000"  >&lt;p&gt;Commit: 526c43ec2e47ead878f0df552b74c78b4fc79d1f (Jan 13, 2012)&lt;br/&gt;
Maloo: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/45a13d50-4072-11e1-ac07-5254004bbbd3&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/45a13d50-4072-11e1-ac07-5254004bbbd3&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="27749" author="liwei" created="Thu, 2 Feb 2012 04:39:07 +0000"  >&lt;p&gt;Commit: faefc49f0854987d29639437064e81bbc4556774 (Feb 1, 2012)&lt;br/&gt;
Maloo: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/0b2c6ac8-4d79-11e1-a8f4-5254004bbbd3&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/0b2c6ac8-4d79-11e1-a8f4-5254004bbbd3&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="28081" author="liwei" created="Tue, 7 Feb 2012 22:47:32 +0000"  >&lt;p&gt;Commit: faefc49f0854987d29639437064e81bbc4556774 (Feb 1, 2012)&lt;br/&gt;
Maloo: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/325d0fc6-4e50-11e1-88dd-5254004bbbd3&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/325d0fc6-4e50-11e1-88dd-5254004bbbd3&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="29456" author="liwei" created="Sun, 19 Feb 2012 23:37:44 +0000"  >&lt;p&gt;Commit: f42d375dfb2f30f64440ee8bc9f78a9a3e9a9adc (Feb 17, 2012)&lt;br/&gt;
Maloo: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/899b6ff0-5b0f-11e1-8801-5254004bbbd3&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/899b6ff0-5b0f-11e1-8801-5254004bbbd3&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="30691" author="liwei" created="Wed, 7 Mar 2012 22:57:42 +0000"  >&lt;p&gt;Commit: 3cf946177abe53ba791203006432272c6c7e798f (Mar 5, 2012)&lt;br/&gt;
Maloo: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/fcc187c8-65d5-11e1-92b1-5254004bbbd3&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/fcc187c8-65d5-11e1-92b1-5254004bbbd3&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="47917" author="bzzz" created="Fri, 16 Nov 2012 05:53:12 +0000"  >&lt;p&gt;hmm, it is still happening ? if not, I&apos;m suggesting to close.&lt;/p&gt;</comment>
                            <comment id="47929" author="liwei" created="Fri, 16 Nov 2012 09:10:44 +0000"  >&lt;p&gt;My bad, forgot to post the latest failure after moving this from Orion.&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/2221d300-2f94-11e2-bd52-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/2221d300-2f94-11e2-bd52-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="48658" author="adilger" created="Mon, 3 Dec 2012 02:39:27 +0000"  >&lt;p&gt;Still failing:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/78d57236-3bcb-11e2-b98e-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/78d57236-3bcb-11e2-b98e-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="52939" author="tappro" created="Sun, 24 Feb 2013 22:05:59 +0000"  >&lt;p&gt;Doesn&apos;t it occur due to disabled gap handling? I bet it does. In that case this test should just be disable because it is not supposed to pass.&lt;/p&gt;</comment>
                            <comment id="53459" author="jlevi" created="Wed, 6 Mar 2013 13:36:24 +0000"  >&lt;p&gt;Nathaniel,&lt;br/&gt;
Could you discuss with Mike to see if this test needs to be disable for ZFS?&lt;/p&gt;</comment>
                            <comment id="53627" author="utopiabound" created="Fri, 8 Mar 2013 16:09:54 +0000"  >&lt;p&gt;In the test: the + 20 on the before value, If I understand it right is for the logs.  I think (from my work on replay-ost-single test 6/7 - &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2903&quot; title=&quot;replay-ost-single test_6 and test_7 failure&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2903&quot;&gt;&lt;del&gt;LU-2903&lt;/del&gt;&lt;/a&gt;) that the logs are larger on zfs (up to ~256KB instead of the 20 here or the 40 in replay-ost-single).&lt;/p&gt;</comment>
                            <comment id="53660" author="utopiabound" created="Sun, 10 Mar 2013 17:29:32 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/5666&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/5666&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="53686" author="tappro" created="Mon, 11 Mar 2013 09:39:03 +0000"  >&lt;p&gt;I met the same issue in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2059&quot; title=&quot;mgc to backup configuration on osd-based llogs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2059&quot;&gt;&lt;del&gt;LU-2059&lt;/del&gt;&lt;/a&gt;, config logs are hardcoded as 40 blocks but sometimes it is 44 even on ldiskfs. I&apos;ve tried to don&apos;t guess but get size of logs via debugfs, but don&apos;t know how to do the same with zfs.&lt;/p&gt;</comment>
                            <comment id="54553" author="pjones" created="Thu, 21 Mar 2013 12:00:59 +0000"  >&lt;p&gt;Landed for 2.4&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzuw2n:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>2858</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>