<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:52:06 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5510] 2.4.3&lt;-&gt;2.5.3 interop: sanity-scrub test_15: FAIL: (7) Expected &apos;inconsistent&apos; on mds1, but got &apos;inconsistent,upgrade&apos; </title>
                <link>https://jira.whamcloud.com/browse/LU-5510</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;sanity-scrub test 15 failed as follows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Started LFSCK on the MDT device lustre-MDT0000.
CMD: shadow-46vm3 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-MDT0000.oi_scrub
CMD: shadow-46vm3 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-MDT0000.oi_scrub
 sanity-scrub test_15: @@@@@@ FAIL: (7) Expected &apos;inconsistent&apos; on mds1, but got &apos;inconsistent,upgrade&apos; 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6a6b507e-f8af-11e3-842c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6a6b507e-f8af-11e3-842c-5254006e85c2&lt;/a&gt;&lt;/p&gt;</description>
                <environment>Lustre client build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_5/80/&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_5/80/&lt;/a&gt;&lt;br/&gt;
Lustre server build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_4/73/&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_4/73/&lt;/a&gt; (2.4.3)&lt;br/&gt;
Distro/Arch: RHEL6.5/x86_64</environment>
        <key id="26077">LU-5510</key>
            <summary>2.4.3&lt;-&gt;2.5.3 interop: sanity-scrub test_15: FAIL: (7) Expected &apos;inconsistent&apos; on mds1, but got &apos;inconsistent,upgrade&apos; </summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="yong.fan">nasf</assignee>
                                    <reporter username="yujian">Jian Yu</reporter>
                        <labels>
                    </labels>
                <created>Wed, 20 Aug 2014 06:56:46 +0000</created>
                <updated>Wed, 11 Feb 2015 09:18:40 +0000</updated>
                            <resolved>Sun, 8 Feb 2015 04:52:52 +0000</resolved>
                                    <version>Lustre 2.7.0</version>
                    <version>Lustre 2.5.3</version>
                                    <fixVersion>Lustre 2.7.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="92093" author="yujian" created="Thu, 21 Aug 2014 00:21:24 +0000"  >&lt;p&gt;Hi Nasf,&lt;/p&gt;

&lt;p&gt;Is this a new interop failure related to the patches for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4058&quot; title=&quot;Interop 2.4.0&amp;lt;-&amp;gt;2.5 failure on test suite sanity-scrub test_15&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4058&quot;&gt;&lt;del&gt;LU-4058&lt;/del&gt;&lt;/a&gt;?&lt;/p&gt;</comment>
                            <comment id="92115" author="yujian" created="Thu, 21 Aug 2014 07:16:19 +0000"  >&lt;p&gt;The failure did not occur in the interop testing between Lustre b2_5 build #83 client with Lustre 2.4.3 server.&lt;/p&gt;</comment>
                            <comment id="92982" author="yujian" created="Tue, 2 Sep 2014 17:28:29 +0000"  >&lt;p&gt;Lustre client build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_5/86/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_5/86/&lt;/a&gt; (2.5.3 RC1)&lt;br/&gt;
Lustre server build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_4/73/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_4/73/&lt;/a&gt; (2.4.3)&lt;br/&gt;
Distro/Arch: RHEL6.5/x86_64&lt;/p&gt;

&lt;p&gt;The failure occurred again: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/91e037a4-31f4-11e4-8d72-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/91e037a4-31f4-11e4-8d72-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="102307" author="yong.fan" created="Thu, 25 Dec 2014 08:36:13 +0000"  >&lt;p&gt;Yujian,&lt;/p&gt;

&lt;p&gt;I do not think the issue is related with &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4058&quot; title=&quot;Interop 2.4.0&amp;lt;-&amp;gt;2.5 failure on test suite sanity-scrub test_15&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4058&quot;&gt;&lt;del&gt;LU-4058&lt;/del&gt;&lt;/a&gt;. It looks like that there were some objects without LMA EA, then the OI scrub regarded that the system was upgrading from Lustre-1.8. It may be not related with the interoperability, instead, it may be reproducible on Lustre-2.4.3 directly. Have you ever run the sanity-scrub on pure Lustre-2.4.3 without interoperating with Lustre-2.5.3?&lt;/p&gt;</comment>
                            <comment id="102314" author="yujian" created="Thu, 25 Dec 2014 23:02:22 +0000"  >&lt;blockquote&gt;&lt;p&gt;Have you ever run the sanity-scrub on pure Lustre-2.4.3 without interoperating with Lustre-2.5.3?&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Yes, and here are the full group test sessions on Lustre 2.4.3:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/e75fa67e-ab62-11e3-a696-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/e75fa67e-ab62-11e3-a696-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/3aaccf46-ac5d-11e3-81d7-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/3aaccf46-ac5d-11e3-81d7-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/ffeb5ee0-abe4-11e3-bcad-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/ffeb5ee0-abe4-11e3-bcad-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;sanity-scrub test passed.&lt;/p&gt;

&lt;p&gt;However, I found the following failures on Lustre 2.4.2 full group test sessions:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/4f889346-6a5e-11e3-81c0-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/4f889346-6a5e-11e3-81c0-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/fc56303c-6c58-11e3-92d0-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/fc56303c-6c58-11e3-92d0-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;It looks like the failure existed on Lustre b2_4 and occurred sporadically.&lt;/p&gt;</comment>
                            <comment id="102318" author="gerrit" created="Fri, 26 Dec 2014 03:22:48 +0000"  >&lt;p&gt;Fan Yong (fan.yong@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/13187&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13187&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5510&quot; title=&quot;2.4.3&amp;lt;-&amp;gt;2.5.3 interop: sanity-scrub test_15: FAIL: (7) Expected &amp;#39;inconsistent&amp;#39; on mds1, but got &amp;#39;inconsistent,upgrade&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5510&quot;&gt;&lt;del&gt;LU-5510&lt;/del&gt;&lt;/a&gt; scrub: ldiskfs_create_inode returns locked inode&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: cac7f444551d7d495d5dbc744c9114d1bdd77fbb&lt;/p&gt;</comment>
                            <comment id="106169" author="gerrit" created="Sun, 8 Feb 2015 02:27:16 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/13187/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13187/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5510&quot; title=&quot;2.4.3&amp;lt;-&amp;gt;2.5.3 interop: sanity-scrub test_15: FAIL: (7) Expected &amp;#39;inconsistent&amp;#39; on mds1, but got &amp;#39;inconsistent,upgrade&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5510&quot;&gt;&lt;del&gt;LU-5510&lt;/del&gt;&lt;/a&gt; scrub: ldiskfs_create_inode returns locked inode&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 3c357081f5c0af79d76e2f556c14ca74ca47cf3b&lt;/p&gt;</comment>
                            <comment id="106186" author="pjones" created="Sun, 8 Feb 2015 04:52:52 +0000"  >&lt;p&gt;Landed for 2.7&lt;/p&gt;</comment>
                            <comment id="106602" author="adilger" created="Wed, 11 Feb 2015 09:18:40 +0000"  >&lt;p&gt;I saw that a few people other than myself hit the problem:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LustreError: 19224:0:(osd_handler.c:2334:__osd_object_create()) ASSERTION( obj-&amp;gt;oo_inode-&amp;gt;i_state &amp;amp; 8 ) failed
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;that was added as part of this patch.  Just adding a comment here to make it clear that you need to do &quot;&lt;tt&gt;make -C ldiskfs clean&lt;/tt&gt;&quot; to rebuild the ldiskfs module with the new patch.  It seems there is something wrong with the build dependencies in ldiskfs that prevent it from automatically detecting that the source patches have changed since they were applied.&lt;/p&gt;

&lt;p&gt;I was thinking of disabling this LASSERT() and just handling the case of a returned locked inode, but there isn&apos;t really any reason to be compatible with different versions of ldiskfs, since it is always included as part of the Lustre modules (AFAIK only LLNL made their own ldiskfs modules with their kernel and have since moved to ZFS anyway).&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwu3j:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>15367</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>