<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:49:18 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5190] lfsck: FAIL: e2fsck returned 4, should be &lt;= 1</title>
                <link>https://jira.whamcloud.com/browse/LU-5190</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;lfsck test failed as follows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;lustre-MDT0000: ********** WARNING: Filesystem still has errors **********


         751 inodes used (0.07%, out of 1048576)
          13 non-contiguous files (1.7%)
           0 non-contiguous directories (0.0%)
             # of inodes with ind/dind/tind blocks: 1/0/0
      154242 blocks used (29.42%, out of 524288)
           0 bad blocks
           1 large file

         560 regular files
         182 directories
           0 character device files
           0 block device files
           0 fifos
          10 links
           0 symbolic links (0 fast symbolic links)
           0 sockets
------------
         469 files
Memory used: 2756k/21184k (1093k/1664k), time:  4.83/ 0.11/ 0.06
I/O read: 9MB, write: 0MB, rate: 1.86MB/s
 lfsck : @@@@@@ FAIL: e2fsck -d -v -t -t -f -n --mdsdb /home/autotest/.autotest/2014-06-11/212151-69837947160380/mdsdb /dev/mapper/lvm--Role_MDS-P1 returned 4, should be &amp;lt;= 1 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report: &lt;a href=&quot;https://maloo.whamcloud.com/test_logs/b31a15b8-f2fa-11e3-a3d9-52540035b04c/show_text&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_logs/b31a15b8-f2fa-11e3-a3d9-52540035b04c/show_text&lt;/a&gt;&lt;/p&gt;</description>
                <environment>Lustre build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_5/63/&quot;&gt;http://build.whamcloud.com/job/lustre-b2_5/63/&lt;/a&gt; (2.5.2 RC1)</environment>
        <key id="25149">LU-5190</key>
            <summary>lfsck: FAIL: e2fsck returned 4, should be &lt;= 1</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="yujian">Jian Yu</assignee>
                                    <reporter username="yujian">Jian Yu</reporter>
                        <labels>
                    </labels>
                <created>Fri, 13 Jun 2014 15:57:56 +0000</created>
                <updated>Mon, 16 Jun 2014 08:40:16 +0000</updated>
                            <resolved>Mon, 16 Jun 2014 08:40:16 +0000</resolved>
                                    <version>Lustre 2.5.2</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="86569" author="yujian" created="Fri, 13 Jun 2014 16:00:21 +0000"  >&lt;p&gt;The failure occurred on all of the regression test sessions on Lustre 2.5.2 RC1:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sessions/f120d00c-f2f7-11e3-a3d9-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sessions/f120d00c-f2f7-11e3-a3d9-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sessions/ee26bffa-f2ee-11e3-a3d9-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sessions/ee26bffa-f2ee-11e3-a3d9-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sessions/ef2b0eac-f2d4-11e3-86ca-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sessions/ef2b0eac-f2d4-11e3-86ca-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sessions/2751df84-f2cd-11e3-a3d9-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sessions/2751df84-f2cd-11e3-a3d9-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;It&apos;s a regression in comparison with Lustre b2_5 build #61.&lt;/p&gt;</comment>
                            <comment id="86617" author="pjones" created="Fri, 13 Jun 2014 20:54:50 +0000"  >&lt;p&gt;Fan Yong&lt;/p&gt;

&lt;p&gt;Could you please advise on this one?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="86633" author="yong.fan" created="Sat, 14 Jun 2014 00:59:28 +0000"  >&lt;p&gt;This &quot;lfsck&quot; failure is not the &quot;LFSCK&quot; that we are working on for OpenSFS contract. From the test log, we only can say that the e2fsck found some on-disk data corruption. Such data corruption may be left from former other tests. I have checked the test history, when the failure occurred, the tests order was:&lt;/p&gt;

&lt;p&gt;1) sanity-lfsck.sh&lt;br/&gt;
2) sanityn.sh&lt;br/&gt;
3) sanity-hsm.sh&lt;br/&gt;
4) lfsck.sh&lt;/p&gt;

&lt;p&gt;For sanity-lfsck.sh, it will reformat the system after the testing, so when sanityn.sh started, the system must be clean. So it is quite possible that the data corruption was introduced by sanityn.sh or sanity-hsm.sh. Unfortunately, neither sanityn nor sanity-hsm can detect data corruption by itself. So they were marked as success. So there are no logs that can be used for further analysis.&lt;/p&gt;</comment>
                            <comment id="86636" author="green" created="Sat, 14 Jun 2014 02:05:26 +0000"  >&lt;p&gt;Yujian, can you please see if this is reproducible on a single node simple cluster and then perhaps let&apos;s try to isolate which of the following patches caused this:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LU-4852 osc: osc_extent_truncate()) ASSERTION( !ext-&amp;gt;oe_urgent ) failed (detail / gitweb)
LU-4676 hsm: Fix return value error of ct_run() (detail / gitweb)
LU-4830 tests: only deactivate MDTs of Lustre FSNAME (detail / gitweb)
LU-2524 test: Modify tdir to be single directory (detail / gitweb)
LU-4573 tests: check all MDTs for open files (detail / gitweb)
LU-4102 doc: recommend newer e2fsprogs version (detail / gitweb)
LU-4780 lnet: NI shutdown may loop forever (detail / gitweb)
LU-5100 llite: set dir LOV xattr length variable (detail / gitweb)
LU-5133 tests: Add version check in sanity/238 (detail / gitweb)
LU-3386 lproc: improve osc/mdc &quot;imports&quot; connect data (detail / gitweb)
LU-5132 tests: Add version check to sanity/160c (detail / gitweb)
LU-5047 tests: correct cleanup files in sanity.sh (detail / gitweb)
LU-4887 tests: sanity-scrub interoperability tests with master (detail / gitweb)
LU-4569 hsm: Prevent copytool from importing existing file. (detail / gitweb)
LU-2272 statahead: ll_intent_drop_lock() called in spinlock (detail / gitweb)
LU-5116 ptlrpc: race at req processing (detail / gitweb)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Most of those are testing only changes, though&lt;/p&gt;</comment>
                            <comment id="86638" author="yujian" created="Sat, 14 Jun 2014 15:36:58 +0000"  >&lt;p&gt;Sure, Oleg, will do.&lt;/p&gt;</comment>
                            <comment id="86663" author="yujian" created="Mon, 16 Jun 2014 08:33:48 +0000"  >&lt;p&gt;Test results showed that this was a known issue on Lustre b2_5 branch. The reason that the failure was not detected in previous builds was that while running lfsck.sh, the Lustre filesystem was not empty:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; is_empty_fs $MOUNT; then
        # create test directory
        mkdir -p $TESTDIR || error &lt;span class=&quot;code-quote&quot;&gt;&quot;mkdir $TESTDIR failed&quot;&lt;/span&gt;

        # create some dirs and files on the filesystem
        create_files $TESTDIR $NUMDIRS $NUMFILES

        # ......
&lt;span class=&quot;code-keyword&quot;&gt;else&lt;/span&gt; # is_empty_fs $MOUNT
        FSCK_MAX_ERR=4   # file system errors left uncorrected
        sync; sync; sleep 3 # make sure all data flush back
fi
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;If we &lt;b&gt;only&lt;/b&gt; ran lfsck.sh on previous builds, then the same failure also occurred. It was one of the changes in build #63 that disclosed the failure.&lt;/p&gt;

&lt;p&gt;The focus of this ticket is to fix the real failure.&lt;/p&gt;</comment>
                            <comment id="86665" author="yujian" created="Mon, 16 Jun 2014 08:40:01 +0000"  >&lt;p&gt;This is a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4169&quot; title=&quot;lfsck: FAIL: e2fsck returned 4, should be &amp;lt;= 1&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4169&quot;&gt;&lt;del&gt;LU-4169&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="21690">LU-4169</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwp0v:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>14508</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>