<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:12:32 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-999] Test failure on test suite lfsck</title>
                <link>https://jira.whamcloud.com/browse/LU-999</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/cbcbbd2e-3f50-11e1-990e-5254004bbbd3&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/cbcbbd2e-3f50-11e1-990e-5254004bbbd3&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;fat-intel-1vm4: error getting mds_hdr (3685469441:8) in /tmp/mdsdb: DB_NOTFOUND: No matching key/data pair found&lt;br/&gt;
fat-intel-1vm4: e2fsck: aborted&lt;br/&gt;
Pass 1: Checking inodes, blocks, and sizes&lt;br/&gt;
Pass 1: Memory used: 432k/0k (265k/168k), time:  0.07/ 0.01/ 0.02&lt;br/&gt;
Pass 1: I/O read: 7MB, write: 0MB, rate: 100.18MB/s&lt;br/&gt;
Pass 2: Checking directory structure&lt;br/&gt;
Pass 2: Memory used: 432k/0k (291k/142k), time:  0.00/ 0.00/ 0.00&lt;br/&gt;
Pass 2: I/O read: 3MB, write: 0MB, rate: 1337.49MB/s&lt;br/&gt;
Pass 3: Checking directory connectivity&lt;br/&gt;
Peak memory: Memory used: 432k/0k (308k/125k), time:  0.11/ 0.01/ 0.03&lt;br/&gt;
Pass 3: Memory used: 432k/0k (291k/142k), time:  0.00/ 0.00/ 0.00&lt;br/&gt;
Pass 3: I/O read: 1MB, write: 0MB, rate: 27027.03MB/s&lt;br/&gt;
Pass 4: Checking reference counts&lt;br/&gt;
Pass 4: Memory used: 396k/0k (164k/233k), time:  0.00/ 0.00/ 0.00&lt;br/&gt;
Pass 4: I/O read: 0MB, write: 0MB, rate: 0.00MB/s&lt;br/&gt;
Pass 5: Checking group summary information&lt;br/&gt;
Pass 5: Memory used: 396k/0k (148k/249k), time:  0.02/ 0.01/ 0.00&lt;br/&gt;
Pass 5: I/O read: 1MB, write: 0MB, rate: 64.42MB/s&lt;br/&gt;
Pass 6: Acquiring OST information for lfsck&lt;br/&gt;
 lfsck : @@@@@@ FAIL: e2fsck -d -v -t -t -f -n --mdsdb /tmp/mdsdb --ostdb /tmp/ostdb-0 /dev/mapper/lvm--OSS-P0 returned 8, should be &amp;lt;= 1 &lt;/p&gt;</description>
                <environment></environment>
        <key id="12902">LU-999</key>
            <summary>Test failure on test suite lfsck</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="bobijam">Zhenyu Xu</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Mon, 16 Jan 2012 13:34:59 +0000</created>
                <updated>Mon, 6 Feb 2012 21:45:49 +0000</updated>
                            <resolved>Mon, 6 Feb 2012 21:45:28 +0000</resolved>
                                    <version>Lustre 2.2.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="26671" author="pjones" created="Mon, 16 Jan 2012 17:19:47 +0000"  >&lt;p&gt;Bobi&lt;/p&gt;

&lt;p&gt;Could you please look into this one?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="26686" author="adilger" created="Mon, 16 Jan 2012 22:50:23 +0000"  >&lt;p&gt;The first thing to check is if the same version of e2fsprogs is installed on the MDS and OSS?  Next, check if the version of db4 used by e2fsck is the same on both. This bug was hit in the past, and was due to db4 version mismatches. Please search bugzilla for this error messages.  &lt;/p&gt;</comment>
                            <comment id="27692" author="bobijam" created="Tue, 31 Jan 2012 23:31:19 +0000"  >&lt;p&gt;error msg is:&lt;/p&gt;

&lt;p&gt;fat-intel-1vm4: error getting mds_hdr (3685469441:8) in /tmp/mdsdb: DB_NOTFOUND: No matching key/data pair found&lt;/p&gt;

&lt;p&gt;The corresponding e2fsck code is:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;        memset(&amp;amp;mds_hdr, 0, sizeof(mds_hdr));
        mds_hdr.mds_magic = MDS_MAGIC;           // ====&amp;gt;  0xDBABCD01 == 3685469441
        memset(&amp;amp;key, 0, sizeof(key));
        memset(&amp;amp;data, 0, sizeof(data));
        key.data = &amp;amp;mds_hdr.mds_magic;
        key.size = sizeof(mds_hdr.mds_magic);
        data.data = &amp;amp;mds_hdr;
        data.size = sizeof(mds_hdr);
        data.ulen = sizeof(mds_hdr);
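        // DB_DBT_USERMEM: db4 fills the caller-supplied buffer instead of allocating one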
        data.flags = DB_DBT_USERMEM;
        rc = mds_hdrdb-&amp;gt;get(mds_hdrdb, NULL, &amp;amp;key, &amp;amp;data, 0);
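        // rc is non-zero on any failure, including DB_NOTFOUND when the magic key is absent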
        if (rc) {
                fprintf(stderr,&quot;error getting mds_hdr (&quot;LPU64&quot;:%u) in %s: %s\n&quot;,
                        mds_hdr.mds_magic, (int)sizeof(mds_hdr.mds_magic),
                        ctx-&amp;gt;lustre_mdsdb, db_strerror(rc));
                ctx-&amp;gt;flags |= E2F_FLAG_ABORT;
                goto out;
        }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;e2fsck cannot find the expected MDS header magic value in /tmp/mdsdb when generating the OST db.&lt;/p&gt;

&lt;p&gt;This could be caused by a db4 version mismatch, but I checked the test session info &lt;a href=&quot;https://maloo.whamcloud.com/test_sessions/99aea334-3f4f-11e1-990e-5254004bbbd3&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sessions/99aea334-3f4f-11e1-990e-5254004bbbd3&lt;/a&gt;: the MDS (fat-intel-1vm3) and the OSTs (fat-intel-1vm4) use the same build image (Kernel Version: 2.6.32-131.17.1.el6_lustre.ge126ace.x86_64, Lustre Version: jenkins-arch=x86_64,build_type=server,distro=el6,ib_stack=inkern).&lt;/p&gt;
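
&lt;p&gt;For reference, a standalone probe along these lines (an untested sketch; the buffer size is a placeholder) could be run against /tmp/mdsdb on the OSS node to repeat the same lookup with the locally installed db4:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;db.h&amp;gt;

int main(int argc, char **argv)
{
        unsigned long long magic = 0xDBABCD01ULL; /* MDS_MAGIC, see above */
        const char *path = argc &amp;gt; 1 ? argv[1] : &quot;/tmp/mdsdb&quot;;
        char buf[4096];                 /* placeholder, &amp;gt;= sizeof(mds_hdr) */
        DB *db;
        DBT key, data;
        int rc;

        rc = db_create(&amp;amp;db, NULL, 0);
        if (rc) {
                fprintf(stderr, &quot;db_create: %s\n&quot;, db_strerror(rc));
                return 1;
        }
        /* DB_UNKNOWN lets db4 detect the database type of the existing file */
        rc = db-&amp;gt;open(db, NULL, path, NULL, DB_UNKNOWN, DB_RDONLY, 0);
        if (rc) {
                fprintf(stderr, &quot;open %s: %s\n&quot;, path, db_strerror(rc));
                return 1;
        }
        memset(&amp;amp;key, 0, sizeof(key));
        memset(&amp;amp;data, 0, sizeof(data));
        key.data = &amp;amp;magic;
        key.size = sizeof(magic);       /* must match sizeof(mds_hdr.mds_magic) */
        data.data = buf;
        data.ulen = sizeof(buf);
        data.flags = DB_DBT_USERMEM;
        rc = db-&amp;gt;get(db, NULL, &amp;amp;key, &amp;amp;data, 0);
        printf(&quot;mds_hdr lookup: %s\n&quot;, rc ? db_strerror(rc) : &quot;found&quot;);
        db-&amp;gt;close(db, 0);
        return rc ? 1 : 0;
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;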
</comment>
                            <comment id="27982" author="adilger" created="Mon, 6 Feb 2012 14:34:22 +0000"  >&lt;p&gt;Is the /tmp/mdsdb file available on the OSS node, and is it definitely the right one (i.e. not left over from some previous run)?  I haven&apos;t checked this code to verify if it will fail with a &quot;file not found&quot; if the mdsdb file is missing entirely.  Since the OSS and MDS are running in different VM images, it may be that the file is not being copied to the OSS correctly.&lt;/p&gt;</comment>
                            <comment id="28001" author="bobijam" created="Mon, 6 Feb 2012 20:10:49 +0000"  >&lt;p&gt;Sarah,&lt;/p&gt;

&lt;p&gt;Would you please check whether /tmp is a directory shared between the MDS and OSS, and, if we get another hit, whether /tmp/mdsdb on the OSS node is exactly the same file as the one on the MDS node?&lt;/p&gt;

&lt;p&gt;Thanks.&lt;/p&gt;</comment>
                            <comment id="28002" author="yujian" created="Mon, 6 Feb 2012 21:18:57 +0000"  >&lt;p&gt;It seems this is the same issue as &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-427&quot; title=&quot;Test failure on test suite lfsck&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-427&quot;&gt;&lt;del&gt;LU-427&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="28004" author="bobijam" created="Mon, 6 Feb 2012 21:45:29 +0000"  >&lt;p&gt;yes, I think it&apos;s dup of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-427&quot; title=&quot;Test failure on test suite lfsck&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-427&quot;&gt;&lt;del&gt;LU-427&lt;/del&gt;&lt;/a&gt;, thanks Yujian.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="11186">LU-427</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvhi7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>6485</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>