<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:02:50 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6739] EL7 mds-survey test_1: mds-survey failed</title>
                <link>https://jira.whamcloud.com/browse/LU-6739</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah_lw &amp;lt;wei3.liu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/ea948bde-135d-11e5-b4b0-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/ea948bde-135d-11e5-b4b0-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_1 failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;mds-survey failed
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;There is no log for MDS at all, this failure blocks all the following tests from being run. test log shows&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;====&amp;gt; Destroy 4 directories on onyx-42vm3:lustre-MDT0000_ecc
ssh_exchange_identification: Connection closed by remote host
Mon Jun 15 05:04:43 PDT 2015 /usr/bin/mds-survey from onyx-42vm6.onyx.hpdd.intel.com
mdt 1 file  103011 dir    4 thr    4 create 18976.80 [ 11998.67, 23975.59] lookup 397752.68 [ 397752.68, 397752.68] md_getattr 294970.99 [ 294970.99, 294970.99] setxattr 1069.79 [    0.00, 7999.06] destroy             ERROR 
mdt 1 file  103011 dir    4 thr    8 create             ERROR lookup             ERROR md_getattr             ERROR setxattr             ERROR destroy             ERROR 
starting run for config:  test: create  file: 103011 threads: 4  directories: 4
starting run for config:  test: lookup  file: 103011 threads: 4  directories: 4
starting run for config:  test: md_getattr  file: 103011 threads: 4  directories: 4
starting run for config:  test: setxattr  file: 103011 threads: 4  directories: 4
starting run for config:  test: destroy  file: 103011 threads: 4  directories: 4
starting run for config:  test: create  file: 103011 threads: 8  directories: 4
starting run for config:  test: lookup  file: 103011 threads: 8  directories: 4
starting run for config:  test: md_getattr  file: 103011 threads: 8  directories: 4
starting run for config:  test: setxattr  file: 103011 threads: 8  directories: 4
starting run for config:  test: destroy  file: 103011 threads: 8  directories: 4
 mds-survey test_1: @@@@@@ FAIL: mds-survey failed 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>server and client: lustre-master build # 3071 EL7</environment>
        <key id="30724">LU-6739</key>
            <summary>EL7 mds-survey test_1: mds-survey failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Wed, 17 Jun 2015 23:52:35 +0000</created>
                <updated>Tue, 23 Jun 2015 18:04:36 +0000</updated>
                            <resolved>Thu, 18 Jun 2015 17:50:08 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>2</watches>
                                                                            <comments>
                            <comment id="119001" author="adilger" created="Thu, 18 Jun 2015 17:50:01 +0000"  >&lt;p&gt;The console logs are under &quot;lustre-provisioning&quot; from before mds-survey:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;12:06:51:[ 1721.271559] WARNING: at lustre-2.7.55/ldiskfs/ext4_jbd2.c:260 __ldiskfs_handle_dirty_metadata+0x1c2/0x220 [ldiskfs]()
12:06:51:[ 1721.294943] CPU: 1 PID: 5479 Comm: lctl Tainted: GF          O--------------   3.10.0-229.4.2.el7_lustre.x86_64 #1
12:06:51:[ 1721.297354] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
12:06:52:[ 1721.306555] Call Trace:
12:06:52:[ 1721.308459]  [&amp;lt;ffffffff816050da&amp;gt;] dump_stack+0x19/0x1b
12:06:52:[ 1721.310520]  [&amp;lt;ffffffff8106e34b&amp;gt;] warn_slowpath_common+0x6b/0xb0
12:06:52:[ 1721.312659]  [&amp;lt;ffffffff8106e49a&amp;gt;] warn_slowpath_null+0x1a/0x20
12:06:52:[ 1721.314897]  [&amp;lt;ffffffffa05616b2&amp;gt;] __ldiskfs_handle_dirty_metadata+0x1c2/0x220 [ldiskfs]
12:06:52:[ 1721.319529]  [&amp;lt;ffffffffa0584659&amp;gt;] ldiskfs_free_blocks+0x5c9/0xb90 [ldiskfs]
12:06:53:[ 1721.321814]  [&amp;lt;ffffffffa0578f75&amp;gt;] ldiskfs_xattr_release_block+0x275/0x330 [ldiskfs]
12:06:53:[ 1721.324060]  [&amp;lt;ffffffffa057c1ab&amp;gt;] ldiskfs_xattr_delete_inode+0x2bb/0x300 [ldiskfs]
12:06:53:[ 1721.326316]  [&amp;lt;ffffffffa0576ad5&amp;gt;] ldiskfs_evict_inode+0x1b5/0x610 [ldiskfs]
12:06:53:[ 1721.328683]  [&amp;lt;ffffffff811e23d7&amp;gt;] evict+0xa7/0x170
12:06:53:[ 1721.330790]  [&amp;lt;ffffffff811e2c15&amp;gt;] iput+0xf5/0x180
12:06:53:[ 1721.332864]  [&amp;lt;ffffffffa0ba3e73&amp;gt;] osd_object_delete+0x1d3/0x300 [osd_ldiskfs]
12:06:53:[ 1721.335175]  [&amp;lt;ffffffffa07586ad&amp;gt;] lu_object_free.isra.30+0x9d/0x1a0 [obdclass]
12:06:53:[ 1721.337494]  [&amp;lt;ffffffffa0758872&amp;gt;] lu_object_put+0xc2/0x320 [obdclass]
12:06:54:[ 1721.339735]  [&amp;lt;ffffffffa0f2a6d7&amp;gt;] echo_md_destroy_internal+0xe7/0x520 [obdecho]
12:06:54:[ 1721.342007]  [&amp;lt;ffffffffa0f3217a&amp;gt;] echo_md_handler.isra.43+0x191a/0x2250 [obdecho]
12:06:54:[ 1721.348581]  [&amp;lt;ffffffffa0f34766&amp;gt;] echo_client_iocontrol+0x1146/0x1d10 [obdecho]
12:06:54:[ 1721.354898]  [&amp;lt;ffffffffa0724d1c&amp;gt;] class_handle_ioctl+0x1b3c/0x22b0 [obdclass]
12:06:54:[ 1721.358813]  [&amp;lt;ffffffffa070a5e2&amp;gt;] obd_class_ioctl+0xd2/0x170 [obdclass]
12:06:54:[ 1721.360799]  [&amp;lt;ffffffff811da2c5&amp;gt;] do_vfs_ioctl+0x2e5/0x4c0
12:06:54:[ 1721.364564]  [&amp;lt;ffffffff811da541&amp;gt;] SyS_ioctl+0xa1/0xc0
12:06:55:[ 1721.366357]  [&amp;lt;ffffffff81615029&amp;gt;] system_call_fastpath+0x16/0x1b
12:06:55:[ 1721.368204] ---[ end trace aed93badbc88e370 ]---
12:06:55:[ 1721.370058] LDISKFS-fs: ldiskfs_free_blocks:5107: aborting transaction: error 28 in __ldiskfs_handle_dirty_metadata
12:06:55:[ 1721.372395] LDISKFS: jbd2_journal_dirty_metadata failed: handle type 5 started at line 240, credits 3/0, errcode -28
12:06:55:[ 1721.385026] LDISKFS-fs error (device dm-0) in ldiskfs_free_blocks:5123: error 28
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Looks like the same journal size problem as &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6722&quot; title=&quot;sanity-lfsck test_1a: FAIL: (3) Fail to start LFSCK for namespace!&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6722&quot;&gt;&lt;del&gt;LU-6722&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="30655">LU-6722</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxg27:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>