<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:48:34 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5105] Test failure sanity-lfsck test_18d: umount mds hung</title>
                <link>https://jira.whamcloud.com/browse/LU-5105</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Nathaniel Clark &amp;lt;nathaniel.l.clark@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;http://maloo.whamcloud.com/test_sets/6c3d597e-e351-11e3-93d9-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://maloo.whamcloud.com/test_sets/6c3d597e-e351-11e3-93d9-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_18d failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;test failed to respond and timed out&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Info required for matching: sanity-lfsck 18d&lt;/p&gt;

&lt;p&gt;MDS syslog:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;umount        D 0000000000000000     0 19510  19509 0x00000080
 ffff880051eaf8b8 0000000000000082 0000000000000000 ffff880051eaf87c
 0000000000000282 0000000000000282 ffff880051eaf858 ffffffff8108410c
 ffff8800554de5f8 ffff880051eaffd8 000000000000fbc8 ffff8800554de5f8
Call Trace:
 [&amp;lt;ffffffff8108410c&amp;gt;] ? lock_timer_base+0x3c/0x70
 [&amp;lt;ffffffff815291c2&amp;gt;] schedule_timeout+0x192/0x2e0
 [&amp;lt;ffffffff81084220&amp;gt;] ? process_timeout+0x0/0x10
 [&amp;lt;ffffffff8152932e&amp;gt;] schedule_timeout_uninterruptible+0x1e/0x20
 [&amp;lt;ffffffffa123ddea&amp;gt;] dnode_special_close+0x2a/0x60 [zfs]
 [&amp;lt;ffffffffa1232652&amp;gt;] dmu_objset_evict+0x92/0x400 [zfs]
 [&amp;lt;ffffffffa1243c50&amp;gt;] dsl_dataset_evict+0x30/0x1b0 [zfs]
 [&amp;lt;ffffffffa1223dd9&amp;gt;] dbuf_evict_user+0x49/0x80 [zfs]
 [&amp;lt;ffffffffa1225087&amp;gt;] dbuf_rele_and_unlock+0xf7/0x1e0 [zfs]
 [&amp;lt;ffffffffa12254e0&amp;gt;] dmu_buf_rele+0x30/0x40 [zfs]
 [&amp;lt;ffffffffa1249170&amp;gt;] dsl_dataset_disown+0xb0/0x1d0 [zfs]
 [&amp;lt;ffffffffa1231751&amp;gt;] dmu_objset_disown+0x11/0x20 [zfs]
 [&amp;lt;ffffffffa18f690e&amp;gt;] udmu_objset_close+0x2e/0x40 [osd_zfs]
 [&amp;lt;ffffffffa18f4f86&amp;gt;] osd_device_fini+0x366/0x5c0 [osd_zfs]
 [&amp;lt;ffffffffa0d9dd53&amp;gt;] class_cleanup+0x573/0xd30 [obdclass]
 [&amp;lt;ffffffffa0d757a6&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
 [&amp;lt;ffffffffa0d9fa7a&amp;gt;] class_process_config+0x156a/0x1ad0 [obdclass]
 [&amp;lt;ffffffffa0d97d53&amp;gt;] ? lustre_cfg_new+0x2d3/0x6e0 [obdclass]
 [&amp;lt;ffffffffa0da0159&amp;gt;] class_manual_cleanup+0x179/0x6f0 [obdclass]
 [&amp;lt;ffffffffa0d73c7b&amp;gt;] ? class_export_put+0x10b/0x2c0 [obdclass]
 [&amp;lt;ffffffffa18f412d&amp;gt;] osd_obd_disconnect+0x1bd/0x1c0 [osd_zfs]
 [&amp;lt;ffffffffa0da273b&amp;gt;] lustre_put_lsi+0x1ab/0x11a0 [obdclass]
 [&amp;lt;ffffffffa0daacf8&amp;gt;] lustre_common_put_super+0x5d8/0xbe0 [obdclass]
 [&amp;lt;ffffffffa0dd8c70&amp;gt;] server_put_super+0x180/0xe40 [obdclass]
 [&amp;lt;ffffffff8118b31b&amp;gt;] generic_shutdown_super+0x5b/0xe0
 [&amp;lt;ffffffff8118b406&amp;gt;] kill_anon_super+0x16/0x60
 [&amp;lt;ffffffffa0da2016&amp;gt;] lustre_kill_super+0x36/0x60 [obdclass]
 [&amp;lt;ffffffff8118bba7&amp;gt;] deactivate_super+0x57/0x80
 [&amp;lt;ffffffff811aabdf&amp;gt;] mntput_no_expire+0xbf/0x110
 [&amp;lt;ffffffff811ab72b&amp;gt;] sys_umount+0x7b/0x3a0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="24826">LU-5105</key>
            <summary>Test failure sanity-lfsck test_18d: umount mds hung</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="2">Won&apos;t Fix</resolution>
                                        <assignee username="yong.fan">nasf</assignee>
                                    <reporter username="utopiabound">Nathaniel Clark</reporter>
                        <labels>
                            <label>zfs</label>
                    </labels>
                <created>Tue, 27 May 2014 14:23:48 +0000</created>
                <updated>Fri, 23 Oct 2015 14:45:41 +0000</updated>
                            <resolved>Fri, 23 Oct 2015 14:45:41 +0000</resolved>
                                    <version>Lustre 2.6.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                                                                            <comments>
                            <comment id="96004" author="yong.fan" created="Thu, 9 Oct 2014 09:54:12 +0000"  >&lt;p&gt;Another failure instance:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/adb9bee6-4b17-11e4-941e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/adb9bee6-4b17-11e4-941e-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="102703" author="yong.fan" created="Wed, 7 Jan 2015 00:46:28 +0000"  >&lt;p&gt;For the failures in &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/adb9bee6-4b17-11e4-941e-5254006e85c2:&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/adb9bee6-4b17-11e4-941e-5254006e85c2:&lt;/a&gt;&lt;br/&gt;
sanity-lfsck test_18c failure is another instance of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5848&quot; title=&quot;sanity-lfsck test_18e:  MDS is not the expected &amp;#39;completed&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5848&quot;&gt;&lt;del&gt;LU-5848&lt;/del&gt;&lt;/a&gt;. test_18d hung when umount the mds4 because the former lfsck assistant thread for test_18c was blocked at dt_sync(), so it is the side effect of test_18c failure (&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5848&quot; title=&quot;sanity-lfsck test_18e:  MDS is not the expected &amp;#39;completed&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5848&quot;&gt;&lt;del&gt;LU-5848&lt;/del&gt;&lt;/a&gt;), not the same as the original ZFS based test_18d hung in &lt;a href=&quot;http://maloo.whamcloud.com/test_sets/6c3d597e-e351-11e3-93d9-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://maloo.whamcloud.com/test_sets/6c3d597e-e351-11e3-93d9-52540035b04c&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="102854" author="yong.fan" created="Thu, 8 Jan 2015 11:18:14 +0000"  >&lt;p&gt;Alex, do you have any idea about umount MDS hung for ZFS based backend?&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6c3d597e-e351-11e3-93d9-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6c3d597e-e351-11e3-93d9-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="102881" author="bzzz" created="Thu, 8 Jan 2015 15:55:43 +0000"  >&lt;p&gt;well, in that specific case it looks like some dnode was still referenced:&lt;br/&gt;
Call Trace:&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8108410c&amp;gt;&amp;#93;&lt;/span&gt; ? lock_timer_base+0x3c/0x70&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff815291c2&amp;gt;&amp;#93;&lt;/span&gt; schedule_timeout+0x192/0x2e0&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81084220&amp;gt;&amp;#93;&lt;/span&gt; ? process_timeout+0x0/0x10&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8152932e&amp;gt;&amp;#93;&lt;/span&gt; schedule_timeout_uninterruptible+0x1e/0x20&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa123ddea&amp;gt;&amp;#93;&lt;/span&gt; dnode_special_close+0x2a/0x60 &lt;span class=&quot;error&quot;&gt;&amp;#91;zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa1232652&amp;gt;&amp;#93;&lt;/span&gt; dmu_objset_evict+0x92/0x400 &lt;span class=&quot;error&quot;&gt;&amp;#91;zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa1243c50&amp;gt;&amp;#93;&lt;/span&gt; dsl_dataset_evict+0x30/0x1b0 &lt;span class=&quot;error&quot;&gt;&amp;#91;zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa1223dd9&amp;gt;&amp;#93;&lt;/span&gt; dbuf_evict_user+0x49/0x80 &lt;span class=&quot;error&quot;&gt;&amp;#91;zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa1225087&amp;gt;&amp;#93;&lt;/span&gt; dbuf_rele_and_unlock+0xf7/0x1e0 &lt;span class=&quot;error&quot;&gt;&amp;#91;zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa12254e0&amp;gt;&amp;#93;&lt;/span&gt; dmu_buf_rele+0x30/0x40 &lt;span class=&quot;error&quot;&gt;&amp;#91;zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa1249170&amp;gt;&amp;#93;&lt;/span&gt; dsl_dataset_disown+0xb0/0x1d0 &lt;span class=&quot;error&quot;&gt;&amp;#91;zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa1231751&amp;gt;&amp;#93;&lt;/span&gt; dmu_objset_disown+0x11/0x20 &lt;span class=&quot;error&quot;&gt;&amp;#91;zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa18f690e&amp;gt;&amp;#93;&lt;/span&gt; udmu_objset_close+0x2e/0x40 &lt;span class=&quot;error&quot;&gt;&amp;#91;osd_zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa18f4f86&amp;gt;&amp;#93;&lt;/span&gt; osd_device_fini+0x366/0x5c0 &lt;span class=&quot;error&quot;&gt;&amp;#91;osd_zfs&amp;#93;&lt;/span&gt;&lt;/p&gt;

&lt;p&gt;so the metadnode can&apos;t go blocking umount.&lt;/p&gt;

&lt;p&gt;but this seem to be some old version? we don&apos;t have udmu wrappers anymore.&lt;/p&gt;</comment>
                            <comment id="102882" author="yong.fan" created="Thu, 8 Jan 2015 16:11:16 +0000"  >&lt;p&gt;Yes, there seems no way to know which dnode still be referenced. The original issue was hit by the patch &lt;a href=&quot;http://review.whamcloud.com/#/c/10223/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/10223/&lt;/a&gt;. I am not sure whether it is such patch special or not. But since such patch has been landed to master, there should be similar trouble on master branch. But it is also possible that such trouble has been fixed by other patch occasionally.&lt;/p&gt;</comment>
                            <comment id="103148" author="bzzz" created="Sun, 11 Jan 2015 04:49:19 +0000"  >&lt;p&gt;a patch to dump referenced dnodes.&lt;/p&gt;</comment>
                            <comment id="131369" author="yong.fan" created="Fri, 23 Oct 2015 14:45:41 +0000"  >&lt;p&gt;Close it since the issue only has been reported on very old version.&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="16688" name="0001-dump-referenced-dnodes-at-umount.patch" size="2559" author="bzzz" created="Sun, 11 Jan 2015 04:49:19 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwn3r:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>14085</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>