<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:12:55 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-7903] recovery-small test_23: hang on umount</title>
                <link>https://jira.whamcloud.com/browse/LU-7903</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Bob Glossman &amp;lt;bob.glossman@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/f72c79d4-efbb-11e5-8ddc-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/f72c79d4-efbb-11e5-8ddc-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_23 failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;test failed to respond and timed out
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Please provide additional information about the failure here.&lt;/p&gt;

&lt;p&gt;Info required for matching: recovery-small 23&lt;/p&gt;</description>
                <environment></environment>
        <key id="35516">LU-7903</key>
            <summary>recovery-small test_23: hang on umount</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="niu">Niu Yawei</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Tue, 22 Mar 2016 15:52:52 +0000</created>
                <updated>Mon, 12 Dec 2016 20:21:56 +0000</updated>
                            <resolved>Fri, 14 Oct 2016 00:56:51 +0000</resolved>
                                    <version>Lustre 2.9.0</version>
                                    <fixVersion>Lustre 2.9.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>18</watches>
                                                                            <comments>
                            <comment id="146468" author="bogl" created="Tue, 22 Mar 2016 15:53:31 +0000"  >&lt;p&gt;this may be a dup of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7172&quot; title=&quot;replay-single test_70d hung on MDT unmount&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7172&quot;&gt;&lt;del&gt;LU-7172&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="147730" author="rhenwood" created="Mon, 4 Apr 2016 15:58:50 +0000"  >&lt;p&gt;just seen on Master running review-dne-part-1: &lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/47205c78-f91f-11e5-a22e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/47205c78-f91f-11e5-a22e-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;I&apos;ve just seen this and it doesn&apos;t look like a dup of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7172&quot; title=&quot;replay-single test_70d hung on MDT unmount&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7172&quot;&gt;&lt;del&gt;LU-7172&lt;/del&gt;&lt;/a&gt; to me: the call trace from the MDT starts:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;22:02:10:Call Trace:
22:02:10: [] schedule_timeout+0x192/0x2e0
22:02:10: [] ? process_timeout+0x0/0x10
22:02:10: [] obd_exports_barrier+0xb6/0x190 [obdclass]
22:02:10: [] mdt_device_fini+0x6ab/0x12e0 [mdt]
22:02:10: [] ? class_disconnect_exports+0x17d/0x2f0 [obdclass]
22:02:10: [] class_cleanup+0x572/0xd20 [obdclass]
22:02:10: [] ? class_name2dev+0x56/0xe0 [obdclass]
22:02:10: [] class_process_config+0x1b66/0x24c0 [obdclass]
22:02:10: [] ? libcfs_debug_msg+0x41/0x50 [libcfs]
22:02:10: [] class_manual_cleanup+0x4bf/0xc90 [obdclass]
22:02:10: [] ? class_name2dev+0x56/0xe0 [obdclass]
22:02:10: [] server_put_super+0x8bc/0xcd0 [obdclass]
22:02:10: [] generic_shutdown_super+0x5b/0xe0
22:02:10: [] kill_anon_super+0x16/0x60
22:02:10: [] lustre_kill_super+0x36/0x60 [obdclass]
22:02:10: [] deactivate_super+0x57/0x80
22:02:10: [] mntput_no_expire+0xbf/0x110
22:02:10: [] sys_umount+0x7b/0x3a0
22:02:10: [] system_call_fastpath+0x16/0x1b
22:37:59:********** Timeout by autotest system **********
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="149811" author="bfaccini" created="Fri, 22 Apr 2016 11:47:00 +0000"  >&lt;p&gt;+1 at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/4e388a9a-0869-11e6-b5f1-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/4e388a9a-0869-11e6-b5f1-5254006e85c2&lt;/a&gt; during a master patch review.&lt;/p&gt;</comment>
                            <comment id="149823" author="emoly.liu" created="Fri, 22 Apr 2016 13:54:07 +0000"  >&lt;p&gt;Another failure at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/5626d550-0845-11e6-9e5d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/5626d550-0845-11e6-9e5d-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="149827" author="bogl" created="Fri, 22 Apr 2016 14:12:50 +0000"  >&lt;p&gt;another on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d2808eda-086c-11e6-9e5d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d2808eda-086c-11e6-9e5d-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="150265" author="rhenwood" created="Tue, 26 Apr 2016 17:35:33 +0000"  >&lt;p&gt;another failure on master, from review-dne-part-1 &lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/7d9d1360-0b0b-11e6-855a-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/7d9d1360-0b0b-11e6-855a-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="154162" author="standan" created="Tue, 31 May 2016 19:20:22 +0000"  >&lt;p&gt;Another failure on master , from review-dne-part-1 : &lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/48332280-248f-11e6-aac3-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/48332280-248f-11e6-aac3-5254006e85c2&lt;/a&gt;&lt;br/&gt;
This issue has occurred around 46 times in the past 30 days.&lt;/p&gt;</comment>
                            <comment id="154718" author="yujian" created="Mon, 6 Jun 2016 12:54:16 +0000"  >&lt;p&gt;More failures on master branch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/54653454-2ba5-11e6-80b9-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/54653454-2ba5-11e6-80b9-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/61f7efde-2b3d-11e6-a0ce-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/61f7efde-2b3d-11e6-a0ce-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="155643" author="laisiyao" created="Tue, 14 Jun 2016 08:45:57 +0000"  >&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Jun  5 15:27:19 trevis-10vm4 kernel: INFO: task umount:3347 blocked for more than 120 seconds.
Jun  5 15:27:19 trevis-10vm4 kernel: &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
Jun  5 15:27:19 trevis-10vm4 kernel: umount          D 000000000000d918     0  3347   3346 0x00000080
Jun  5 15:27:19 trevis-10vm4 kernel: ffff88004d367aa0 0000000000000082 ffff880029f20b80 ffff88004d367fd8
Jun  5 15:27:19 trevis-10vm4 kernel: ffff88004d367fd8 ffff88004d367fd8 ffff880029f20b80 ffffffff81daaa00
Jun  5 15:27:19 trevis-10vm4 kernel: ffff88004d367ad0 0000000101bb1f02 ffffffff81daaa00 000000000000d918
Jun  5 15:27:19 trevis-10vm4 kernel: Call Trace:
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffff8163b349&amp;gt;] schedule+0x29/0x70
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffff81638fa5&amp;gt;] schedule_timeout+0x175/0x2d0
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffff8108bf30&amp;gt;] ? internal_add_timer+0x70/0x70
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffffa07b53d4&amp;gt;] obd_exports_barrier+0xc4/0x1a0 [obdclass]
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffffa0de06a0&amp;gt;] mdt_device_fini+0x310/0xfc0 [mdt]
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffffa07ce3cc&amp;gt;] class_cleanup+0x8dc/0xd70 [obdclass]
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffffa07d0dac&amp;gt;] class_process_config+0x1e2c/0x2f70 [obdclass]
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffffa05ee957&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffffa07d1fdf&amp;gt;] class_manual_cleanup+0xef/0x810 [obdclass]
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffffa08032de&amp;gt;] server_put_super+0x8de/0xcd0 [obdclass]
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffff811e0ca6&amp;gt;] generic_shutdown_super+0x56/0xe0
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffff811e1082&amp;gt;] kill_anon_super+0x12/0x20
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffffa07d5742&amp;gt;] lustre_kill_super+0x32/0x50 [obdclass]
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffff811e1439&amp;gt;] deactivate_locked_super+0x49/0x60
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffff811e1a36&amp;gt;] deactivate_super+0x46/0x60
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffff811fe9a5&amp;gt;] mntput_no_expire+0xc5/0x120
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffff811ffadf&amp;gt;] SyS_umount+0x9f/0x3c0
Jun  5 15:27:19 trevis-10vm4 kernel: [&amp;lt;ffffffff816463c9&amp;gt;] system_call_fastpath+0x16/0x1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;MDT umount hung, but I can&apos;t find anything useful in the logs. dump_exports() was called long before the log dump, so its output is missing from the logs; I&apos;m wondering whether this can be improved.&lt;/p&gt;

&lt;p&gt;Besides, I can&apos;t reproduce it in my local test environment.&lt;/p&gt;
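
&lt;p&gt;For context, the frame the umount thread is stuck in is obd_exports_barrier(), which blocks until every disconnected export on obd_unlinked_exports has dropped its last reference. Roughly (a simplified sketch, not the verbatim source):&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;/* Sketch: umount waits here until obd_unlinked_exports drains,
 * so a single leaked export reference hangs umount indefinitely. */
static void exports_barrier_sketch(struct obd_device *obd)
{
        int waited = 2;

        spin_lock(&amp;amp;obd-&amp;gt;obd_dev_lock);
        while (!list_empty(&amp;amp;obd-&amp;gt;obd_unlinked_exports)) {
                spin_unlock(&amp;amp;obd-&amp;gt;obd_dev_lock);
                schedule_timeout_uninterruptible(cfs_time_seconds(waited));
                if (waited &amp;gt; 5)  /* warn with a doubling interval: 8s, 16s, ... */
                        CERROR(&quot;%s: waiting for obd_unlinked_exports more than %d seconds\n&quot;,
                               obd-&amp;gt;obd_name, waited);
                waited *= 2;
                spin_lock(&amp;amp;obd-&amp;gt;obd_dev_lock);
        }
        spin_unlock(&amp;amp;obd-&amp;gt;obd_dev_lock);
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>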
                            <comment id="157605" author="bfaccini" created="Mon, 4 Jul 2016 15:05:07 +0000"  >&lt;p&gt;+1 on master : &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/0709014c-407f-11e6-acf3-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/0709014c-407f-11e6-acf3-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="159080" author="bogl" created="Mon, 18 Jul 2016 14:16:00 +0000"  >&lt;p&gt;another on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/5e5bed64-4ce7-11e6-a80f-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/5e5bed64-4ce7-11e6-a80f-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="159858" author="bfaccini" created="Tue, 26 Jul 2016 07:44:29 +0000"  >&lt;p&gt;+1 on master at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/ce2d9280-52fc-11e6-bf87-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/ce2d9280-52fc-11e6-bf87-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="159859" author="emoly.liu" created="Tue, 26 Jul 2016 08:05:29 +0000"  >&lt;p&gt;Another failure on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/9e634b92-5263-11e6-bf87-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/9e634b92-5263-11e6-bf87-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="160269" author="jgmitter" created="Fri, 29 Jul 2016 05:22:34 +0000"  >&lt;p&gt;Niu will be cooking a debug patch.&lt;/p&gt;</comment>
                            <comment id="160276" author="gerrit" created="Fri, 29 Jul 2016 07:02:07 +0000"  >&lt;p&gt;Niu Yawei (yawei.niu@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/21599&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21599&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7903&quot; title=&quot;recovery-small test_23: hang on umount&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7903&quot;&gt;&lt;del&gt;LU-7903&lt;/del&gt;&lt;/a&gt; mdt: dump exports information on console&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 0fd2cb8ad3591fc1db3f077ba466b26d0a6171e8&lt;/p&gt;</comment>
                            <comment id="160277" author="niu" created="Fri, 29 Jul 2016 07:05:44 +0000"  >&lt;p&gt;Unfortunately, the exports information were always being truncated. I cooked a debug patch which dump the exports information on console: &lt;a href=&quot;http://review.whamcloud.com/21599&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21599&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="161704" author="yong.fan" created="Fri, 12 Aug 2016 02:33:46 +0000"  >&lt;p&gt;Hit it on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/202938f2-600c-11e6-906c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/202938f2-600c-11e6-906c-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="161706" author="emoly.liu" created="Fri, 12 Aug 2016 02:47:35 +0000"  >&lt;p&gt;Hit on master: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d7dfaba2-5e1d-11e6-b2e2-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d7dfaba2-5e1d-11e6-b2e2-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="161820" author="bfaccini" created="Sat, 13 Aug 2016 14:29:27 +0000"  >&lt;p&gt;+1 on master : &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/7d8efe92-60ec-11e6-b2e2-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/7d8efe92-60ec-11e6-b2e2-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="161894" author="niu" created="Mon, 15 Aug 2016 15:18:27 +0000"  >&lt;p&gt;The problem is reproduced with more debug information: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/34ddc070-605b-11e6-906c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/34ddc070-605b-11e6-906c-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;02:35:51:[ 1394.285106] Lustre: lustre-MDT0000 is waiting &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; obd_unlinked_exports more than 8 seconds. The obd refcount = 5. Is it stuck?
02:35:51:[ 1394.295103] Lustre: lustre-MDT0000: UNLINKED ffff8800429cb400 ec87ba17-1f41-6540-2071-22cb49f34d84 10.9.5.174@tcp 1 (0 0 0) 1 0 0 0:           (&lt;span class=&quot;code-keyword&quot;&gt;null&lt;/span&gt;)  4294967369
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Looks like 10.9.5.174 is a Lustre client; probably the export is held by some lock? I&apos;ll scrutinize that part of the code later.&lt;/p&gt;</comment>
                            <comment id="161991" author="niu" created="Tue, 16 Aug 2016 08:52:50 +0000"  >&lt;p&gt;The debug message shows the export refcount is only 1, which means no locks attached to the export, I&apos;m afraid that we missed putting export reference somewhere. This is one place in the hsm code:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;diff --git a/lustre/mdt/mdt_hsm_cdt_agent.c b/lustre/mdt/mdt_hsm_cdt_agent.c
index 38bc739..dc8e9ad 100644
--- a/lustre/mdt/mdt_hsm_cdt_agent.c
+++ b/lustre/mdt/mdt_hsm_cdt_agent.c
@@ -574,6 +574,8 @@ &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; mdt_hsm_agent_send(struct mdt_thread_info *mti,
         */
        exp = cfs_hash_lookup(mdt2obd_dev(mdt)-&amp;gt;obd_uuid_hash, &amp;amp;uuid);
        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (exp == NULL || exp-&amp;gt;exp_disconnected) {
+               &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (exp != NULL)
+                       class_export_put(exp);
                &lt;span class=&quot;code-comment&quot;&gt;/* This should clean up agents on evicted exports */&lt;/span&gt;
                rc = -ENOENT;
                CERROR(&lt;span class=&quot;code-quote&quot;&gt;&quot;%s: agent uuid (%s) not found, unregistering:&quot;&lt;/span&gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Not sure if it&apos;s the real cause of this failure, but it needs to be fixed anyway.&lt;/p&gt;
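
&lt;p&gt;To state the rule the patch restores (a condensed sketch; the surrounding error handling and the &apos;out&apos; label are assumed): cfs_hash_lookup() returns the export with an extra reference held, so every exit path, including the error path, must drop it with class_export_put():&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;exp = cfs_hash_lookup(mdt2obd_dev(mdt)-&amp;gt;obd_uuid_hash, &amp;amp;uuid);
if (exp == NULL || exp-&amp;gt;exp_disconnected) {
        if (exp != NULL)
                class_export_put(exp);  /* previously missing: leaked ref */
        rc = -ENOENT;
        goto out;
}
/* ... use exp ... */
class_export_put(exp);                  /* balance the lookup reference */
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>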
                            <comment id="161992" author="gerrit" created="Tue, 16 Aug 2016 09:02:07 +0000"  >&lt;p&gt;Niu Yawei (yawei.niu@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/21942&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21942&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7903&quot; title=&quot;recovery-small test_23: hang on umount&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7903&quot;&gt;&lt;del&gt;LU-7903&lt;/del&gt;&lt;/a&gt; hsm: leaked export refcount&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: dab8cd2e6918eb11a5d656ff2687ff9067620786&lt;/p&gt;</comment>
                            <comment id="162593" author="yong.fan" created="Sat, 20 Aug 2016 00:07:35 +0000"  >&lt;p&gt;Another failure instance on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/db682194-6662-11e6-aa74-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/db682194-6662-11e6-aa74-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="162620" author="gerrit" created="Mon, 22 Aug 2016 03:45:06 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/21942/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21942/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7903&quot; title=&quot;recovery-small test_23: hang on umount&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7903&quot;&gt;&lt;del&gt;LU-7903&lt;/del&gt;&lt;/a&gt; hsm: leaked export refcount&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 6d57484b718a986aea0ac83802b1b966df576ed8&lt;/p&gt;</comment>
                            <comment id="162648" author="bfaccini" created="Mon, 22 Aug 2016 14:34:32 +0000"  >&lt;p&gt;Hello Niu,&lt;br/&gt;
I also encountered a similar problem with one of my master reviews at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/4168f756-60ec-11e6-b2e2-5254006e85c2f&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/4168f756-60ec-11e6-b2e2-5254006e85c2f&lt;/a&gt;. The failed session had been linked to this ticket as the cause. But looking at the whole test suite/sequence, there was no previous execution of sanity-hsm, and my own change only affects conf-sanity/test_41c, which ran earlier but with multiple successful MDT stop/fail cycles in between. So I may be wrong, but unless there is some HSM activity outside sanity-hsm in other test suites, I wonder if, as you already suspected, there may be other places in our code where we miss putting an export reference.&lt;/p&gt;</comment>
                            <comment id="163025" author="niu" created="Wed, 24 Aug 2016 16:10:55 +0000"  >&lt;p&gt;Bruno, indeed, I agree with you, it&apos;s not necessary caused by the reference leak in hsm code, I&apos;ve updated the debug patch and hope it can get us more useful information once it&apos;s hit again.&lt;/p&gt;</comment>
                            <comment id="165225" author="gerrit" created="Thu, 8 Sep 2016 02:05:43 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/21599/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21599/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7903&quot; title=&quot;recovery-small test_23: hang on umount&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7903&quot;&gt;&lt;del&gt;LU-7903&lt;/del&gt;&lt;/a&gt; mdt: dump exports information on console&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: f1c9166dc3172beecbd659686fe419f52138cefd&lt;/p&gt;</comment>
                            <comment id="165251" author="pjones" created="Thu, 8 Sep 2016 04:17:24 +0000"  >&lt;p&gt;Landed for 2.9&lt;/p&gt;</comment>
                            <comment id="165262" author="niu" created="Thu, 8 Sep 2016 05:20:51 +0000"  >&lt;p&gt;The landed patch is to improve the debug information, not fixing the real problem.&lt;/p&gt;</comment>
                            <comment id="165795" author="yong.fan" created="Tue, 13 Sep 2016 04:25:38 +0000"  >&lt;p&gt;Another failure instance on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/62e575f2-7682-11e6-b08e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/62e575f2-7682-11e6-b08e-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="165888" author="jamesanunez" created="Tue, 13 Sep 2016 18:02:30 +0000"  >&lt;p&gt;Niu, &lt;br/&gt;
Here is a failure that should include your debug patch: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/38e4f102-79bf-11e6-b08e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/38e4f102-79bf-11e6-b08e-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;From the MDS 1 log:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;09:49:55:[26605.283675] Lustre: lustre-MDT0000-osp-MDT0002: Connection to lustre-MDT0000 (at 0@lo) was lost; in progress operations using this service will wait for recovery to complete
09:49:55:[26605.288046] Lustre: Skipped 24 previous similar messages
09:49:55:[26607.056252] LustreError: 30936:0:(client.c:1168:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff880046125200 x1545343953854000/t0(0) o13-&amp;gt;lustre-OST0005-osc-MDT0000@10.9.4.148@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
09:49:55:[26607.064057] LustreError: 30936:0:(client.c:1168:ptlrpc_import_delay_req()) Skipped 23 previous similar messages
09:49:55:[26618.703121] Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 8 seconds. The obd refcount = 5. Is it stuck?
09:49:55:[26618.711819] Lustre: lustre-MDT0000: UNLINKED ffff8800471ee000 dbac7ff5-8b18-2c65-a249-73e04ce7d6a8 10.9.4.146@tcp 1 (0 0 0) 1 0 0 0:           (null)  4294967369 stale:0
09:49:55:[26634.716121] Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 16 seconds. The obd refcount = 5. Is it stuck?
09:49:55:[26634.723386] Lustre: lustre-MDT0000: UNLINKED ffff8800471ee000 dbac7ff5-8b18-2c65-a249-73e04ce7d6a8 10.9.4.146@tcp 1 (0 0 0) 1 0 0 0:           (null)  4294967369 stale:0
09:49:55:[26666.728048] Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 32 seconds. The obd refcount = 5. Is it stuck?
09:49:55:[26666.734510] Lustre: lustre-MDT0000: UNLINKED ffff8800471ee000 dbac7ff5-8b18-2c65-a249-73e04ce7d6a8 10.9.4.146@tcp 1 (0 0 0) 1 0 0 0:           (null)  4294967369 stale:0
09:49:55:[26680.276322] Lustre: lustre-MDT0000: Not available for connect from 0@lo (stopping)
09:49:55:[26680.281813] Lustre: Skipped 231 previous similar messages
09:49:55:[26730.739132] Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 64 seconds. The obd refcount = 5. Is it stuck?
09:49:55:[26730.745251] Lustre: lustre-MDT0000: UNLINKED ffff8800471ee000 dbac7ff5-8b18-2c65-a249-73e04ce7d6a8 10.9.4.146@tcp 1 (0 0 0) 1 0 0 0:           (null)  4294967369 stale:0
09:49:55:[26831.719420] Lustre: lustre-MDT0000: Not available for connect from 10.9.4.146@tcp (stopping)
09:49:55:[26831.723246] Lustre: Skipped 481 previous similar messages
09:49:55:[26858.750122] Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 128 seconds. The obd refcount = 5. Is it stuck?
09:49:55:[26858.761521] Lustre: lustre-MDT0000: UNLINKED ffff8800471ee000 dbac7ff5-8b18-2c65-a249-73e04ce7d6a8 10.9.4.146@tcp 1 (0 0 0) 1 0 0 0:           (null)  4294967369 stale:0
09:49:55:[27000.758142] INFO: task umount:13385 blocked for more than 120 seconds.
09:49:55:[27000.762771] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
09:49:55:[27000.765657] umount          D 000000000000f918     0 13385  13384 0x00000080
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Another at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/1dbc2ffc-7b23-11e6-8afd-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/1dbc2ffc-7b23-11e6-8afd-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="166337" author="niu" created="Mon, 19 Sep 2016 02:58:27 +0000"  >&lt;p&gt;Thank you for the information, James, I&apos;m looking at this issue. The console log shows that there is no pending RPC, locks, or commit callback is holding the export, so I&apos;m wondering if it because someone missed puting reference.&lt;/p&gt;</comment>
                            <comment id="166538" author="adilger" created="Tue, 20 Sep 2016 14:10:12 +0000"  >&lt;p&gt;Is this bug a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7858&quot; title=&quot;test_23: &amp;#39;test failed to respond and timed out&amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7858&quot;&gt;&lt;del&gt;LU-7858&lt;/del&gt;&lt;/a&gt;?&lt;/p&gt;</comment>
                            <comment id="167006" author="niu" created="Fri, 23 Sep 2016 04:03:04 +0000"  >&lt;p&gt;Looks there is a race can cause export reference leak on difficult reply (and when rs_no_ack enabled):&lt;/p&gt;

&lt;p&gt;MDT umount calls server_disconnect_export() to disconnect its exports. server_disconnect_exports() tries to complete all of its outstanding (difficult) replies, which ends up calling ptlrpc_handle_rs(), where the reply state is removed from the uncommitted list and LNetMDUnlink() is called to trigger an unlink event. reply_out_callback() catches the event and tries to finalize the reply state (releasing the export held by the reply state) with the following code:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;                spin_lock(&amp;amp;svcpt-&amp;gt;scp_rep_lock);
                spin_lock(&amp;amp;rs-&amp;gt;rs_lock);

                rs-&amp;gt;rs_on_net = 0;
                &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (!rs-&amp;gt;rs_no_ack ||
                    rs-&amp;gt;rs_transno &amp;lt;=
                    rs-&amp;gt;rs_export-&amp;gt;exp_obd-&amp;gt;obd_last_committed)
                        ptlrpc_schedule_difficult_reply(rs);

                spin_unlock(&amp;amp;rs-&amp;gt;rs_lock);
                spin_unlock(&amp;amp;svcpt-&amp;gt;scp_rep_lock);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;We can see that if rs_no_ack is false (COS not enabled), or the transaction has been committed, the reply state is finalized here; otherwise it relies on the commit callback. However, for an export that has already been disconnected (by umount or for whatever reason), the reply state has already been removed from the uncommitted list, so nobody will ever finalize it (until the ptlrpc module is unloaded). I think it is necessary to add one more check here for whether the reply state is still on the uncommitted list, as sketched below.&lt;/p&gt;
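
&lt;p&gt;A sketch of what that extra check could look like (names as in the snippet above; using list_empty() on rs_obd_list is my assumption of how &quot;still on the uncommitted list&quot; would be tested):&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;                spin_lock(&amp;amp;svcpt-&amp;gt;scp_rep_lock);
                spin_lock(&amp;amp;rs-&amp;gt;rs_lock);

                rs-&amp;gt;rs_on_net = 0;
                if (!rs-&amp;gt;rs_no_ack ||
                    rs-&amp;gt;rs_transno &amp;lt;=
                    rs-&amp;gt;rs_export-&amp;gt;exp_obd-&amp;gt;obd_last_committed ||
                    list_empty(&amp;amp;rs-&amp;gt;rs_obd_list))
                        /* already off the uncommitted list: no commit
                         * callback will ever run, so finalize it here */
                        ptlrpc_schedule_difficult_reply(rs);

                spin_unlock(&amp;amp;rs-&amp;gt;rs_lock);
                spin_unlock(&amp;amp;svcpt-&amp;gt;scp_rep_lock);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;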

&lt;p&gt;COS is usually not enabled, so this defect won&apos;t be triggered easily; however, with the change from &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3538&quot; title=&quot;commit on share for cross-MDT operation.&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3538&quot;&gt;&lt;del&gt;LU-3538&lt;/del&gt;&lt;/a&gt;, rs_no_ack is set unintentionally regardless of whether COS is enabled:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;@@ -2530,24 +2589,27 @@ &lt;span class=&quot;code-keyword&quot;&gt;static&lt;/span&gt; void mdt_save_lock(struct mdt_thread_info *info, struct lustre_handle *h,
                        struct mdt_device *mdt = info-&amp;gt;mti_mdt;
                        struct ldlm_lock *lock = ldlm_handle2lock(h);
                        struct ptlrpc_request *req = mdt_info_req(info);
-                       &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; no_ack = 0;
+                       &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; cos;
+
+                       cos = (mdt_cos_is_enabled(mdt) ||
+                              mdt_slc_is_enabled(mdt));

                        LASSERTF(lock != NULL, &lt;span class=&quot;code-quote&quot;&gt;&quot;no lock &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; cookie &quot;&lt;/span&gt;LPX64&lt;span class=&quot;code-quote&quot;&gt;&quot;\n&quot;&lt;/span&gt;,
                                 h-&amp;gt;cookie);
+
                        /* there is no request &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; mdt_object_unlock() is called
                         * from mdt_export_cleanup()-&amp;gt;mdt_add_dirty_flag() */
                        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (likely(req != NULL)) {
                                CDEBUG(D_HA, &lt;span class=&quot;code-quote&quot;&gt;&quot;request = %p reply state = %p&quot;&lt;/span&gt;
                                       &lt;span class=&quot;code-quote&quot;&gt;&quot; transno = &quot;&lt;/span&gt;LPD64&lt;span class=&quot;code-quote&quot;&gt;&quot;\n&quot;&lt;/span&gt;, req,
                                       req-&amp;gt;rq_reply_state, req-&amp;gt;rq_transno);
-                               &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (mdt_cos_is_enabled(mdt)) {
-                                       no_ack = 1;
+                               &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (cos) {
                                        ldlm_lock_downgrade(lock, LCK_COS);
                                        mode = LCK_COS;
                                }
-                               ptlrpc_save_lock(req, h, mode, no_ack);
+                               ptlrpc_save_lock(req, h, mode, cos);
                        } &lt;span class=&quot;code-keyword&quot;&gt;else&lt;/span&gt; {
-                               ldlm_lock_decref(h, mode);
+                               mdt_fid_unlock(h, mode);
                        }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;That could be why it is triggered more easily today.&lt;/p&gt;</comment>
                            <comment id="167009" author="gerrit" created="Fri, 23 Sep 2016 04:10:31 +0000"  >&lt;p&gt;Niu Yawei (yawei.niu@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/22696&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/22696&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7903&quot; title=&quot;recovery-small test_23: hang on umount&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7903&quot;&gt;&lt;del&gt;LU-7903&lt;/del&gt;&lt;/a&gt; ptlrpc: leaked rs on difficult reply&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 802f41d1a78f588ab99ef8e1d081ac3da7cb8551&lt;/p&gt;</comment>
                            <comment id="167020" author="laisiyao" created="Fri, 23 Sep 2016 05:24:20 +0000"  >&lt;p&gt;Indeed, this occurs when REPLY-ACK is disabled.&lt;/p&gt;

&lt;p&gt;In &lt;a href=&quot;http://review.whamcloud.com/#/c/12530/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/12530/&lt;/a&gt;, REPLY-ACK is disabled to save local locks; however, for local operations Commit-on-Share doesn&apos;t take effect, so we may not be able to replay in transno order if a reply is lost. This is another issue; I&apos;ll fix it in another ticket.&lt;/p&gt;</comment>
                            <comment id="167021" author="niu" created="Fri, 23 Sep 2016 05:27:12 +0000"  >&lt;p&gt;The patch &lt;a href=&quot;http://review.whamcloud.com/#/c/12530/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/12530/&lt;/a&gt; is merged in master on Jan 29, 2016, then we search the maloo failures:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/query?commit=Update+results&amp;amp;hosts=&amp;amp;page=9&amp;amp;query_bugs=&amp;amp;status%5B%5D=TIMEOUT&amp;amp;test_node%5Barchitecture_type_id%5D=&amp;amp;test_node%5Bdistribution_type_id%5D=&amp;amp;test_node%5Bfile_system_type_id%5D=&amp;amp;test_node%5Blustre_branch_id%5D=24a6947e-04a9-11e1-bb5f-52540025f9af&amp;amp;test_node%5Bos_type_id%5D=&amp;amp;test_node_network%5Bnetwork_type_id%5D=&amp;amp;test_session%5Bend_date%5D=&amp;amp;test_session%5Bquery_recent_period%5D=&amp;amp;test_session%5Bstart_date%5D=&amp;amp;test_session%5Buser_id%5D=&amp;amp;test_set%5Btest_set_script_id%5D=f36cabd0-32c3-11e0-a61c-52540025f9ae&amp;amp;utf8=%E2%9C%93&amp;amp;warn%5Bnotice%5D=true&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/query?commit=Update+results&amp;amp;hosts=&amp;amp;page=9&amp;amp;query_bugs=&amp;amp;status%5B%5D=TIMEOUT&amp;amp;test_node%5Barchitecture_type_id%5D=&amp;amp;test_node%5Bdistribution_type_id%5D=&amp;amp;test_node%5Bfile_system_type_id%5D=&amp;amp;test_node%5Blustre_branch_id%5D=24a6947e-04a9-11e1-bb5f-52540025f9af&amp;amp;test_node%5Bos_type_id%5D=&amp;amp;test_node_network%5Bnetwork_type_id%5D=&amp;amp;test_session%5Bend_date%5D=&amp;amp;test_session%5Bquery_recent_period%5D=&amp;amp;test_session%5Bstart_date%5D=&amp;amp;test_session%5Buser_id%5D=&amp;amp;test_set%5Btest_set_script_id%5D=f36cabd0-32c3-11e0-a61c-52540025f9ae&amp;amp;utf8=%E2%9C%93&amp;amp;warn%5Bnotice%5D=true&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;This bug showed up immediately after that time point (it was reported as &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7172&quot; title=&quot;replay-single test_70d hung on MDT unmount&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7172&quot;&gt;&lt;del&gt;LU-7172&lt;/del&gt;&lt;/a&gt; before, so I think &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7172&quot; title=&quot;replay-single test_70d hung on MDT unmount&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7172&quot;&gt;&lt;del&gt;LU-7172&lt;/del&gt;&lt;/a&gt; is a dup of this one).&lt;/p&gt;</comment>
                            <comment id="169566" author="gerrit" created="Thu, 13 Oct 2016 23:36:24 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/22696/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/22696/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7903&quot; title=&quot;recovery-small test_23: hang on umount&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7903&quot;&gt;&lt;del&gt;LU-7903&lt;/del&gt;&lt;/a&gt; ptlrpc: leaked rs on difficult reply&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: bd1441b92e31cd3c01cc2453aa627d139a1207f7&lt;/p&gt;</comment>
                            <comment id="169583" author="pjones" created="Fri, 14 Oct 2016 00:56:51 +0000"  >&lt;p&gt;Landed for 2.9&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="31538">LU-7022</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="32166">LU-7172</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="35229">LU-7858</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzy5af:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>