<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:40:08 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4152]  layout locks can cause deadlock</title>
                <link>https://jira.whamcloud.com/browse/LU-4152</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1876&quot; title=&quot;Layout Lock Server Patch Landings to Master&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1876&quot;&gt;&lt;del&gt;LU-1876&lt;/del&gt;&lt;/a&gt; adds mdt_object_open_lock() which acquires lock in 2 steps for layout locks.&lt;br/&gt;
A deadlock is possible since it isn&apos;t atomic and ibits locks are reprocessed until first blocking lock found.&lt;/p&gt;

&lt;p&gt;Such situation was hit with mdt_reint_open() &amp;amp; mdt_intent_getattr()&lt;/p&gt;

&lt;p&gt;mdt_reint_open()-&amp;gt;mdt_open_by_fid_lock() takes first part of the lock (ibits=5), &lt;br/&gt;
mdt_intent_getattr() tries to obtain lock (ibits=17)&lt;br/&gt;
mdt_open_by_fid_lock() tries to obtain second part but fails due to some conflict with another layout lock2. During cancellation of lock2 only getattr lock is reprocessed.&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/#/c/7148/1&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/7148/1&lt;/a&gt; can help, but it is better to fix mdt_open_by_fid_lock()&lt;/p&gt;</description>
                <environment>le </environment>
        <key id="21658">LU-4152</key>
            <summary> layout locks can cause deadlock</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="green">Oleg Drokin</assignee>
                                    <reporter username="askulysh">Andriy Skulysh</reporter>
                        <labels>
                            <label>HSM</label>
                    </labels>
                <created>Mon, 28 Oct 2013 15:24:25 +0000</created>
                <updated>Tue, 16 Dec 2014 14:57:20 +0000</updated>
                            <resolved>Wed, 12 Feb 2014 17:47:53 +0000</resolved>
                                    <version>Lustre 2.5.0</version>
                    <version>Lustre 2.6.0</version>
                                    <fixVersion>Lustre 2.6.0</fixVersion>
                    <fixVersion>Lustre 2.5.1</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>14</watches>
                                                                            <comments>
                            <comment id="70021" author="jlevi" created="Mon, 28 Oct 2013 16:27:23 +0000"  >&lt;p&gt;Duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3601&quot; title=&quot;HSM release causes running restore to hang, hangs itself&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3601&quot;&gt;&lt;del&gt;LU-3601&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="70044" author="jhammond" created="Mon, 28 Oct 2013 17:47:16 +0000"  >&lt;p&gt;Not a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3601&quot; title=&quot;HSM release causes running restore to hang, hangs itself&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3601&quot;&gt;&lt;del&gt;LU-3601&lt;/del&gt;&lt;/a&gt; since that issue describes a similar problem that could have been addresses using &lt;a href=&quot;http://review.whamcloud.com/#/c/7148/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/7148/&lt;/a&gt; but was eventually fixed by changing the locking used by the coordinator.&lt;/p&gt;</comment>
                            <comment id="70092" author="green" created="Tue, 29 Oct 2013 02:46:21 +0000"  >&lt;p&gt;I believe I have a patch for this in &lt;a href=&quot;http://review.whamcloud.com/8083&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/8083&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Now, can you please share exact reproducing details (And also please give the patch a try).&lt;/p&gt;</comment>
                            <comment id="70097" author="green" created="Tue, 29 Oct 2013 07:01:02 +0000"  >&lt;p&gt;Also there&apos;s a patch with alternative approach at &lt;a href=&quot;http://review.whamcloud.com/8088&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/8088&lt;/a&gt;&lt;br/&gt;
we might want this one to go into master only and not 2.5 since it&apos;s somewhat more involved.&lt;br/&gt;
Please also give this one a try.&lt;/p&gt;</comment>
                            <comment id="70186" author="paf" created="Tue, 29 Oct 2013 21:18:34 +0000"  >&lt;p&gt;Our first test run with the simpler of the two patches has locked up.  The original lock re-ordering patch from &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3601&quot; title=&quot;HSM release causes running restore to hang, hangs itself&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3601&quot;&gt;&lt;del&gt;LU-3601&lt;/del&gt;&lt;/a&gt; allowed us to complete the test run successfully.&lt;br/&gt;
I&apos;m going to make the dump available here shortly.  It&apos;s possible it&apos;s the same issue, or it&apos;s possible there&apos;s some other layout lock related livelock that John&apos;s original &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3601&quot; title=&quot;HSM release causes running restore to hang, hangs itself&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3601&quot;&gt;&lt;del&gt;LU-3601&lt;/del&gt;&lt;/a&gt; patch also allowed us to avoid.&lt;/p&gt;

&lt;p&gt;The second patch LBUGged almost immediately:&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: LustreError: 17452:0:(mdt_handler.c:3841:mdt_intent_reint()) ASSERTION( lustre_handle_is_used(&amp;amp;lhc-&amp;gt;mlh_reg_lh) ) failed:&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: LustreError: 17452:0:(mdt_handler.c:3841:mdt_intent_reint()) LBUG&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: Pid: 17452, comm: mdt02_003&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel:&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: Call Trace:&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0a20895&amp;gt;&amp;#93;&lt;/span&gt; libcfs_debug_dumpstack+0x55/0x80 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0a20e97&amp;gt;&amp;#93;&lt;/span&gt; lbug_with_loc+0x47/0xb0 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa13ab367&amp;gt;&amp;#93;&lt;/span&gt; mdt_intent_reint+0x4e7/0x520 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa13a8f1e&amp;gt;&amp;#93;&lt;/span&gt; mdt_intent_policy+0x39e/0x720 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0cc1831&amp;gt;&amp;#93;&lt;/span&gt; ldlm_lock_enqueue+0x361/0x8d0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0ce893f&amp;gt;&amp;#93;&lt;/span&gt; ldlm_handle_enqueue0+0x4ef/0x10b0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa13a93a6&amp;gt;&amp;#93;&lt;/span&gt; mdt_enqueue+0x46/0xe0 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa13afa97&amp;gt;&amp;#93;&lt;/span&gt; mdt_handle_common+0x647/0x16d0 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d0b40c&amp;gt;&amp;#93;&lt;/span&gt; ? lustre_msg_get_transno+0x8c/0x100 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa13e94d5&amp;gt;&amp;#93;&lt;/span&gt; mds_regular_handle+0x15/0x20 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d1ac28&amp;gt;&amp;#93;&lt;/span&gt; ptlrpc_server_handle_request+0x398/0xc60 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0a215de&amp;gt;&amp;#93;&lt;/span&gt; ? cfs_timer_arm+0xe/0x10 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0a32ebf&amp;gt;&amp;#93;&lt;/span&gt; ? lc_watchdog_touch+0x6f/0x170 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d11f89&amp;gt;&amp;#93;&lt;/span&gt; ? ptlrpc_wait_event+0xa9/0x290 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81055ad3&amp;gt;&amp;#93;&lt;/span&gt; ? __wake_up+0x53/0x70&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d1bfbe&amp;gt;&amp;#93;&lt;/span&gt; ptlrpc_main+0xace/0x1700 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d1b4f0&amp;gt;&amp;#93;&lt;/span&gt; ? ptlrpc_main+0x0/0x1700 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100c0ca&amp;gt;&amp;#93;&lt;/span&gt; child_rip+0xa/0x20&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d1b4f0&amp;gt;&amp;#93;&lt;/span&gt; ? ptlrpc_main+0x0/0x1700 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d1b4f0&amp;gt;&amp;#93;&lt;/span&gt; ? ptlrpc_main+0x0/0x1700 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
Oct 29 15:52:23 perses-esf-mds001 perses-esf-mds001 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100c0c0&amp;gt;&amp;#93;&lt;/span&gt; ? child_rip+0x0/0x20&lt;/p&gt;</comment>
                            <comment id="70190" author="paf" created="Tue, 29 Oct 2013 21:34:29 +0000"  >&lt;p&gt;MDS dump from apparent live lock with the first patch is up on Cray&apos;s FTP at:&lt;br/&gt;
ftp.cray.com&lt;br/&gt;
u: anonymous&lt;br/&gt;
p: anonymous&lt;/p&gt;

&lt;p&gt;Then:&lt;br/&gt;
cd outbound/&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4152&quot; title=&quot; layout locks can cause deadlock&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4152&quot;&gt;&lt;del&gt;LU-4152&lt;/del&gt;&lt;/a&gt;&lt;br/&gt;
And then the file is:&lt;br/&gt;
&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4152&quot; title=&quot; layout locks can cause deadlock&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4152&quot;&gt;&lt;del&gt;LU-4152&lt;/del&gt;&lt;/a&gt;-patch_8083.tar.gz&lt;/p&gt;

&lt;p&gt;Note that anonymous doesn&apos;t have permission to ls in outbound or in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4152&quot; title=&quot; layout locks can cause deadlock&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4152&quot;&gt;&lt;del&gt;LU-4152&lt;/del&gt;&lt;/a&gt;, but a get command will work fine.&lt;/p&gt;</comment>
                            <comment id="70196" author="green" created="Tue, 29 Oct 2013 22:01:01 +0000"  >&lt;p&gt;apparently the vmcore that&apos;s part of this file is for the second panic, because I see the panic message inside.&lt;/p&gt;</comment>
                            <comment id="70197" author="paf" created="Tue, 29 Oct 2013 22:10:00 +0000"  >&lt;p&gt;Oops.  You&apos;re right.&lt;/p&gt;

&lt;p&gt;My apologies, Oleg, I grabbed the wrong dump package.  Just a minute here.&lt;/p&gt;</comment>
                            <comment id="70198" author="paf" created="Tue, 29 Oct 2013 22:19:01 +0000"  >&lt;p&gt;The correct dump is now there.&lt;/p&gt;

&lt;p&gt;There are now two files in outbound/&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4152&quot; title=&quot; layout locks can cause deadlock&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4152&quot;&gt;&lt;del&gt;LU-4152&lt;/del&gt;&lt;/a&gt;:&lt;br/&gt;
&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4152&quot; title=&quot; layout locks can cause deadlock&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4152&quot;&gt;&lt;del&gt;LU-4152&lt;/del&gt;&lt;/a&gt;-patch_8083.tar.gz&lt;br/&gt;
&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4152&quot; title=&quot; layout locks can cause deadlock&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4152&quot;&gt;&lt;del&gt;LU-4152&lt;/del&gt;&lt;/a&gt;-patch_8088.tar.gz&lt;/p&gt;

&lt;p&gt;The file named for patch 8083 NOW correctly contains the dump from the hang we saw with patch 8083.&lt;br/&gt;
The one named for patch 8088 is the dump I uploaded previously, which has the LBUG in it.&lt;/p&gt;</comment>
                            <comment id="70209" author="green" created="Tue, 29 Oct 2013 23:11:33 +0000"  >&lt;p&gt;I see.&lt;br/&gt;
Duh, apparently I made a check with a wrong lock handle.&lt;br/&gt;
Please update the patch with this fix and try again (note it introduces some sort of a crash on client shutdown, that I&apos;ll tackle soon, but I want to make sure it really kills the problem this time).&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;--- a/lustre/mdt/mdt_open.c
+++ b/lustre/mdt/mdt_open.c
@@ -1278,7 +1278,7 @@ static int mdt_object_open_lock(struct mdt_thread_info *in
                 * As such we&apos;ll drop the open lock we just got above here,
                 * it&apos;s ok not to have this open lock as it&apos;s main purpose is to
                 * flush unused cached client open handles. */
-               if (lustre_handle_is_used(&amp;amp;ll-&amp;gt;mlh_reg_lh))
+               if (lustre_handle_is_used(&amp;amp;lhc-&amp;gt;mlh_reg_lh))
                        mdt_object_unlock(info, obj, lhc, 1);
 
                LASSERT(!try_layout);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="70311" author="paf" created="Wed, 30 Oct 2013 21:28:58 +0000"  >&lt;p&gt;Oleg,&lt;/p&gt;

&lt;p&gt;We&apos;ve just tested the fixed patch and it appears to resolve the issue.  We&apos;ve run our reproducer twice now, and it&apos;s hit every time we&apos;ve run with unpatched code.&lt;br/&gt;
Looks like it fixes it - If you can address the client shutdown crash, I think it&apos;s good to go.&lt;/p&gt;</comment>
                            <comment id="70312" author="paf" created="Wed, 30 Oct 2013 21:53:37 +0000"  >&lt;p&gt;By the way, here&apos;s the stack trace from our client dump on shutdown..  It seems simple enough, just an unreleased reference, presumably to one of those locks:&lt;br/&gt;
2013-10-30T16:34:38.526134-05:00 c0-0c0s2n3 LustreError: 11673:0:(lu_object.c:1141:lu_device_fini()) ASSERTION( cfs_atomic_read(&amp;amp;d-&amp;gt;ld_ref) == 0 ) failed: Refcount is 1&lt;br/&gt;
2013-10-30T16:34:38.526158-05:00 c0-0c0s2n3 LustreError: 11673:0:(lu_object.c:1141:lu_device_fini()) LBUG&lt;br/&gt;
2013-10-30T16:34:38.526169-05:00 c0-0c0s2n3 Pid: 11673, comm: umount&lt;br/&gt;
2013-10-30T16:34:38.526175-05:00 c0-0c0s2n3 Call Trace:&lt;br/&gt;
2013-10-30T16:34:38.555669-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81007e59&amp;gt;&amp;#93;&lt;/span&gt; try_stack_unwind+0x1a9/0x200&lt;br/&gt;
2013-10-30T16:34:38.555680-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81006625&amp;gt;&amp;#93;&lt;/span&gt; dump_trace+0x95/0x300&lt;br/&gt;
2013-10-30T16:34:38.555693-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa02a28d7&amp;gt;&amp;#93;&lt;/span&gt; libcfs_debug_dumpstack+0x57/0x80 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.555700-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa02a2e27&amp;gt;&amp;#93;&lt;/span&gt; lbug_with_loc+0x47/0xb0 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.555725-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa05190c7&amp;gt;&amp;#93;&lt;/span&gt; lu_device_fini+0x87/0xc0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.555734-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa094efd4&amp;gt;&amp;#93;&lt;/span&gt; lovsub_device_free+0x24/0x160 &lt;span class=&quot;error&quot;&gt;&amp;#91;lov&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.585779-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa051cd0e&amp;gt;&amp;#93;&lt;/span&gt; lu_stack_fini+0x7e/0xc0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.585790-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0522c1e&amp;gt;&amp;#93;&lt;/span&gt; cl_stack_fini+0xe/0x10 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.585801-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa093e9a8&amp;gt;&amp;#93;&lt;/span&gt; lov_device_fini+0x58/0x120 &lt;span class=&quot;error&quot;&gt;&amp;#91;lov&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.585866-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa051ccda&amp;gt;&amp;#93;&lt;/span&gt; lu_stack_fini+0x4a/0xc0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.585887-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0522c1e&amp;gt;&amp;#93;&lt;/span&gt; cl_stack_fini+0xe/0x10 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.585896-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0a1edec&amp;gt;&amp;#93;&lt;/span&gt; cl_sb_fini+0x6c/0x1a0 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.615774-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa09e4fc5&amp;gt;&amp;#93;&lt;/span&gt; client_common_put_super+0x55/0xad0 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.615784-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa09e5b05&amp;gt;&amp;#93;&lt;/span&gt; ll_put_super+0xc5/0x330 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.647596-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8111546d&amp;gt;&amp;#93;&lt;/span&gt; generic_shutdown_super+0x5d/0x110&lt;br/&gt;
2013-10-30T16:34:38.647614-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81115586&amp;gt;&amp;#93;&lt;/span&gt; kill_anon_super+0x16/0x60&lt;br/&gt;
2013-10-30T16:34:38.647620-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa050f2c6&amp;gt;&amp;#93;&lt;/span&gt; lustre_kill_super+0x36/0x50 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.647626-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81115a23&amp;gt;&amp;#93;&lt;/span&gt; deactivate_super+0x73/0x90&lt;br/&gt;
2013-10-30T16:34:38.673196-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8112db02&amp;gt;&amp;#93;&lt;/span&gt; mntput_no_expire+0xc2/0xf0&lt;br/&gt;
2013-10-30T16:34:38.673208-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8112debc&amp;gt;&amp;#93;&lt;/span&gt; sys_umount+0x7c/0x360&lt;br/&gt;
2013-10-30T16:34:38.673213-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100305b&amp;gt;&amp;#93;&lt;/span&gt; system_call_fastpath+0x16/0x1b&lt;br/&gt;
2013-10-30T16:34:38.673237-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;00007fe4da8afd07&amp;gt;&amp;#93;&lt;/span&gt; 0x7fe4da8afd07&lt;br/&gt;
2013-10-30T16:34:38.673245-05:00 c0-0c0s2n3 Kernel panic - not syncing: LBUG&lt;br/&gt;
2013-10-30T16:34:38.698848-05:00 c0-0c0s2n3 Pid: 11673, comm: umount Tainted: P           2.6.32.59-0.7.1_1.0402.7496-cray_gem_s #1&lt;br/&gt;
2013-10-30T16:34:38.698859-05:00 c0-0c0s2n3 Call Trace:&lt;br/&gt;
2013-10-30T16:34:38.698882-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81007e59&amp;gt;&amp;#93;&lt;/span&gt; try_stack_unwind+0x1a9/0x200&lt;br/&gt;
2013-10-30T16:34:38.698891-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81006625&amp;gt;&amp;#93;&lt;/span&gt; dump_trace+0x95/0x300&lt;br/&gt;
2013-10-30T16:34:38.698896-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100786c&amp;gt;&amp;#93;&lt;/span&gt; show_trace_log_lvl+0x5c/0x80&lt;br/&gt;
2013-10-30T16:34:38.698911-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff810078a5&amp;gt;&amp;#93;&lt;/span&gt; show_trace+0x15/0x20&lt;br/&gt;
2013-10-30T16:34:38.698916-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81427e15&amp;gt;&amp;#93;&lt;/span&gt; dump_stack+0x77/0x82&lt;br/&gt;
2013-10-30T16:34:38.698921-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81427e9a&amp;gt;&amp;#93;&lt;/span&gt; panic+0x7a/0x165&lt;br/&gt;
2013-10-30T16:34:38.724451-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa02a2e7b&amp;gt;&amp;#93;&lt;/span&gt; lbug_with_loc+0x9b/0xb0 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.724462-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa05190c7&amp;gt;&amp;#93;&lt;/span&gt; lu_device_fini+0x87/0xc0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.724484-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa094efd4&amp;gt;&amp;#93;&lt;/span&gt; lovsub_device_free+0x24/0x160 &lt;span class=&quot;error&quot;&gt;&amp;#91;lov&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.750105-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa051cd0e&amp;gt;&amp;#93;&lt;/span&gt; lu_stack_fini+0x7e/0xc0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.750118-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0522c1e&amp;gt;&amp;#93;&lt;/span&gt; cl_stack_fini+0xe/0x10 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.750142-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa093e9a8&amp;gt;&amp;#93;&lt;/span&gt; lov_device_fini+0x58/0x120 &lt;span class=&quot;error&quot;&gt;&amp;#91;lov&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.750151-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa051ccda&amp;gt;&amp;#93;&lt;/span&gt; lu_stack_fini+0x4a/0xc0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.750156-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0522c1e&amp;gt;&amp;#93;&lt;/span&gt; cl_stack_fini+0xe/0x10 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.750168-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0a1edec&amp;gt;&amp;#93;&lt;/span&gt; cl_sb_fini+0x6c/0x1a0 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.775711-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa09e4fc5&amp;gt;&amp;#93;&lt;/span&gt; client_common_put_super+0x55/0xad0 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.775723-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa09e5b05&amp;gt;&amp;#93;&lt;/span&gt; ll_put_super+0xc5/0x330 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.775749-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8111546d&amp;gt;&amp;#93;&lt;/span&gt; generic_shutdown_super+0x5d/0x110&lt;br/&gt;
2013-10-30T16:34:38.775760-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81115586&amp;gt;&amp;#93;&lt;/span&gt; kill_anon_super+0x16/0x60&lt;br/&gt;
2013-10-30T16:34:38.775766-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa050f2c6&amp;gt;&amp;#93;&lt;/span&gt; lustre_kill_super+0x36/0x50 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
2013-10-30T16:34:38.801373-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81115a23&amp;gt;&amp;#93;&lt;/span&gt; deactivate_super+0x73/0x90&lt;br/&gt;
2013-10-30T16:34:38.801384-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8112db02&amp;gt;&amp;#93;&lt;/span&gt; mntput_no_expire+0xc2/0xf0&lt;br/&gt;
2013-10-30T16:34:38.801410-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8112debc&amp;gt;&amp;#93;&lt;/span&gt; sys_umount+0x7c/0x360&lt;br/&gt;
2013-10-30T16:34:38.801419-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100305b&amp;gt;&amp;#93;&lt;/span&gt; system_call_fastpath+0x16/0x1b&lt;br/&gt;
2013-10-30T16:34:38.801429-05:00 c0-0c0s2n3 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;00007fe4da8afd07&amp;gt;&amp;#93;&lt;/span&gt; 0x7fe4da8afd07&lt;/p&gt;</comment>
                            <comment id="70813" author="paf" created="Wed, 6 Nov 2013 03:08:29 +0000"  >&lt;p&gt;We&apos;ve now observed this same deadlock in a non-NFS situation as well.&lt;/p&gt;

&lt;p&gt;Oleg - If you can give a pointer on how to get the client reference releasing correctly, I&apos;d be happy to generate and test the patch.&lt;/p&gt;</comment>
                            <comment id="70965" author="askulysh" created="Thu, 7 Nov 2013 13:05:25 +0000"  >&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;2013-10-30T16:34:38.526134-05:00 c0-0c0s2n3 LustreError: 11673:0:(lu_object.c:1141:lu_device_fini()) ASSERTION( cfs_atomic_read(&amp;amp;d-&amp;gt;ld_ref) == 0 ) failed: Refcount is 1
2013-10-30T16:34:38.526158-05:00 c0-0c0s2n3 LustreError: 11673:0:(lu_object.c:1141:lu_device_fini()) LBUG
2013-10-30T16:34:38.526169-05:00 c0-0c0s2n3 Pid: 11673, comm: umount
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This LBUG appeared due to &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;commit d10c9e05ab593f361fdfd27842766a1924e63e58
Author: yangsheng &amp;lt;ys@whamcloud.com&amp;gt;
Date:   Sat May 26 02:12:36 2012 +0800

    LU-506 kernel: FC15 - support dcache scalability changes.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;  
&lt;p&gt;during shutdown ll_file_release() and final dput() is called before ll_md_blocking_ast() and d_lustre_invalidate(), so there is no condition to call d_kill()&lt;/p&gt;</comment>
                            <comment id="70978" author="paf" created="Thu, 7 Nov 2013 15:52:46 +0000"  >&lt;p&gt;Andriy - Can you be more specific about what portion of that patch you believe caused this issue, and why you think it&apos;s at fault here from the stack trace?&lt;/p&gt;</comment>
                            <comment id="71879" author="green" created="Tue, 19 Nov 2013 13:49:27 +0000"  >&lt;p&gt;Well, I came to a conclusion that the assertion is in fact a preexisting bug condition since I can reproduce it at will without any patches with this:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;sh llmount.sh
echo 0 &amp;gt;/proc/sys/lnet/panic_on_lbug
service nfs start
mount localhost:/mnt/lustre /mnt/nfs -t nfs
touch /mnt/nfs/file
ls -l /mnt/lustre
cp -f /etc/passwd /mnt/nfs/file

umount /mnt/nfs
service nfs stop
sh llmountcleanup.sh
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I am going to open a new bug for this issue since it seems to be separate.&lt;/p&gt;</comment>
                            <comment id="71886" author="green" created="Tue, 19 Nov 2013 14:11:58 +0000"  >&lt;p&gt;Opened &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4272&quot; title=&quot;lu_device_fini()) ASSERTION( cfs_atomic_read(&amp;amp;d-&amp;gt;ld_ref) == 0 ) failed from lovsub_device_free&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4272&quot;&gt;&lt;del&gt;LU-4272&lt;/del&gt;&lt;/a&gt;&lt;br/&gt;
Updated the patch 4083 to fix the typo.&lt;/p&gt;</comment>
                            <comment id="71906" author="paf" created="Tue, 19 Nov 2013 17:20:30 +0000"  >&lt;p&gt;Thank you for the update, Oleg.  It&apos;s nice to know the crash is unrelated.&lt;/p&gt;</comment>
                            <comment id="76659" author="adilger" created="Mon, 10 Feb 2014 21:37:19 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/8083&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/8083&lt;/a&gt; landed to master.  What else is still needed to close this bug?&lt;/p&gt;</comment>
                            <comment id="76862" author="jlevi" created="Wed, 12 Feb 2014 17:47:53 +0000"  >&lt;p&gt;Patch landed to Master. Closing ticket.&lt;/p&gt;</comment>
                            <comment id="76945" author="adegremont" created="Thu, 13 Feb 2014 08:19:41 +0000"  >&lt;p&gt;Are we sure this problem should not be fixed in 2.5 too? &lt;/p&gt;</comment>
                            <comment id="76957" author="pjones" created="Thu, 13 Feb 2014 13:21:45 +0000"  >&lt;p&gt;Aurelien&lt;/p&gt;

&lt;p&gt;That would be tracked separately&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="19860">LU-3601</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="27867">LU-6015</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="15863">LU-1876</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="22524">LU-4398</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="19860">LU-3601</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzw6xz:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>11269</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>