<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:59:36 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6368] ASSERTION( cur-&gt;oe_dlmlock == victim-&gt;oe_dlmlock ) failed</title>
                <link>https://jira.whamcloud.com/browse/LU-6368</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;I can consistently crash the lustre client with the reproducer attached.&lt;/p&gt;

&lt;p&gt;Info from the logs:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&amp;lt;0&amp;gt;LustreError: 26474:0:(osc_cache.c:519:osc_extent_merge()) ASSERTION( cur-&amp;gt;oe_dlmlock == victim-&amp;gt;oe_dlmlock ) failed: 
&amp;lt;0&amp;gt;LustreError: 26474:0:(osc_cache.c:519:osc_extent_merge()) LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Stack trace from crash:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;crash&amp;gt; bt
PID: 26474  TASK: ffff88003747caa0  CPU: 3   COMMAND: &quot;llsendfile3&quot;
 #0 [ffff88001a2835f0] machine_kexec at ffffffff81038f3b
 #1 [ffff88001a283650] crash_kexec at ffffffff810c5b62
 #2 [ffff88001a283720] panic at ffffffff815285a3
 #3 [ffff88001a2837a0] lbug_with_loc at ffffffffa0ac8eeb [libcfs]
 #4 [ffff88001a2837c0] osc_extent_merge at ffffffffa06ce57d [osc]
 #5 [ffff88001a2838d0] osc_extent_release at ffffffffa06d3efb [osc]
 #6 [ffff88001a283900] osc_io_end at ffffffffa06c520f [osc]
 #7 [ffff88001a283920] cl_io_end at ffffffffa0dfc270 [obdclass]
 #8 [ffff88001a283950] lov_io_end_wrapper at ffffffffa070f3b1 [lov]
 #9 [ffff88001a283970] lov_io_call at ffffffffa070f0fe [lov]
#10 [ffff88001a2839a0] lov_io_end at ffffffffa0710fbc [lov]
#11 [ffff88001a2839c0] cl_io_end at ffffffffa0dfc270 [obdclass]
#12 [ffff88001a2839f0] cl_io_loop at ffffffffa0e00b52 [obdclass]
#13 [ffff88001a283a20] ll_file_io_generic at ffffffffa125e20c [lustre]
#14 [ffff88001a283b40] ll_file_aio_write at ffffffffa125e933 [lustre]
#15 [ffff88001a283ba0] ll_file_write at ffffffffa125edd9 [lustre]
#16 [ffff88001a283c10] vfs_write at ffffffff81188df8
#17 [ffff88001a283c50] kernel_write at ffffffff811b8ded
#18 [ffff88001a283c80] write_pipe_buf at ffffffff811b8e5a
#19 [ffff88001a283cc0] splice_from_pipe_feed at ffffffff811b7a92
#20 [ffff88001a283d10] __splice_from_pipe at ffffffff811b84ee
#21 [ffff88001a283d50] splice_from_pipe at ffffffff811b8551
#22 [ffff88001a283da0] default_file_splice_write at ffffffff811b858d
#23 [ffff88001a283dc0] do_splice_from at ffffffff811b862e
#24 [ffff88001a283e00] direct_splice_actor at ffffffff811b8680
#25 [ffff88001a283e10] splice_direct_to_actor at ffffffff811b8956
#26 [ffff88001a283e80] do_splice_direct at ffffffff811b8a9d
#27 [ffff88001a283ed0] do_sendfile at ffffffff811891fc
#28 [ffff88001a283f30] sys_sendfile64 at ffffffff81189294
#29 [ffff88001a283f80] system_call_fastpath at ffffffff8100b072
    RIP: 0000003a522df7da  RSP: 00007fffe6f8add8  RFLAGS: 00010206
    RAX: 0000000000000028  RBX: ffffffff8100b072  RCX: 0000000000a00000
    RDX: 0000000000000000  RSI: 0000000000000003  RDI: 0000000000000004
    RBP: 0000000000000004   R8: 0000003a5258f300   R9: 0000003a51a0e9f0
    R10: 0000000000a00000  R11: 0000000000000206  R12: 0000000000000000
    R13: 00007fffe6f8aed0  R14: 0000000000401b90  R15: 0000000000000003
    ORIG_RAX: 0000000000000028  CS: 0033  SS: 002b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This is related to the group lock on the target file. If the group lock is commented out, then no crash happens.&lt;/p&gt;</description>
                <environment>centos 6 + Lustre head of tree (2.7+)</environment>
        <key id="29105">LU-6368</key>
            <summary>ASSERTION( cur-&gt;oe_dlmlock == victim-&gt;oe_dlmlock ) failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="fzago">Frank Zago</reporter>
                        <labels>
                            <label>patch</label>
                    </labels>
                <created>Mon, 16 Mar 2015 16:06:32 +0000</created>
                <updated>Tue, 19 Mar 2019 15:17:22 +0000</updated>
                            <resolved>Sat, 9 May 2015 04:08:58 +0000</resolved>
                                    <version>Lustre 2.7.0</version>
                                    <fixVersion>Lustre 2.8.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>10</watches>
                                                                            <comments>
                            <comment id="109783" author="paf" created="Mon, 16 Mar 2015 20:21:35 +0000"  >&lt;p&gt;A few notes.  File must be &amp;gt; 1 page in size, so &amp;gt; 4k.  (Definitely happens with a 100KiB file.  Haven&apos;t tested intervening sizes.)&lt;/p&gt;

&lt;p&gt;The crash happens during the write of the second page (it seems that with sendfile, ll_file_write is called once per page).&lt;/p&gt;
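
&lt;p&gt;(For illustration only, here is a rough sketch of what such a reproducer looks like, in the spirit of the attached llsendfile3.c; the paths, size and gid are made up, error checking is omitted, and the attachment remains the authoritative test case.)&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;/* Sketch of a reproducer: take a group lock on the Lustre target file,
 * then push a &amp;gt;4 KiB source file into it with sendfile(2).
 * Error checking omitted for brevity. */
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;sys/ioctl.h&amp;gt;
#include &amp;lt;sys/sendfile.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;
#include &amp;lt;lustre/lustre_user.h&amp;gt;      /* LL_IOC_GROUP_LOCK / LL_IOC_GROUP_UNLOCK */

int main(void)
{
        int gid = 1;                                            /* arbitrary group id */
        int src = open(&quot;/tmp/src_100k&quot;, O_RDONLY);              /* &amp;gt; one page in size */
        int dst = open(&quot;/mnt/lustre/target&quot;, O_WRONLY | O_CREAT, 0644);

        ioctl(dst, LL_IOC_GROUP_LOCK, gid);     /* the group lock in question */
        sendfile(dst, src, NULL, 100 * 1024);   /* write goes through the splice path */
        ioctl(dst, LL_IOC_GROUP_UNLOCK, gid);

        close(src);
        close(dst);
        return 0;
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;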

&lt;p&gt;I&apos;ll get some dk logs attached shortly, but here&apos;s my initial analysis.&lt;/p&gt;

&lt;p&gt;The core problem is that two separate group locks are issued.  From the client logs...&lt;br/&gt;
00010000:00010000:1.0:1426488989.036499:0:1492:0:(ldlm_request.c:712:ldlm_cli_enqueue_fini()) ### client-side enqueue END ns: centss01-OST0000-osc-ffff8801383d5c00 lock: ffff8801395e17c0/0x17d7a5b11d7f9867 lrc: 4/0,1 mode: GROUP/GROUP res: [0xf2:0x0:0x0].0 rrc: 1 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;18446744073709551615) flags: 0x20000 nid: local remote: 0x82dec12a52ea27a9 expref: -99 pid: 1492 timeout: 0 lvb_type: 1&lt;/p&gt;

&lt;p&gt;And without the first lock ever being released, it gets another group lock:&lt;br/&gt;
00010000:00010000:0.0:1426488989.132324:0:1492:0:(ldlm_request.c:712:ldlm_cli_enqueue_fini()) ### client-side enqueue END ns: centss01-OST0000-osc-ffff8801383d5c00 lock: ffff8801395e19c0/0x17d7a5b11d7f986e lrc: 4/0,1 mode: GROUP/GROUP res: [0xf2:0x0:0x0].0 rrc: 2 type: EXT [0-&amp;gt;18446744073709551615] (req 4096-&amp;gt;8191) flags: 0x0 nid: local remote: 0x82dec12a52ea27b0 expref: -99 pid: 1492 timeout: 0 lvb_type: 1&lt;/p&gt;

&lt;p&gt;This happens in the sendfile code after it fails to match the first group lock.  I&apos;ve got an instrumented version of search_queue that shows the reason for the failed match:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;                /* llite sometimes wants to match locks that will be
                 * canceled when their users drop, but we allow it to match
                 * &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; it passes in CBPENDING and the lock still has users.
                 * &lt;span class=&quot;code-keyword&quot;&gt;this&lt;/span&gt; is generally only going to be used by children
                 * whose parents already hold a lock so forward progress
                 * can still happen. */
                &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (ldlm_is_cbpending(lock) &amp;amp;&amp;amp;
                    !(flags &amp;amp; LDLM_FL_CBPENDING)) {
                        reason = 3;
                        &lt;span class=&quot;code-keyword&quot;&gt;continue&lt;/span&gt;;
                }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;So trying to match a requested group lock against the existing one (s: centss01-OST0000-osc-ffff880139645000 lock: ffff88011286ad80/0x4166f63c6cfea1fd lrc: 4/0,1 mode: GROUP/GROUP res: [0xf2:0x0:0x0].0 rrc: 2 type: EXT [0-&amp;gt;18446744073709551615] stride: 0 (req 0-&amp;gt;18446744073709551615) flags: 0x20400020000 nid: local remote: 0x82dec12a52ea2ff2 expref: -99 pid: 6805 timeout: 0 lvb_type: 1), it fails with reason 3, as noted above.&lt;/p&gt;

&lt;p&gt;So at a first remove, the cause of this problem is two group locks being issued which cover the same extent.  I&apos;ll give my further thoughts on this in another comment, and I&apos;ll also attach these dk logs.  (I can make the dump available as well.)&lt;/p&gt;</comment>
                            <comment id="109786" author="paf" created="Mon, 16 Mar 2015 20:38:53 +0000"  >&lt;p&gt;So:&lt;/p&gt;

&lt;p&gt;The first thought I have is that for a group lock, it should still be matchable if callback is pending (on the existing lock), since group locks cannot be called back.&lt;/p&gt;

&lt;p&gt;But that makes me wonder: Is it correct to allow the callback pending flag to be set on a group lock?  That seems like it might be wrong as well, for the same reason as above: Group locks cannot be called back, so callback pending has no meaning.&lt;/p&gt;

&lt;p&gt;Finally, and separately, I have not looked into this yet, but it seems clearly wrong that the server should issue two group locks on the same resource to the same client.  Not sure how best to handle that, but it definitely shouldn&apos;t &lt;b&gt;grant&lt;/b&gt; such a lock, unless I&apos;ve really missed something.&lt;/p&gt;

&lt;p&gt;(I am not quite sure how/why the OSC layer decides to request a group lock for the second lock, rather than a normal PW lock.)&lt;/p&gt;</comment>
                            <comment id="109787" author="paf" created="Mon, 16 Mar 2015 20:41:41 +0000"  >&lt;p&gt;For reference, here is dump_namespaces from the server shortly after a client crashed with this test (this is not the same run as the rest of the DK logs):&lt;br/&gt;
00010000:00010000:0.0:1426493058.930173:0:2207:0:(ldlm_resource.c:1376:ldlm_resource_dump()) --- Resource: [0xf2:0x0:0x0].0 (ffff880125d5b0c0) refcount = 3&lt;br/&gt;
00010000:00010000:0.0:1426493058.930174:0:2207:0:(ldlm_resource.c:1379:ldlm_resource_dump()) Granted locks (in reverse order):&lt;br/&gt;
00010000:00010000:0.0:1426493058.930176:0:2207:0:(ldlm_resource.c:1382:ldlm_resource_dump()) ### ### ns: filter-centss01-OST0000_UUID lock: ffff880121730b40/0x82dec12a52ea32d1 lrc: 2/0,0 mode: GROUP/GROUP res: [0xf2:0x0:0x0].0 rrc: 3 type: EXT [0-&amp;gt;18446744073709551615] (req 4096-&amp;gt;8191) flags: 0x40000000000000 nid: 192.168.1.20@tcp remote: 0xcde4e8d126caf1be expref: 13 pid: 2175 timeout: 0 lvb_type: 0&lt;br/&gt;
00010000:00010000:0.0:1426493058.930179:0:2207:0:(ldlm_resource.c:1382:ldlm_resource_dump()) ### ### ns: filter-centss01-OST0000_UUID lock: ffff880121730340/0x82dec12a52ea32ca lrc: 2/0,0 mode: GROUP/GROUP res: [0xf2:0x0:0x0].0 rrc: 3 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;18446744073709551615) flags: 0x40000000020000 nid: 192.168.1.20@tcp remote: 0xcde4e8d126caf1b7 expref: 13 pid: 2175 timeout: 0 lvb_type: 0&lt;/p&gt;</comment>
                            <comment id="109792" author="gerrit" created="Mon, 16 Mar 2015 21:23:37 +0000"  >&lt;p&gt;Patrick Farrell (paf@cray.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/14093&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/14093&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6368&quot; title=&quot;ASSERTION( cur-&amp;gt;oe_dlmlock == victim-&amp;gt;oe_dlmlock ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6368&quot;&gt;&lt;del&gt;LU-6368&lt;/del&gt;&lt;/a&gt; ldlm: Ignore cbpending for group locks&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 8a9287c2fb29f98d83654c42f7ee3c61015bb243&lt;/p&gt;</comment>
                            <comment id="109793" author="paf" created="Mon, 16 Mar 2015 21:43:20 +0000"  >&lt;p&gt;The patch at &lt;a href=&quot;http://review.whamcloud.com/14093&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/14093&lt;/a&gt; fixes the LBUG described here by attacking my &quot;first thought&quot; above: It causes search_queue to ignore the CBPDENDING flag on existing group locks, and match to those locks anyway.&lt;/p&gt;

&lt;p&gt;I&apos;m still concerned about the other two thoughts - Should the CBPENDING flag be set?  Does it have any meaning for group locks?  If not, should we be refusing to set it?&lt;/p&gt;

&lt;p&gt;And what about the fact this lock was granted?  It&apos;s a robustness thing rather than a fix for this bug, but it seems like we should have some sanity check that prevents granting this lock.  I&apos;ll take a stab at writing one in search_queue, but I&apos;m not sure I like what I&apos;m thinking of.&lt;/p&gt;</comment>
                            <comment id="109796" author="paf" created="Mon, 16 Mar 2015 22:20:55 +0000"  >&lt;p&gt;Client dk logs with special debug added.  (The debug is part of my strided lock patches and so includes a lot of extra info in other places as well.)&lt;/p&gt;</comment>
                            <comment id="109857" author="jay" created="Tue, 17 Mar 2015 16:55:07 +0000"  >&lt;p&gt;I agree that it&apos;s wrong to give up group lock therefore group lock should have never had CBPENDING bit set.&lt;/p&gt;</comment>
                            <comment id="109889" author="paf" created="Tue, 17 Mar 2015 18:47:30 +0000"  >&lt;p&gt;Jinshan - That flag is being set by ldlm_lock_decref_and_cancel, which is called specifically from osc_cancel_base just for group locks, and the only difference from regular ldlm_lock_decref is that cbpending is set on the lock in decref_and_cancel.&lt;/p&gt;

&lt;p&gt;Here&apos;s the comment on it:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;/**
 * Decrease reader/writer refcount for LDLM lock with handle
 * \a lockh and mark it for subsequent cancellation once r/w refcount
 * drops to zero instead of putting into LRU.
 *
 * Typical usage is for GROUP locks which we cannot allow to be cached.
 */
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
        ENTRY;

        LASSERT(lock != NULL);

        LDLM_DEBUG(lock, &quot;ldlm_lock_decref(%s)&quot;, ldlm_lockname[mode]);
        lock_res_and_lock(lock);
        ldlm_set_cbpending(lock);
        unlock_res_and_lock(lock);
        ldlm_lock_decref_internal(lock, mode);
        LDLM_LOCK_PUT(lock);
}
EXPORT_SYMBOL(ldlm_lock_decref_and_cancel);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;hr /&gt;
&lt;p&gt;So it&apos;s definitely intentional that CBPENDING be set on group locks.  I don&apos;t fully understand the life cycle for LDLM and OSC locks, but here&apos;s what I see...&lt;/p&gt;

&lt;p&gt;The group lock request (from the ioctl) gets a writer reference on the lock, and this reference is not released until the lock is cancelled by the usual manual cancellation request.&lt;/p&gt;

&lt;p&gt;So I&apos;ve decided to try removing the usage of CBPENDING for group locks and instead make ldlm_lock_decref_internal group lock aware.  It will cancel group locks once they have zero reader &amp;amp; writer references, whether or not they have CBPENDING set.&lt;/p&gt;
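
&lt;p&gt;(As a rough sketch of that idea only; the field and helper names come from the snippets above, and the exact placement inside ldlm_lock_decref_internal is my assumption until the patch is up.)&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;        /* Sketch: after the reader/writer reference has been dropped,
         * cancel a GROUP lock with no remaining users even though
         * CBPENDING was never set on it by decref_and_cancel(). */
        if (lock-&amp;gt;l_readers == 0 &amp;amp;&amp;amp; lock-&amp;gt;l_writers == 0 &amp;amp;&amp;amp;
            (ldlm_is_cbpending(lock) || lock-&amp;gt;l_req_mode == LCK_GROUP)) {
                /* take the existing cancel-on-last-decref path here */
        }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;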

&lt;p&gt;I&apos;ll put that patch up in gerrit in a moment.  (Haven&apos;t tested it yet, but will do.)&lt;/p&gt;</comment>
                            <comment id="113582" author="gerrit" created="Tue, 28 Apr 2015 05:01:39 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/14093/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/14093/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6368&quot; title=&quot;ASSERTION( cur-&amp;gt;oe_dlmlock == victim-&amp;gt;oe_dlmlock ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6368&quot;&gt;&lt;del&gt;LU-6368&lt;/del&gt;&lt;/a&gt; ldlm: Do not use cbpending for group locks&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 3e6c20afa18a64c5cb949ecf2ed0f49202ba3e15&lt;/p&gt;</comment>
                            <comment id="114804" author="pjones" created="Sat, 9 May 2015 04:08:58 +0000"  >&lt;p&gt;Landed for 2.8&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="29113">LU-6371</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="30480">LU-6679</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="17310" name="LU-6368_client_dklog.sort.gz" size="1420995" author="paf" created="Mon, 16 Mar 2015 22:20:55 +0000"/>
                            <attachment id="17305" name="llsendfile3.c" size="1319" author="fzago" created="Mon, 16 Mar 2015 16:06:32 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzx8kn:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>