<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:42:02 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4360] Use after free in ksocklnd</title>
                <link>https://jira.whamcloud.com/browse/LU-4360</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Oleg Drokin &amp;lt;green@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;http://maloo.whamcloud.com/test_sets/d275a9d4-5eb4-11e3-ae30-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://maloo.whamcloud.com/test_sets/d275a9d4-5eb4-11e3-ae30-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test lustre-initialization_1 failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;Test system failed to start single suite, so abandoning all hope and giving up&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Info required for matching: lustre-initialization-1 lustre-initialization_1&lt;/p&gt;

&lt;p&gt;When testing with DEBUG_PAGEALLOC enabled, the OST frequently fails to start up, hitting an oops:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;11:23:12:[  289.592410] BUG: unable to handle kernel paging request at ffff880013a91e30
11:23:12:[  289.593112] IP: [&amp;lt;ffffffffa09ef395&amp;gt;] ksocknal_send+0x165/0x450 [ksocklnd]
11:23:12:[  289.593691] PGD 1a26063 PUD 1a2a063 PMD 19d067 PTE 13a91060
11:23:12:[  289.594169] Oops: 0000 [#1] SMP DEBUG_PAGEALLOC
11:23:12:[  289.594557] last sysfs file: /sys/devices/system/cpu/possible
11:23:12:[  289.595026] CPU 0 
11:23:12:[  289.595188] Modules linked in: osp(U) ofd(U) lfsck(U) ost(U) mgc(U) osd_ldiskfs(U) lquota(U) lustre(U) lov(U) osc(U) mdc(U) fid(U) fld(U) ksocklnd(U) ptlrpc(U) obdclass(U) lnet(U) sha512_generic sha256_generic libcfs(U) ldiskfs(U) jbd2 nfsd exportfs autofs4 nfs lockd fscache auth_rpcgss nfs_acl sunrpc ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_addr ipv6 ib_sa ib_mad ib_core microcode virtio_balloon 8139too 8139cp mii i2c_piix4 i2c_core ext3 jbd mbcache virtio_blk virtio_pci virtio_ring virtio pata_acpi ata_generic ata_piix dm_mirror dm_region_hash dm_log dm_mod [last unloaded: speedstep_lib]
11:23:12:[  289.596009] 
11:23:12:[  289.596009] Pid: 5138, comm: ll_cfg_requeue Not tainted 2.6.32-358.23.2.el6_lustre.g3ddc521.x86_64 #1 Red Hat KVM
11:23:12:[  289.596009] RIP: 0010:[&amp;lt;ffffffffa09ef395&amp;gt;]  [&amp;lt;ffffffffa09ef395&amp;gt;] ksocknal_send+0x165/0x450 [ksocklnd]
11:23:12:[  289.596009] RSP: 0018:ffff88002e315880  EFLAGS: 00010286
11:23:12:[  289.596009] RAX: 0000000000000000 RBX: ffff880013a79ef0 RCX: 0000000000000009
11:23:12:[  289.596009] RDX: ffff880002200000 RSI: 0000000000000292 RDI: ffffffffa09feb38
11:23:12:[  289.596009] RBP: ffff88002e3158f0 R08: 0000000000000000 R09: 0000000000000000
11:23:12:[  289.596009] R10: ffff8800148179e0 R11: 0000000000000000 R12: ffff880013a91df0
11:23:12:[  289.596009] R13: 0000000000000001 R14: ffff880015c6e740 R15: 0000000000000148
11:23:12:[  289.596009] FS:  0000000000000000(0000) GS:ffff880002200000(0000) knlGS:0000000000000000
11:23:12:[  289.596009] CS:  0010 DS: 0018 ES: 0018 CR0: 000000008005003b
11:23:12:[  289.596009] CR2: ffff880013a91e30 CR3: 000000001541c000 CR4: 00000000000006f0
11:23:12:[  289.596009] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
11:23:12:[  289.596009] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
11:23:12:[  289.596009] Process ll_cfg_requeue (pid: 5138, threadinfo ffff88002e314000, task ffff88002e3121c0)
11:23:12:[  289.596009] Stack:
11:23:12:[  289.596009]  ffff8800350d9001 0000000113a91df0 0000000000000000 ffff8800346a4df0
11:23:12:[  289.596009] &amp;lt;d&amp;gt; 0000000000000000 000000e800000000 000200000a0a11cc 0000000000003039
11:23:12:[  289.596009] &amp;lt;d&amp;gt; ffff88002e315910 ffff8800346a4df0 ffff880013a91df0 ffff880013a91df0
11:23:12:[  289.596009] Call Trace:
11:23:12:[  289.596009]  [&amp;lt;ffffffffa0532dfb&amp;gt;] lnet_ni_send+0x4b/0xf0 [lnet]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa0537005&amp;gt;] lnet_send+0x655/0xb80 [lnet]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa053806a&amp;gt;] LNetPut+0x31a/0x860 [lnet]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa0808dc0&amp;gt;] ptl_send_buf+0x1e0/0x550 [ptlrpc]
11:23:12:[  289.596009]  [&amp;lt;ffffffff8103b82c&amp;gt;] ? kvm_clock_read+0x1c/0x20
11:23:12:[  289.596009]  [&amp;lt;ffffffffa080c2bd&amp;gt;] ptl_send_rpc+0x4dd/0xcc0 [ptlrpc]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa0800994&amp;gt;] ptlrpc_send_new_req+0x454/0x7c0 [ptlrpc]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa08061fe&amp;gt;] ptlrpc_set_wait+0x5ce/0x830 [ptlrpc]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa08101c6&amp;gt;] ? lustre_msg_set_jobid+0xb6/0x140 [ptlrpc]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa08064e7&amp;gt;] ptlrpc_queue_wait+0x87/0x220 [ptlrpc]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa07e1ef5&amp;gt;] ldlm_cli_enqueue+0x365/0x790 [ptlrpc]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa07e6bd0&amp;gt;] ? ldlm_completion_ast+0x0/0x930 [ptlrpc]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa0db0080&amp;gt;] ? mgc_blocking_ast+0x0/0x810 [mgc]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa0db5633&amp;gt;] mgc_process_log+0x283/0x17f0 [mgc]
11:23:12:[  289.596009]  [&amp;lt;ffffffff81080722&amp;gt;] ? del_timer_sync+0x22/0x30
11:23:12:[  289.596009]  [&amp;lt;ffffffffa0db0080&amp;gt;] ? mgc_blocking_ast+0x0/0x810 [mgc]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa07e6bd0&amp;gt;] ? ldlm_completion_ast+0x0/0x930 [ptlrpc]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa0487951&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
11:23:12:[  289.596009]  [&amp;lt;ffffffffa0db775a&amp;gt;] mgc_requeue_thread+0x31a/0x7a0 [mgc]
11:23:12:[  289.596009]  [&amp;lt;ffffffff8105bca0&amp;gt;] ? default_wake_function+0x0/0x20
11:23:12:[  289.596009]  [&amp;lt;ffffffffa0db7440&amp;gt;] ? mgc_requeue_thread+0x0/0x7a0 [mgc]
11:23:12:[  289.596009]  [&amp;lt;ffffffff81095696&amp;gt;] kthread+0x96/0xa0
11:23:12:[  289.596009]  [&amp;lt;ffffffff8100c10a&amp;gt;] child_rip+0xa/0x20
11:23:12:[  289.596009]  [&amp;lt;ffffffff81095600&amp;gt;] ? kthread+0x0/0xa0
11:23:12:[  289.596009]  [&amp;lt;ffffffff8100c100&amp;gt;] ? child_rip+0x0/0x20
11:23:12:[  289.596009] Code: 00 c7 43 60 c1 00 00 00 48 89 de 48 c7 43 70 00 00 00 00 48 c7 43 68 00 00 00 00 48 8b 55 c0 8b 4d c8 48 8b 7d a8 e8 7b fa ff ff &amp;lt;41&amp;gt; f6 44 24 40 08 74 1b 8b 4d b4 85 c9 0f 84 c8 00 00 00 65 48 
11:23:12:[  289.596009] RIP  [&amp;lt;ffffffffa09ef395&amp;gt;] ksocknal_send+0x165/0x450 [ksocklnd]
11:23:12:[  289.596009]  RSP &amp;lt;ffff88002e315880&amp;gt;
11:23:12:[  289.596009] CR2: ffff880013a91e30
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="22378">LU-4360</key>
            <summary>Use after free in ksocklnd</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="emoly.liu">Emoly Liu</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Mon, 9 Dec 2013 05:20:57 +0000</created>
                <updated>Tue, 11 Mar 2014 22:57:39 +0000</updated>
                            <resolved>Thu, 23 Jan 2014 08:11:10 +0000</resolved>
                                                    <fixVersion>Lustre 2.6.0</fixVersion>
                    <fixVersion>Lustre 2.5.1</fixVersion>
                    <fixVersion>Lustre 2.4.3</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>8</watches>
                                                                            <comments>
                            <comment id="73156" author="pjones" created="Mon, 9 Dec 2013 23:59:13 +0000"  >&lt;p&gt;Emoly&lt;/p&gt;

&lt;p&gt;Could you please look into this one?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="73433" author="emoly.liu" created="Fri, 13 Dec 2013 01:58:41 +0000"  >&lt;p&gt;I built lustre on kernel-2.6.32-358.23.2.el6 with CONFIG_DEBUG_PAGEALLOC=y, and ran llmount.sh or started OST manually many times, but failed to hit this failure.&lt;/p&gt;

&lt;p&gt;Do I miss anything?&lt;/p&gt;</comment>
                            <comment id="74136" author="green" created="Sat, 28 Dec 2013 00:30:18 +0000"  >&lt;p&gt;The trick seems to be to have real networking in place and real clients.&lt;br/&gt;
I just reproduced this locally by having a server and client to reside on two separate nodes and it crashed right away.&lt;/p&gt;

&lt;p&gt;Interestingly enough, my own stack trace is different, which seems to indicate a generic class of problem somewhere that causes a request that is being sent to be freed mid-send.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&amp;lt;1&amp;gt;[125127.785385] BUG: unable to handle kernel paging request at ffff8800814bee30
&amp;lt;1&amp;gt;[125127.785701] IP: [&amp;lt;ffffffffa0559395&amp;gt;] ksocknal_send+0x165/0x450 [ksocklnd]
&amp;lt;4&amp;gt;[125127.786001] PGD 1a26063 PUD 501067 PMD 50c067 PTE 80000000814be060
&amp;lt;4&amp;gt;[125127.786417] Oops: 0000 [#1] SMP DEBUG_PAGEALLOC
&amp;lt;4&amp;gt;[125127.786685] last sysfs file: /sys/devices/system/cpu/possible
&amp;lt;4&amp;gt;[125127.786946] CPU 2 
&amp;lt;4&amp;gt;[125127.786983] Modules linked in: lustre ofd osp lod ost mdt mdd mgs osd_ldiskfs ldiskfs exportfs lquota lfsck jbd obdecho mgc lov osc mdc lmv fid fld ptlrpc obdclass ksocklnd lnet sha512_generic sha256_generic libcfs ext4 jbd2 mbcache ppdev parport_pc parport virtio_balloon virtio_console i2c_piix4 i2c_core virtio_blk virtio_net virtio_pci virtio_ring virtio pata_acpi ata_generic ata_piix dm_mirror dm_region_hash dm_log dm_mod nfs lockd fscache auth_rpcgss nfs_acl sunrpc be2iscsi bnx2i cnic uio ipv6 cxgb3i libcxgbi cxgb3 mdio libiscsi_tcp qla4xxx iscsi_boot_sysfs libiscsi scsi_transport_iscsi [last unloaded: speedstep_lib]
&amp;lt;4&amp;gt;[125127.789257] 
&amp;lt;4&amp;gt;[125127.789257] Pid: 7588, comm: ldlm_cn00_000 Not tainted 2.6.32-rhe6.4-debug2 #1 Bochs Bochs
&amp;lt;4&amp;gt;[125127.789257] RIP: 0010:[&amp;lt;ffffffffa0559395&amp;gt;]  [&amp;lt;ffffffffa0559395&amp;gt;] ksocknal_send+0x165/0x450 [ksocklnd]
&amp;lt;4&amp;gt;[125127.789257] RSP: 0018:ffff8800b611da80  EFLAGS: 00010286
&amp;lt;4&amp;gt;[125127.789257] RAX: 0000000000000000 RBX: ffff88007a167ef0 RCX: ffffc9000638e060
&amp;lt;4&amp;gt;[125127.789257] RDX: 0000000000000001 RSI: 0000000000000282 RDI: ffffffffa0568658
&amp;lt;4&amp;gt;[125127.789257] RBP: ffff8800b611daf0 R08: 0000000000000000 R09: 0000000000000001
&amp;lt;4&amp;gt;[125127.789257] R10: 0000000000000001 R11: ffffffff81d5fb40 R12: ffff8800814bedf0
&amp;lt;4&amp;gt;[125127.789257] R13: 0000000000000001 R14: ffff8800b658fad0 R15: 00000000000000c0
&amp;lt;4&amp;gt;[125127.789257] FS:  0000000000000000(0000) GS:ffff880006280000(0000) knlGS:0000000000000000
&amp;lt;4&amp;gt;[125127.789257] CS:  0010 DS: 0018 ES: 0018 CR0: 000000008005003b
&amp;lt;4&amp;gt;[125127.789257] CR2: ffff8800814bee30 CR3: 0000000001a25000 CR4: 00000000000006e0
&amp;lt;4&amp;gt;[125127.789257] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
&amp;lt;4&amp;gt;[125127.789257] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
&amp;lt;4&amp;gt;[125127.789257] Process ldlm_cn00_000 (pid: 7588, threadinfo ffff8800b611c000, task ffff8800b611a240)
&amp;lt;4&amp;gt;[125127.789257] Stack:
&amp;lt;4&amp;gt;[125127.789257]  ffff8800814be001 00000001a04f6dec 0000000000000000 ffff8800b5974df0
&amp;lt;4&amp;gt;[125127.789257] &amp;lt;d&amp;gt; 0000000000000000 000000e800000000 00020000c0a80abf 0000000000003039
&amp;lt;4&amp;gt;[125127.789257] &amp;lt;d&amp;gt; ffff8800b611db10 ffff8800b5974df0 ffff8800814bedf0 ffff8800814bedf0
&amp;lt;4&amp;gt;[125127.789257] Call Trace:
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa04f1dfb&amp;gt;] lnet_ni_send+0x4b/0xf0 [lnet]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa04f6005&amp;gt;] lnet_send+0x655/0xb80 [lnet]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa04f706a&amp;gt;] LNetPut+0x31a/0x860 [lnet]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa07ff290&amp;gt;] ptl_send_buf+0x1e0/0x550 [ptlrpc]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa081f04b&amp;gt;] ? at_measured+0x25b/0x380 [ptlrpc]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa0841495&amp;gt;] ? null_authorize+0x75/0x100 [ptlrpc]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa07ff884&amp;gt;] ptlrpc_send_reply+0x284/0x800 [ptlrpc]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa0800007&amp;gt;] ptlrpc_reply+0x27/0x30 [ptlrpc]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa07e01b8&amp;gt;] ldlm_handle_cancel+0x188/0x240 [ptlrpc]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa07e5c79&amp;gt;] ldlm_cancel_handler+0x1e9/0x500 [ptlrpc]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa0817c13&amp;gt;] ptlrpc_main+0xcd3/0x1940 [ptlrpc]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffffa0816f40&amp;gt;] ? ptlrpc_main+0x0/0x1940 [ptlrpc]
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffff81094726&amp;gt;] kthread+0x96/0xa0
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffff8100c10a&amp;gt;] child_rip+0xa/0x20
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffff81094690&amp;gt;] ? kthread+0x0/0xa0
&amp;lt;4&amp;gt;[125127.789257]  [&amp;lt;ffffffff8100c100&amp;gt;] ? child_rip+0x0/0x20
&amp;lt;4&amp;gt;[125127.789257] Code: 00 c7 43 60 c1 00 00 00 48 89 de 48 c7 43 70 00 00 00 00 48 c7 43 68 00 00 00 00 48 8b 55 c0 8b 4d c8 48 8b 7d a8 e8 7b fa ff ff &amp;lt;41&amp;gt; f6 44 24 40 08 74 1b 8b 4d b4 85 c9 0f 84 c8 00 00 00 65 48 
&amp;lt;1&amp;gt;[125127.789257] RIP  [&amp;lt;ffffffffa0559395&amp;gt;] ksocknal_send+0x165/0x450 [ksocklnd]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="74137" author="green" created="Sat, 28 Dec 2013 01:58:37 +0000"  >&lt;p&gt;This bug was introduced by Isaac in commit e2a2fab993d01597010cb2b44df44a522af0eec8&lt;/p&gt;

&lt;p&gt;there is a very prolific use after free in ksocknal_send - once it calls ksocknal_launch_packet - this might trigger a callback that frees the request, and then ksocknal_send tries to access the request again with &lt;br/&gt;
 &#160; &#160; &#160; &#160;if (lntmsg-&amp;gt;msg_vmflush)&lt;br/&gt;
 &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160;cfs_memory_pressure_restore(mpflag);&lt;/p&gt;

&lt;p&gt;and we die&lt;/p&gt;

&lt;p&gt;Potentially other LNDs are susceptible to a similar problem as well.&lt;/p&gt;

&lt;p&gt;I verified that moving call to ksocknal_launch_packet after the vmflush check fixes the issue for me, but I am not 100% sure if it actually never allocates anything (making it the valid fix) or not.&lt;/p&gt;</comment>
                            <comment id="74138" author="green" created="Sat, 28 Dec 2013 03:35:17 +0000"  >&lt;p&gt;My proposed patch: &lt;a href=&quot;http://review.whamcloud.com/8667&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/8667&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="75497" author="adilger" created="Thu, 23 Jan 2014 08:11:10 +0000"  >&lt;p&gt;Oleg&apos;s patch has landed, and recent problems reported against this bug appear to be related to configuration of the test infrastructure&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                                        </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwaw7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>11940</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>