<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:21:05 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-8849] Client hangs on &apos;lfs ladvise&apos; with large input values</title>
                <link>https://jira.whamcloud.com/browse/LU-8849</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;I was playing around with input values for &apos;lfs ladvise&apos; and trying to see what maximums I could send as inputs. The following commands worked with no obvious errors or returned an error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# lfs ladvise -a willread /lustre/scratch/tfile --length 5000000000
# lfs ladvise -a willread /lustre/scratch/tfile --length 50000000009999999999999
# lfs ladvise -a willread /lustre/scratch/space_file2 --length 500000000099M
# lfs ladvise -a willread /lustre/scratch/space_file2 --length 50000000009999M
ladvise: bad length &apos;50000000009999M&apos;
# lfs ladvise -a willread /lustre/scratch/space_file2 --length 500000000099G
ladvise: bad length &apos;500000000099G&apos;
# lfs ladvise -a willread /lustre/scratch/space_file2 --length 500000000099k -s 0
# lfs ladvise -a willread /lustre/scratch/space_file2 --length 500000000099k -s 1M
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Then I tried one command too many and I hung the client with &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;lfs ladvise -a willread /lustre/scratch/space_file2 --length 500000000099k -s 500000000099k
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The file resides on OST0:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# lfs getstripe /lustre/scratch/space_file2 
/lustre/scratch/space_file2
lmm_stripe_count:   1
lmm_stripe_size:    1048576
lmm_pattern:        1
lmm_layout_gen:     0
lmm_stripe_offset:  0
	obdidx		 objid		 objid		 group
	     0	            15	          0xf	             0

ls -l /lustre/scratch/space_file2 
-rw-r--r-- 1 root root 104857600 Nov 17 21:55 /lustre/scratch/space_file2
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;dmesg from the client:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;INFO: task lfs:26850 blocked for more than 120 seconds.
      Not tainted 2.6.32-642.6.1.el6.x86_64 #1
&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
lfs           D 0000000000000000     0 26850  15016 0x00000084
 ffff880079cbbaf8 0000000000000086 0000000000000000 ffff88007851a040
 ffff88007851a078 ffff880078503bc0 00001e14781a34aa 0000000000000000
 ffff88007c01f558 0000000101f41458 ffff88007c01fad8 ffff880079cbbfd8
Call Trace:
 [&amp;lt;ffffffff815498b5&amp;gt;] schedule_timeout+0x215/0x2e0
 [&amp;lt;ffffffffa07765d3&amp;gt;] ? ptlrpc_set_add_new_req+0xe3/0x160 [ptlrpc]
 [&amp;lt;ffffffff81549513&amp;gt;] wait_for_common+0x123/0x180
 [&amp;lt;ffffffff8106c500&amp;gt;] ? default_wake_function+0x0/0x20
 [&amp;lt;ffffffffa09bb3e0&amp;gt;] ? lov_io_end_wrapper+0x0/0x100 [lov]
 [&amp;lt;ffffffff8154964d&amp;gt;] wait_for_completion+0x1d/0x20
 [&amp;lt;ffffffffa0b336f0&amp;gt;] osc_io_ladvise_end+0x40/0x60 [osc]
 [&amp;lt;ffffffffa05e7a30&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
 [&amp;lt;ffffffffa05e8410&amp;gt;] ? cl_io_start+0x0/0x140 [obdclass]
 [&amp;lt;ffffffffa09bb4d1&amp;gt;] lov_io_end_wrapper+0xf1/0x100 [lov]
 [&amp;lt;ffffffffa09bb228&amp;gt;] lov_io_call+0x88/0x120 [lov]
 [&amp;lt;ffffffffa09bccac&amp;gt;] lov_io_end+0x4c/0xf0 [lov]
 [&amp;lt;ffffffffa05e7a30&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
 [&amp;lt;ffffffffa05eb362&amp;gt;] cl_io_loop+0xc2/0x1b0 [obdclass]
 [&amp;lt;ffffffffa0a33ade&amp;gt;] ll_file_ioctl+0x2ece/0x3eb0 [lustre]
 [&amp;lt;ffffffff8115a409&amp;gt;] ? handle_mm_fault+0x299/0x3d0
 [&amp;lt;ffffffff81052204&amp;gt;] ? __do_page_fault+0x1f4/0x500
 [&amp;lt;ffffffff811af522&amp;gt;] vfs_ioctl+0x22/0xa0
 [&amp;lt;ffffffff811af9ea&amp;gt;] do_vfs_ioctl+0x3aa/0x580
 [&amp;lt;ffffffff811afc41&amp;gt;] sys_ioctl+0x81/0xa0
 [&amp;lt;ffffffff8100b0d2&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;On OST0 dmesg:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LNet: Service thread pid 7920 was inactive for 40.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
Pid: 7920, comm: ll_ost_io00_012

Call Trace:
 [&amp;lt;ffffffff8113e758&amp;gt;] ? __alloc_pages_nodemask+0x548/0x950
 [&amp;lt;ffffffff81177d9a&amp;gt;] ? alloc_pages_current+0xaa/0x110
 [&amp;lt;ffffffff8112eca7&amp;gt;] ? __page_cache_alloc+0x87/0x90
 [&amp;lt;ffffffff8112fa3f&amp;gt;] ? find_or_create_page+0x4f/0xb0
 [&amp;lt;ffffffffa0d3a412&amp;gt;] ? osd_bufs_get+0x2b2/0x4b0 [osd_ldiskfs]
 [&amp;lt;ffffffffa0e96fd0&amp;gt;] ? ofd_ladvise_hdl+0x8f0/0xdc0 [ofd]
 [&amp;lt;ffffffffa0840201&amp;gt;] ? lustre_pack_reply+0x11/0x20 [ptlrpc]
 [&amp;lt;ffffffffa08a449c&amp;gt;] ? tgt_request_handle+0x8ec/0x1440 [ptlrpc]
 [&amp;lt;ffffffffa0850ba1&amp;gt;] ? ptlrpc_main+0xd31/0x1800 [ptlrpc]
 [&amp;lt;ffffffffa084fe70&amp;gt;] ? ptlrpc_main+0x0/0x1800 [ptlrpc]
 [&amp;lt;ffffffff810a640e&amp;gt;] ? kthread+0x9e/0xc0
 [&amp;lt;ffffffff8100c28a&amp;gt;] ? child_rip+0xa/0x20
 [&amp;lt;ffffffff810a6370&amp;gt;] ? kthread+0x0/0xc0
 [&amp;lt;ffffffff8100c280&amp;gt;] ? child_rip+0x0/0x20

LustreError: dumping log to /tmp/lustre-log.1479431579.7920
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;After running some tests on a different client to see if the file system was responsive, dmesg on OST0 has:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: 7912:0:(service.c:1331:ptlrpc_at_send_early_reply()) @@@ Couldn&apos;t add any time (5/5), not sending early reply
  req@ffff88007ace3450 x1551266786862416/t0(0) o21-&amp;gt;5959cf4b-6c57-f313-be15-ccf7ea98876e@10.100.4.158@tcp:-1/-1 lens 504/432 e 1 to 0 dl 1479432506 ref 2 fl Interpret:/0/0 rc 0/0
Lustre: scratch-OST0000: Client 5959cf4b-6c57-f313-be15-ccf7ea98876e (at 10.100.4.158@tcp) reconnecting
Lustre: scratch-OST0000: Connection restored to 5959cf4b-6c57-f313-be15-ccf7ea98876e (at 10.100.4.158@tcp)
LNet: Service thread pid 31614 was inactive for 762.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
Pid: 31614, comm: ll_ost_io00_019

Call Trace:
 [&amp;lt;ffffffff8107405a&amp;gt;] __cond_resched+0x2a/0x40
 [&amp;lt;ffffffff8113e339&amp;gt;] ? __alloc_pages_nodemask+0x129/0x950
 [&amp;lt;ffffffff8117f10c&amp;gt;] ? transfer_objects+0x5c/0x80
 [&amp;lt;ffffffff8118d2a5&amp;gt;] ? mem_cgroup_charge_common+0xa5/0xd0
 [&amp;lt;ffffffff8118e0b0&amp;gt;] ? mem_cgroup_cache_charge+0xc0/0xd0
 [&amp;lt;ffffffff81177d9a&amp;gt;] ? alloc_pages_current+0xaa/0x110
 [&amp;lt;ffffffff8112eca7&amp;gt;] ? __page_cache_alloc+0x87/0x90
 [&amp;lt;ffffffff8112fa3f&amp;gt;] ? find_or_create_page+0x4f/0xb0
 [&amp;lt;ffffffffa0d3a412&amp;gt;] ? osd_bufs_get+0x2b2/0x4b0 [osd_ldiskfs]
 [&amp;lt;ffffffffa0e96fd0&amp;gt;] ? ofd_ladvise_hdl+0x8f0/0xdc0 [ofd]
 [&amp;lt;ffffffffa0840201&amp;gt;] ? lustre_pack_reply+0x11/0x20 [ptlrpc]
 [&amp;lt;ffffffffa08a449c&amp;gt;] ? tgt_request_handle+0x8ec/0x1440 [ptlrpc]
 [&amp;lt;ffffffffa0850ba1&amp;gt;] ? ptlrpc_main+0xd31/0x1800 [ptlrpc]
 [&amp;lt;ffffffffa084fe70&amp;gt;] ? ptlrpc_main+0x0/0x1800 [ptlrpc]
 [&amp;lt;ffffffff810a640e&amp;gt;] ? kthread+0x9e/0xc0
 [&amp;lt;ffffffff8100c28a&amp;gt;] ? child_rip+0xa/0x20
 [&amp;lt;ffffffff810a6370&amp;gt;] ? kthread+0x0/0xc0
 [&amp;lt;ffffffff8100c280&amp;gt;] ? child_rip+0x0/0x20

LustreError: dumping log to /tmp/lustre-log.1479432668.31614
Lustre: scratch-OST0000: Client 28158de5-48d2-2404-815b-3bb36c5b7839 (at 10.100.4.157@tcp) reconnecting
Lustre: scratch-OST0000: Connection restored to 28158de5-48d2-2404-815b-3bb36c5b7839 (at 10.100.4.157@tcp)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I do not know how repeatable this is. I tried to do another &apos;lfs advise&apos; call on a different node and it is hung also. On this node I ran the following commands until the last one hung:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# lfs ladvise -a willread /lustre/scratch/space_file2 --length 500000000099k -s 0 
[root@eagle-48vm5 ~]# lfs ladvise -a willread /lustre/scratch/space_file2 --length 500000000099k -s 0k
[root@eagle-48vm5 ~]# lfs ladvise -a willread /lustre/scratch/space_file2 --length 500000000099k -s 0M
[root@eagle-48vm5 ~]# lfs ladvise -a willread /lustre/scratch/space_file2 --length 500000000099k -s 0T
[root@eagle-48vm5 ~]# lfs ladvise -a willread /lustre/scratch/space_file2 --length 500000000099k -s 0G
[root@eagle-48vm5 ~]# lfs ladvise -a willread /lustre/scratch/space_file2 --length 500000000099k -s 1G
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;With dmesg on this client:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: 7929:0:(client.c:2111:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1479431906/real 1479431906]  req@ffff88007bd859c0 x1551266786862416/t0(0) o21-&amp;gt;scratch-OST0000-osc-ffff880078ae4800@10.100.4.155@tcp:6/4 lens 504/432 e 1 to 1 dl 1479432507 ref 1 fl Rpc:X/0/ffffffff rc 0/-1
Lustre: scratch-OST0000-osc-ffff880078ae4800: Connection to scratch-OST0000 (at 10.100.4.155@tcp) was lost; in progress operations using this service will wait for recovery to complete
Lustre: scratch-OST0000-osc-ffff880078ae4800: Connection restored to 10.100.4.155@tcp (at 10.100.4.155@tcp)
INFO: task lfs:26783 blocked for more than 120 seconds.
      Not tainted 2.6.32-642.6.1.el6.x86_64 #1
&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
lfs           D 0000000000000001     0 26783  15019 0x00000084
 ffff88007b5a7af8 0000000000000086 ffff88007b5a7ac0 ffff88007b5a7abc
 0000000000000020 ffff88007f824380 00001e649680e0ec ffff880002216ec0
 0000000000000400 0000000101f954bb ffff880037abe5f8 ffff88007b5a7fd8
Call Trace:
 [&amp;lt;ffffffff815498b5&amp;gt;] schedule_timeout+0x215/0x2e0
 [&amp;lt;ffffffffa07765d3&amp;gt;] ? ptlrpc_set_add_new_req+0xe3/0x160 [ptlrpc]
 [&amp;lt;ffffffffa07ac493&amp;gt;] ? ptlrpcd_add_req+0x1e3/0x2a0 [ptlrpc]
 [&amp;lt;ffffffffa078e0c0&amp;gt;] ? lustre_swab_ladvise+0x0/0x50 [ptlrpc]
 [&amp;lt;ffffffff81549513&amp;gt;] wait_for_common+0x123/0x180
 [&amp;lt;ffffffff8106c500&amp;gt;] ? default_wake_function+0x0/0x20
 [&amp;lt;ffffffffa09bb3e0&amp;gt;] ? lov_io_end_wrapper+0x0/0x100 [lov]
 [&amp;lt;ffffffff8154964d&amp;gt;] wait_for_completion+0x1d/0x20
 [&amp;lt;ffffffffa0b336f0&amp;gt;] osc_io_ladvise_end+0x40/0x60 [osc]
 [&amp;lt;ffffffffa05e7a30&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
 [&amp;lt;ffffffffa05e8410&amp;gt;] ? cl_io_start+0x0/0x140 [obdclass]
 [&amp;lt;ffffffffa09bb4d1&amp;gt;] lov_io_end_wrapper+0xf1/0x100 [lov]
 [&amp;lt;ffffffffa09bb228&amp;gt;] lov_io_call+0x88/0x120 [lov]
 [&amp;lt;ffffffffa09bccac&amp;gt;] lov_io_end+0x4c/0xf0 [lov]
 [&amp;lt;ffffffffa05e7a30&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
 [&amp;lt;ffffffffa05eb362&amp;gt;] cl_io_loop+0xc2/0x1b0 [obdclass]
 [&amp;lt;ffffffffa0a33ade&amp;gt;] ll_file_ioctl+0x2ece/0x3eb0 [lustre]
 [&amp;lt;ffffffff8115a409&amp;gt;] ? handle_mm_fault+0x299/0x3d0
 [&amp;lt;ffffffff81052204&amp;gt;] ? __do_page_fault+0x1f4/0x500
 [&amp;lt;ffffffff811af522&amp;gt;] vfs_ioctl+0x22/0xa0
 [&amp;lt;ffffffff811af9ea&amp;gt;] do_vfs_ioctl+0x3aa/0x580
 [&amp;lt;ffffffff811afc41&amp;gt;] sys_ioctl+0x81/0xa0
 [&amp;lt;ffffffff8100b0d2&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>eagle cluster with lustre-master 2.8.60 </environment>
        <key id="41666">LU-8849</key>
            <summary>Client hangs on &apos;lfs ladvise&apos; with large input values</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="lixi">Li Xi</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                    </labels>
                <created>Fri, 18 Nov 2016 01:39:55 +0000</created>
                <updated>Sat, 22 Jul 2017 03:53:38 +0000</updated>
                            <resolved>Sat, 22 Jul 2017 03:53:16 +0000</resolved>
                                    <version>Lustre 2.9.0</version>
                                    <fixVersion>Lustre 2.11.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="174186" author="adilger" created="Fri, 18 Nov 2016 04:42:29 +0000"  >&lt;p&gt;James, is this ldiskfs or ZFS?  Not sure that it matters, but just in case. &lt;/p&gt;

&lt;p&gt;Li Xi, can you please make a patch that limits the ladvise start and end to the object&apos;s size, so that it doesn&apos;t try filling the pages beyond the end of the object. Ideally, we would also use FIEMAP or SEEK_HOLE/SEEK_DATA to skip holes in the object. &lt;/p&gt;</comment>
                            <comment id="174231" author="jamesanunez" created="Fri, 18 Nov 2016 14:58:48 +0000"  >&lt;p&gt;All testing reported here is with ldiskfs.&lt;/p&gt;</comment>
                            <comment id="186184" author="adilger" created="Sat, 25 Feb 2017 08:02:48 +0000"  >&lt;p&gt;Li Xi, any update on this?&lt;/p&gt;</comment>
                            <comment id="196427" author="lixi" created="Fri, 19 May 2017 03:38:31 +0000"  >&lt;p&gt;Sorry for the delay. We will work on this.&lt;/p&gt;</comment>
                            <comment id="196439" author="gerrit" created="Fri, 19 May 2017 08:57:29 +0000"  >&lt;p&gt;Yingjin Qian (qian@ddn.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/27209&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27209&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8849&quot; title=&quot;Client hangs on &amp;#39;lfs ladvise&amp;#39; with large input values&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8849&quot;&gt;&lt;del&gt;LU-8849&lt;/del&gt;&lt;/a&gt; ofd: Client hanges on ladvise with large start values&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b7772195e13edc573b33c49ff2a7e5de2d4de0bf&lt;/p&gt;</comment>
                            <comment id="203168" author="gerrit" created="Sat, 22 Jul 2017 02:54:40 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/27209/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27209/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8849&quot; title=&quot;Client hangs on &amp;#39;lfs ladvise&amp;#39; with large input values&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8849&quot;&gt;&lt;del&gt;LU-8849&lt;/del&gt;&lt;/a&gt; ofd: Client hanges on ladvise with large start values&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 829a24f644ceb453124b72c94584404d2e97f4f9&lt;/p&gt;</comment>
                            <comment id="203176" author="mdiep" created="Sat, 22 Jul 2017 03:53:38 +0000"  >&lt;p&gt;fix landed in lustre 2.11.0&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzyw13:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>