<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:17:52 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-1578] ll_ost_io_xx thread hangs and OST status get into down</title>
                <link>https://jira.whamcloud.com/browse/LU-1578</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;At the our customer site, some of ll_ost_io_xx threads hang and OST&apos;s status get into &quot;DOWN&quot; with &quot;lctl dl&quot;.&lt;br/&gt;
We saw the following Call traces on these OSSs during the time.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Jun 28 16:22:18 nos011i kernel: Pid: 32708, comm: ll_ost_io_188
Jun 28 16:22:18 nos011i kernel: 
Jun 28 16:22:18 nos011i kernel: Call Trace:
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff888a914d&amp;gt;] ldlm_cli_enqueue_local+0x4fd/0x520 [ptlrpc]
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff800645e3&amp;gt;] __down_write_nested+0x7a/0x92
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff88bd7b29&amp;gt;] filter_destroy+0x969/0x1f90 [obdfilter]
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff8876ecfd&amp;gt;] libcfs_debug_vmsg2+0x70d/0x970 [libcfs]
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff888d2cd2&amp;gt;] lustre_pack_reply_flags+0x8e2/0x950 [ptlrpc]
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff888d2d69&amp;gt;] lustre_pack_reply+0x29/0xb0 [ptlrpc]
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff88b89070&amp;gt;] ost_destroy+0x660/0x790 [ost]
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff888ceef5&amp;gt;] lustre_msg_get_opc+0x35/0xf0 [ptlrpc]
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff88b92a46&amp;gt;] ost_handle+0x1556/0x55b0 [ost]
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff888de6d9&amp;gt;] ptlrpc_server_handle_request+0x989/0xe00 [ptlrpc]
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff8008cc1e&amp;gt;] __wake_up_common+0x3e/0x68
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff888dfdc6&amp;gt;] ptlrpc_main+0xf66/0x1120 [ptlrpc]
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff8005dfb1&amp;gt;] child_rip+0xa/0x11
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff888dee60&amp;gt;] ptlrpc_main+0x0/0x1120 [ptlrpc]
Jun 28 16:22:18 nos011i kernel:  [&amp;lt;ffffffff8005dfa7&amp;gt;] child_rip+0x0/0x11
Jun 28 16:22:18 nos011i kernel: 
Jun 28 16:22:18 nos011i kernel: Lustre: Service thread pid 8870 was inactive for 200.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
Jun 28 16:22:18 nos011i kernel: LustreError: dumping log to /tmp/lustre-log.1340868138.32708
Jun 28 16:22:18 nos011i kernel: Pid: 8870, comm: ll_ost_io_37
Jun 28 16:22:20 nos011i kernel: 
Jun 28 16:22:20 nos011i kernel: Call Trace:
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff800645e3&amp;gt;] __down_write_nested+0x7a/0x92
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff88bd7b29&amp;gt;] filter_destroy+0x969/0x1f90 [obdfilter]
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff8876ecfd&amp;gt;] libcfs_debug_vmsg2+0x70d/0x970 [libcfs]
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff888d2cd2&amp;gt;] lustre_pack_reply_flags+0x8e2/0x950 [ptlrpc]
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff888d2d69&amp;gt;] lustre_pack_reply+0x29/0xb0 [ptlrpc]
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff88b89070&amp;gt;] ost_destroy+0x660/0x790 [ost]
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff888ceef5&amp;gt;] lustre_msg_get_opc+0x35/0xf0 [ptlrpc]
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff88b92a46&amp;gt;] ost_handle+0x1556/0x55b0 [ost]
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff888de6d9&amp;gt;] ptlrpc_server_handle_request+0x989/0xe00 [ptlrpc]
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff8008cc1e&amp;gt;] __wake_up_common+0x3e/0x68
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff888dfdc6&amp;gt;] ptlrpc_main+0xf66/0x1120 [ptlrpc]
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff8005dfb1&amp;gt;] child_rip+0xa/0x11
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff888dee60&amp;gt;] ptlrpc_main+0x0/0x1120 [ptlrpc]
Jun 28 16:22:21 nos011i kernel:  [&amp;lt;ffffffff8005dfa7&amp;gt;] child_rip+0x0/0x11
Jun 28 16:22:21 nos011i kernel: 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>RHEL5.7</environment>
        <key id="15069">LU-1578</key>
            <summary>ll_ost_io_xx thread hangs and OST status get into down</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="niu">Niu Yawei</assignee>
                                    <reporter username="ihara">Shuichi Ihara</reporter>
                        <labels>
                    </labels>
                <created>Thu, 28 Jun 2012 04:39:59 +0000</created>
                <updated>Fri, 31 Aug 2012 11:25:34 +0000</updated>
                            <resolved>Fri, 31 Aug 2012 11:25:34 +0000</resolved>
                                    <version>Lustre 1.8.7</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                                                                            <comments>
                            <comment id="41246" author="ihara" created="Thu, 28 Jun 2012 04:46:29 +0000"  >&lt;p&gt;two OSS&apos;s log files are attached. we saw this situation on these two OSSs.&lt;/p&gt;</comment>
                            <comment id="41300" author="cliffw" created="Fri, 29 Jun 2012 01:23:05 +0000"  >&lt;p&gt;Are you sure the disks were healthy during this time? &lt;br/&gt;
Can you get a stack dump of all threads when you see the OST in &apos;down&apos; state and attach to the bug? &lt;/p&gt;</comment>
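<!-- 
For the stack dump requested here: on a RHEL5-era kernel, a stack dump of all
threads can usually be triggered through the sysrq facility (a generic kernel
technique, not a Lustre-specific command; this assumes root access and that
sysrq is permitted):

    # enable sysrq, dump every task's state to the kernel ring buffer,
    # then save it for attachment to the ticket
    echo 1 > /proc/sys/kernel/sysrq
    echo t > /proc/sysrq-trigger
    dmesg > /tmp/all-threads.txt
-->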
                            <comment id="41317" author="bogl" created="Fri, 29 Jun 2012 12:26:03 +0000"  >&lt;p&gt;Could you try upgrading to 1.8.8-wc1?  There have been a number of fixes since 1.8.7 that may address your problem.&lt;/p&gt;</comment>
                            <comment id="41320" author="bogl" created="Fri, 29 Jun 2012 12:41:12 +0000"  >&lt;p&gt;By the way, where did the 1.8.7 release you are running come from, Whamcloud directly or some other supplier?  Can you upgrade to a Whamcloud release or is that path blocked for you?&lt;/p&gt;</comment>
                            <comment id="41321" author="ihara" created="Fri, 29 Jun 2012 12:47:57 +0000"  >&lt;p&gt;What is LU-XX affect this issue? really need detailed information if this ticket is duplicated or fixed in 1.8.8.&lt;br/&gt;
And, yes, vanilla 1.8.7-wc1 is running.&lt;/p&gt;</comment>
                            <comment id="41327" author="bogl" created="Fri, 29 Jun 2012 16:18:27 +0000"  >&lt;p&gt;Currently suspect &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-952&quot; title=&quot;Hung thread with HIGH OSS load&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-952&quot;&gt;&lt;del&gt;LU-952&lt;/del&gt;&lt;/a&gt; as being related to your issue.  Don&apos;t know for sure yet.&lt;/p&gt;</comment>
                            <comment id="41340" author="ihara" created="Fri, 29 Jun 2012 21:21:31 +0000"  >&lt;p&gt;we are monitoring all storage devices (not only health check, but latency, etc), but there are not much differences from normal. &lt;br/&gt;
btw, I forgot an thing, we already applied patch for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-952&quot; title=&quot;Hung thread with HIGH OSS load&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-952&quot;&gt;&lt;del&gt;LU-952&lt;/del&gt;&lt;/a&gt; to 1.8.7-wc1 at this site, &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-952&quot; title=&quot;Hung thread with HIGH OSS load&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-952&quot;&gt;&lt;del&gt;LU-952&lt;/del&gt;&lt;/a&gt; should be fixed in this site.&lt;/p&gt;</comment>
                            <comment id="41403" author="ihara" created="Tue, 3 Jul 2012 12:04:54 +0000"  >&lt;p&gt;Bob, what&apos;s progress on this?&lt;br/&gt;
There was no storage side issues and patches for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-952&quot; title=&quot;Hung thread with HIGH OSS load&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-952&quot;&gt;&lt;del&gt;LU-952&lt;/del&gt;&lt;/a&gt; were already applied on top of lustre-1.8.7-wc1.&lt;/p&gt;</comment>
                            <comment id="41405" author="bogl" created="Tue, 3 Jul 2012 12:14:33 +0000"  >&lt;p&gt;There hasn&apos;t been much progress.  Attempts to reproduce haven&apos;t succeeded.  We don&apos;t yet have a good theory about how this might happen.&lt;/p&gt;

&lt;p&gt;Have more instances happened?  Were you able to gather the stack dump of all threads Cliff asked for 6/29?&lt;/p&gt;</comment>
                            <comment id="41480" author="ihara" created="Thu, 5 Jul 2012 07:49:36 +0000"  >&lt;p&gt;Today, we had same problem on few OSSs and some OSTs&apos;s status on the some clients due to refusing the connection by still busy with XX active RPCs. What&apos;s RPC waiting and where?&lt;br/&gt;
I uploaded these clients and OSS&apos;s debug files and syslog on /uploads/&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1578&quot; title=&quot;ll_ost_io_xx thread hangs and OST status get into down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1578&quot;&gt;&lt;del&gt;LU-1578&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;This happens very often here and critical. Please investigate deeply.&lt;/p&gt;</comment>
                            <comment id="41679" author="ihara" created="Tue, 10 Jul 2012 20:05:57 +0000"  >&lt;p&gt;Bob, &lt;/p&gt;

&lt;p&gt;Any progress on this? the cusotmer is waiting for responses. thanks!&lt;/p&gt;</comment>
                            <comment id="41693" author="niu" created="Wed, 11 Jul 2012 04:04:22 +0000"  >&lt;p&gt;Hi, Ihara&lt;/p&gt;

&lt;p&gt;Is it possible to get the full stack trace of the OSS whe the &quot;ll_ost_io_xx thread hangs&quot; happen? that&apos;ll be easier for us to figure out if there is deadlock. Thanks.&lt;/p&gt;

&lt;p&gt;BTW: I found there is LBUG in the messages, it looks like &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1563&quot; title=&quot;lustre_quota.h:326:lqs_putref() LBUG&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1563&quot;&gt;&lt;del&gt;LU-1563&lt;/del&gt;&lt;/a&gt;, did you tried the fix from &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1563&quot; title=&quot;lustre_quota.h:326:lqs_putref() LBUG&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1563&quot;&gt;&lt;del&gt;LU-1563&lt;/del&gt;&lt;/a&gt;?&lt;/p&gt;</comment>
                            <comment id="41698" author="ihara" created="Wed, 11 Jul 2012 09:32:10 +0000"  >&lt;p&gt;Hi Niu,&lt;/p&gt;

&lt;p&gt;The patch for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1563&quot; title=&quot;lustre_quota.h:326:lqs_putref() LBUG&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1563&quot;&gt;&lt;del&gt;LU-1563&lt;/del&gt;&lt;/a&gt; is not applied yet on this site. The next maintenance window on the next mask, we will apply these patches. So, we are still sometimes seeing this problem.&lt;/p&gt;

&lt;p&gt;Let me check we could get stack trace when ll_ost_io threads hangs.&lt;/p&gt;</comment>
                            <comment id="41728" author="ihara" created="Wed, 11 Jul 2012 22:21:17 +0000"  >&lt;p&gt;Niu, &lt;/p&gt;

&lt;p&gt;we got a similar situation on the another client, but it&apos;s ll_mdt_xx haning and MDT was down status. (got still busy with XX active RPCs) Should I file it as well? that situation, we could get stack traces when the problem happens.&lt;/p&gt;</comment>
                            <comment id="41729" author="niu" created="Wed, 11 Jul 2012 23:02:14 +0000"  >&lt;p&gt;Yes, please get the full stack trace. I think it should be different problem, let&apos;s check the stack trace first. Thanks.&lt;/p&gt;</comment>
                            <comment id="41730" author="ihara" created="Wed, 11 Jul 2012 23:48:54 +0000"  >&lt;p&gt;filed nmd01.stack on uploads/&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1578&quot; title=&quot;ll_ost_io_xx thread hangs and OST status get into down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1578&quot;&gt;&lt;del&gt;LU-1578&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;thank you!&lt;/p&gt;</comment>
                            <comment id="41734" author="niu" created="Thu, 12 Jul 2012 02:00:14 +0000"  >&lt;p&gt;I think it&apos;s &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1563&quot; title=&quot;lustre_quota.h:326:lqs_putref() LBUG&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1563&quot;&gt;&lt;del&gt;LU-1563&lt;/del&gt;&lt;/a&gt;, a mdt thread hit LBUG in in quota_pending_commit(), which is holding ldlm lock and blocked other operations.&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;ll_mdt_34     D ffff810021004420     0  7589      1          7590  7588 (L-TLB)
 ffff8105fc56f720 0000000000000046 0000000000000000 0000000000000286
 ffff8105fc56f6e0 0000000000000009 ffff8105f72a07e0 ffffffff80316b60
 00023eb511a49dac 000000000000109f ffff8105f72a09c8 000000008003bbca
Call Trace:
 [&amp;lt;ffffffff88767c26&amp;gt;] :libcfs:lbug_with_loc+0xc6/0xd0
 [&amp;lt;ffffffff8876ffc0&amp;gt;] :libcfs:tracefile_init+0x0/0x110
 [&amp;lt;ffffffff889c02ef&amp;gt;] :lquota:quota_pending_commit+0x41f/0x5b0
 [&amp;lt;ffffffff88aff5d1&amp;gt;] :jbd2:jbd2_journal_stop+0x209/0x215
 [&amp;lt;ffffffff88b40317&amp;gt;] :ldiskfs:__ldiskfs_journal_stop+0x67/0x80
 [&amp;lt;ffffffff88c24d8b&amp;gt;] :mds:mds_open+0x30eb/0x3758
 [&amp;lt;ffffffff888d1491&amp;gt;] :ptlrpc:lustre_swab_buf+0x81/0x170
 [&amp;lt;ffffffff8002cc91&amp;gt;] mntput_no_expire+0x19/0x89
 [&amp;lt;ffffffff8000d5a8&amp;gt;] dput+0x2c/0x113
 [&amp;lt;ffffffff88bfb0d5&amp;gt;] :mds:mds_reint_rec+0x365/0x550
 [&amp;lt;ffffffff88c259be&amp;gt;] :mds:mds_update_unpack+0x1fe/0x280
 [&amp;lt;ffffffff88bededa&amp;gt;] :mds:mds_reint+0x35a/0x420
 [&amp;lt;ffffffff88becdea&amp;gt;] :mds:fixup_handle_for_resent_req+0x5a/0x2c0
 [&amp;lt;ffffffff88bf7c1c&amp;gt;] :mds:mds_intent_policy+0x4ac/0xc20
 [&amp;lt;ffffffff88892270&amp;gt;] :ptlrpc:ldlm_resource_putref_internal+0x230/0x460
 [&amp;lt;ffffffff8888feb6&amp;gt;] :ptlrpc:ldlm_lock_enqueue+0x186/0xb20
 [&amp;lt;ffffffff8888c7fd&amp;gt;] :ptlrpc:ldlm_lock_create+0x9bd/0x9f0
 [&amp;lt;ffffffff888b4870&amp;gt;] :ptlrpc:ldlm_server_blocking_ast+0x0/0x83d
 [&amp;lt;ffffffff888b1b39&amp;gt;] :ptlrpc:ldlm_handle_enqueue+0xc09/0x1210
 [&amp;lt;ffffffff88bf6b40&amp;gt;] :mds:mds_handle+0x40e0/0x4d10
 [&amp;lt;ffffffff8008e1a4&amp;gt;] enqueue_task+0x41/0x56
 [&amp;lt;ffffffff8008e20f&amp;gt;] __activate_task+0x56/0x6d
 [&amp;lt;ffffffff888d5d55&amp;gt;] :ptlrpc:lustre_msg_get_conn_cnt+0x35/0xf0
 [&amp;lt;ffffffff888df6d9&amp;gt;] :ptlrpc:ptlrpc_server_handle_request+0x989/0xe00
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="41735" author="ihara" created="Thu, 12 Jul 2012 03:53:38 +0000"  >&lt;p&gt;OK, thanks. &lt;br/&gt;
I will post stack trace when ll_ost_io_xx thread hangs on the next time.&lt;/p&gt;</comment>
                            <comment id="41792" author="ihara" created="Fri, 13 Jul 2012 08:00:44 +0000"  >&lt;p&gt;Niu, &lt;/p&gt;

&lt;p&gt;when we evicted on the client manually, it connected well well.&lt;br/&gt;
But, we see same problem on the another node sometimes. If we evicted, is the holding lock released? or just connect with different thread and still keeping the lock? this is why client can connect? &lt;br/&gt;
But, the root cause of LBUG can&apos;t fix, this is why we saw same issue on the another client. is this correct? &lt;/p&gt;</comment>
                            <comment id="41996" author="ihara" created="Thu, 19 Jul 2012 04:30:24 +0000"  >&lt;p&gt;Hello Niu,&lt;/p&gt;

&lt;p&gt;I just upload today&apos;s MDS&apos;s syslog on uploads/&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1578&quot; title=&quot;ll_ost_io_xx thread hangs and OST status get into down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1578&quot;&gt;&lt;del&gt;LU-1578&lt;/del&gt;&lt;/a&gt;/messages.nmd01. We hit same problem today, but not only single client, but saw Down states on the many clients. As far as we see the log files, it seems to be same issue. &lt;br/&gt;
Howerver, question is that as I asked before, what exctly happens if MDS does LBUG and ll_mdt_xx hangs?&lt;br/&gt;
Please advise!&lt;/p&gt;</comment>
                            <comment id="41999" author="niu" created="Thu, 19 Jul 2012 07:00:55 +0000"  >&lt;p&gt;yes, I checked log, it seems same problem (&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1563&quot; title=&quot;lustre_quota.h:326:lqs_putref() LBUG&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1563&quot;&gt;&lt;del&gt;LU-1563&lt;/del&gt;&lt;/a&gt;). If the MDS hit the LBUG, then MDS will not responsive and the whole cluster will not functional.&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="11663" name="messages.tar.gz" size="469013" author="ihara" created="Thu, 28 Jun 2012 04:46:29 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvgsn:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>6370</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>