<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:06:50 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-7198] vvp_io.c:701:vvp_io_fault_start()) binary changed while waiting for the page fault lock</title>
                <link>https://jira.whamcloud.com/browse/LU-7198</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;We continue to see messages like this:&lt;br/&gt;
&amp;lt;ConMan&amp;gt; Console &lt;span class=&quot;error&quot;&gt;&amp;#91;sierra562&amp;#93;&lt;/span&gt; log at 2015-07-20 15:00:00 PDT.&lt;br/&gt;
2015-07-20 15:56:56 Lustre: 13097:0:(vvp_io.c:701:vvp_io_fault_start()) binary &lt;span class=&quot;error&quot;&gt;&amp;#91;0x20004f83e:0x3bc8:0x0&amp;#93;&lt;/span&gt; changed while waiting for the page fault lock&lt;/p&gt;

&lt;p&gt;&amp;lt;ConMan&amp;gt; Console &lt;span class=&quot;error&quot;&gt;&amp;#91;sierra562&amp;#93;&lt;/span&gt; log at 2015-07-20 16:00:00 PDT.&lt;br/&gt;
2015-07-20 16:13:50 Lustre: 13626:0:(vvp_io.c:701:vvp_io_fault_start()) binary &lt;span class=&quot;error&quot;&gt;&amp;#91;0x20004f83e:0x3bc8:0x0&amp;#93;&lt;/span&gt; changed while waiting for the page fault lock&lt;br/&gt;
2015-07-20 16:30:41 Lustre: 13679:0:(vvp_io.c:701:vvp_io_fault_start()) binary &lt;span class=&quot;error&quot;&gt;&amp;#91;0x20004f83e:0x3bc8:0x0&amp;#93;&lt;/span&gt; changed while waiting for the page fault lock&lt;/p&gt;

&lt;p&gt;&amp;lt;ConMan&amp;gt; Console &lt;span class=&quot;error&quot;&gt;&amp;#91;sierra562&amp;#93;&lt;/span&gt; log at 2015-07-20 17:00:00 PDT.&lt;br/&gt;
2015-07-20 17:04:21 Lustre: 13807:0:(vvp_io.c:701:vvp_io_fault_start()) binary &lt;span class=&quot;error&quot;&gt;&amp;#91;0x20004f83e:0x3bc8:0x0&amp;#93;&lt;/span&gt; changed while waiting for the page fault lock&lt;br/&gt;
2015-07-20 17:38:08 Lustre: 14373:0:(vvp_io.c:701:vvp_io_fault_start()) binary &lt;span class=&quot;error&quot;&gt;&amp;#91;0x20004f83e:0x3bc8:0x0&amp;#93;&lt;/span&gt; changed while waiting for the page fault lock&lt;/p&gt;

&lt;p&gt;Creating a new issue rather than reopen &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4308&quot; title=&quot;MPI job causes errors &amp;quot;binary changed while waiting for the page fault lock&amp;quot;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4308&quot;&gt;&lt;del&gt;LU-4308&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</description>
                <environment>Livermore branch: lustre-2.5.4-4chaos_2.6.32_504.16.2.1chaos.ch5.3.x86_64.x86_64</environment>
        <key id="32278">LU-7198</key>
            <summary>vvp_io.c:701:vvp_io_fault_start()) binary changed while waiting for the page fault lock</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bobijam">Zhenyu Xu</assignee>
                                    <reporter username="marc@llnl.gov">D. Marc Stearman</reporter>
                        <labels>
                            <label>llnl</label>
                            <label>llnlfixready</label>
                    </labels>
                <created>Tue, 22 Sep 2015 20:41:02 +0000</created>
                <updated>Thu, 14 Jun 2018 21:41:18 +0000</updated>
                            <resolved>Mon, 25 Apr 2016 20:25:34 +0000</resolved>
                                    <version>Lustre 2.5.4</version>
                                    <fixVersion>Lustre 2.9.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>16</watches>
                                                                            <comments>
                            <comment id="128275" author="jgmitter" created="Wed, 23 Sep 2015 18:09:47 +0000"  >&lt;p&gt;Hi Bobijam,&lt;br/&gt;
Can you take this issue?&lt;br/&gt;
Thanks.&lt;br/&gt;
Joe&lt;/p&gt;</comment>
                            <comment id="128325" author="gerrit" created="Thu, 24 Sep 2015 03:21:23 +0000"  >&lt;p&gt;Bobi Jam (bobijam@hotmail.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/16622&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/16622&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7198&quot; title=&quot;vvp_io.c:701:vvp_io_fault_start()) binary changed while waiting for the page fault lock&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7198&quot;&gt;&lt;del&gt;LU-7198&lt;/del&gt;&lt;/a&gt; llite: take/release lli_trunc_sem in vvp_io layer&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_5&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: efcd5d8ea9b6a241d588959a7a4688998105fcc5&lt;/p&gt;</comment>
                            <comment id="131076" author="morrone" created="Thu, 22 Oct 2015 00:06:46 +0000"  >&lt;p&gt;Lustre 2.5.4-12chaos contains change 16650 targeting the b2_5_fe branch.  That did not fix the stated problem:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;2015-10-19 18:34:53 Lustre: 87030:0:(vvp_io.c:716:vvp_io_fault_start()) binary [0x200053b04:0xee27:0x0] changed while waiting for the page fault lock

&amp;lt;ConMan&amp;gt; Console [cab34] log at 2015-10-19 19:00:00 PDT.

&amp;lt;ConMan&amp;gt; Console [cab34] log at 2015-10-19 20:00:00 PDT.
2015-10-19 20:21:21 Lustre: 88635:0:(vvp_io.c:716:vvp_io_fault_start()) binary [0x200053b04:0xee27:0x0] changed while waiting for the page fault lock
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Keep trying.&lt;/p&gt;</comment>
                            <comment id="131077" author="morrone" created="Thu, 22 Oct 2015 00:41:00 +0000"  >&lt;p&gt;And, even worse, we now have applications hanging in sys_ftruncate all over the place.  I can&apos;t help but think that this patch that messes with locking around vvp_io_setattr_start() is to blame for our current major center-wide disruption.&lt;/p&gt;

&lt;p&gt;Here is the place where code is hanging:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;2015-10-21 17:31:35 XXXXXX       D 0000000000000006     0 28745  28734 0x00000004
2015-10-21 17:31:35  ffff8805f0befb98 0000000000000086 ffff88034ac159c0 ffff88033b277150
2015-10-21 17:31:35  00ff8805f0befc28 0000000000000000 ffff8805f0befb28 ffff88034ac159c0
2015-10-21 17:31:35  ffff8805eee3e0c0 0000000000000006 ffff8805f3043068 ffff8805f0beffd8
2015-10-21 17:31:35 Call Trace:
2015-10-21 17:31:35  [&amp;lt;ffffffff8153e475&amp;gt;] rwsem_down_failed_common+0x95/0x1d0
2015-10-21 17:31:35  [&amp;lt;ffffffff8153e5d3&amp;gt;] rwsem_down_write_failed+0x23/0x30
2015-10-21 17:31:35  [&amp;lt;ffffffff8129fac3&amp;gt;] call_rwsem_down_write_failed+0x13/0x20
2015-10-21 17:31:35  [&amp;lt;ffffffff8153daa5&amp;gt;] ? down_write+0x45/0x50
2015-10-21 17:31:35  [&amp;lt;ffffffffa0b0c229&amp;gt;] vvp_io_setattr_start+0x129/0x170 [lustre]
2015-10-21 17:31:35  [&amp;lt;ffffffffa0638c7a&amp;gt;] cl_io_start+0x6a/0x140 [obdclass]
2015-10-21 17:31:35  [&amp;lt;ffffffffa063d384&amp;gt;] cl_io_loop+0xb4/0x1b0 [obdclass]
2015-10-21 17:31:35  [&amp;lt;ffffffffa0b04948&amp;gt;] cl_setattr_ost+0x218/0x2f0 [lustre]
2015-10-21 17:31:35  [&amp;lt;ffffffffa0ad06f1&amp;gt;] ll_setattr_raw+0xa31/0x1060 [lustre]
2015-10-21 17:31:35  [&amp;lt;ffffffffa0ad0d85&amp;gt;] ll_setattr+0x65/0xd0 [lustre]
2015-10-21 17:31:35  [&amp;lt;ffffffff811b1de8&amp;gt;] notify_change+0x168/0x340
2015-10-21 17:31:35  [&amp;lt;ffffffff81190bd4&amp;gt;] do_truncate+0x64/0xa0
2015-10-21 17:31:35  [&amp;lt;ffffffff8153c771&amp;gt;] ? mutex_lock+0x31/0x60
2015-10-21 17:31:35  [&amp;lt;ffffffff81190f80&amp;gt;] sys_ftruncate+0x120/0x130
2015-10-21 17:31:35  [&amp;lt;ffffffff8100b112&amp;gt;] system_call_fastpath+0x16/0x1b
2015-10-21 17:31:35 XXXXXX       D 000000000000000b     0 28750  28734 0x00000004
2015-10-21 17:31:35  ffff88054385bd08 0000000000000082 0000000000000000 ffff8805dce716e0
2015-10-21 17:31:35  ffff88054385bcc8 0000000000000286 00005ab6b25e0bdc ffff88054385bd64
2015-10-21 17:31:35  ffffffffa0b3bb00 0000000105ec1b77 ffff8806395a05f8 ffff88054385bfd8
2015-10-21 17:31:35 Call Trace:
2015-10-21 17:31:35  [&amp;lt;ffffffff8153cc66&amp;gt;] __mutex_lock_slowpath+0x96/0x210
2015-10-21 17:31:35  [&amp;lt;ffffffff8153c77e&amp;gt;] mutex_lock+0x3e/0x60
2015-10-21 17:31:35  [&amp;lt;ffffffffa0acff9a&amp;gt;] ll_setattr_raw+0x2da/0x1060 [lustre]
2015-10-21 17:31:35  [&amp;lt;ffffffffa0ad0d85&amp;gt;] ll_setattr+0x65/0xd0 [lustre]
2015-10-21 17:31:35  [&amp;lt;ffffffff811b1de8&amp;gt;] notify_change+0x168/0x340
2015-10-21 17:31:35  [&amp;lt;ffffffff81190bd4&amp;gt;] do_truncate+0x64/0xa0
2015-10-21 17:31:35  [&amp;lt;ffffffff8153c771&amp;gt;] ? mutex_lock+0x31/0x60
2015-10-21 17:31:35  [&amp;lt;ffffffff81190f80&amp;gt;] sys_ftruncate+0x120/0x130
2015-10-21 17:31:35  [&amp;lt;ffffffff8100b112&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="131092" author="bobijam" created="Thu, 22 Oct 2015 01:47:10 +0000"  >&lt;p&gt;Would you mind getting the code line whereabout of ll_setattr_raw+0x2da? &lt;/p&gt;</comment>
                            <comment id="131129" author="jay" created="Thu, 22 Oct 2015 05:52:10 +0000"  >&lt;p&gt;also please upload the reproduce program so that we can use it to check if the same problem exists in master.&lt;/p&gt;</comment>
                            <comment id="131201" author="morrone" created="Thu, 22 Oct 2015 17:03:29 +0000"  >&lt;p&gt;I like your optimism, but no, we don&apos;t have a reproducer to upload.&lt;/p&gt;

&lt;p&gt;I can get the line number later today when I&apos;m free.&lt;/p&gt;</comment>
                            <comment id="131261" author="morrone" created="Thu, 22 Oct 2015 21:51:44 +0000"  >&lt;p&gt;ll_setattr_raw+0x2da looks like the obvious location,  mutex_lock on line 1620 below:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;1616			}
1617			ll_finish_md_op_data(op_data);
1618		}
1619		if (!S_ISDIR(inode-&amp;gt;i_mode)) {
1620			mutex_lock(&amp;amp;inode-&amp;gt;i_mutex);
1621			if ((attr-&amp;gt;ia_valid &amp;amp; ATTR_SIZE) &amp;amp;&amp;amp; !hsm_import)
1622				inode_dio_wait(inode);
1623		}
1624	
1625		ll_stats_ops_tally(ll_i2sbi(inode), (attr-&amp;gt;ia_valid &amp;amp; ATTR_SIZE)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="131674" author="bobijam" created="Tue, 27 Oct 2015 15:32:55 +0000"  >&lt;p&gt;still investigating, haven&apos;t found the deadlock root cause.&lt;/p&gt;</comment>
                            <comment id="131792" author="bobijam" created="Wed, 28 Oct 2015 08:09:16 +0000"  >&lt;p&gt;Is there other kind of Dead threads besides these two? I think thread 28750 was waiting for the i_mutex which 28745 was holding, while 28745 seemed waiting for a write lock which is either lli-&amp;gt;lli_trunc_sem  (most likely) or i_alloc_sem (unlikely), and there should be other thread holding this lli_trunc_sem.&lt;/p&gt;</comment>
                            <comment id="131883" author="morrone" created="Wed, 28 Oct 2015 18:13:00 +0000"  >&lt;p&gt;Unfortunately, I do not have that information.&lt;/p&gt;</comment>
                            <comment id="133100" author="bobijam" created="Tue, 10 Nov 2015 04:27:26 +0000"  >&lt;p&gt;Can you provide bt -a output and keep thread name masqueraded if necessary?&lt;/p&gt;</comment>
                            <comment id="133209" author="bobijam" created="Wed, 11 Nov 2015 06:57:41 +0000"  >&lt;p&gt;I find out the deadlock reason, this patch need some prepositional patch to drop the i_mutex usage in write path, or else vvp_io_write_start() will held lli-&amp;gt;lli_trunc_sem then call lustre_generic_file_write()-&amp;gt;generic_file_aio_write() which will get i_mutex, and this order is different from other IO path (setattr, truncate, etc).&lt;/p&gt;

&lt;p&gt;So I need push some prepositional patch before this one.&lt;/p&gt;</comment>
                            <comment id="133213" author="bobijam" created="Wed, 11 Nov 2015 08:45:14 +0000"  >&lt;p&gt;updated patch #16650 which depends on #17117 and #17116 (these two patches drops i_mutex usage in write path).&lt;/p&gt;</comment>
                            <comment id="133287" author="morrone" created="Wed, 11 Nov 2015 18:50:28 +0000"  >&lt;p&gt;We can&apos;t provide further information on the buggy patch 16650 (bt -a).  We stopped using it in production as soon as we identified that patch as the problem.&lt;/p&gt;

&lt;p&gt;Keep in mind that not only did this patch introduce a deadlock, it also did not fix the error messages which this ticket is about.  If you have only fixed the deadlock, then you should really be tracking your 16650 backport in some other ticket.&lt;/p&gt;

&lt;p&gt;If you think it will address our error message, we would appreciate an explanation of why the latest revision is expected to address the error message versus the previous one.&lt;/p&gt;
</comment>
                            <comment id="133484" author="jay" created="Fri, 13 Nov 2015 19:06:21 +0000"  >&lt;p&gt;After taking a further look, I tend to think timestamp checking in vvp_io_fault_start() is not atomic therefore it&apos;s normal to get this kind of mismatch.&lt;/p&gt;

&lt;p&gt;Taking a look at ll_merge_lvb() in b2_5, it first assigns mtime from MDS to inode&apos;s i_mtime, and then merge timestamp from OST objects. However, the timestamp from MDS could be stale because we maintain mtime mainly on OST objects(thinking about Lustre write implementation), and if the check in vvp_io_fault_start() happens before mtime on OST objects are merged, definitely it will get wrong timestamp from the inode, even the timestamp it fetched in vvp_io_fault_init() could be wrong in the first place.&lt;/p&gt;

&lt;p&gt;I tend to think we should remove the timestamp check in vvp_io_fault_start().&lt;/p&gt;</comment>
                            <comment id="136444" author="pjones" created="Tue, 15 Dec 2015 22:22:33 +0000"  >&lt;p&gt;Bobijam&lt;/p&gt;

&lt;p&gt;Do you think that Jinshan&apos;s suggestion is viable? If so please can you create a patch on that basis?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="136487" author="bobijam" created="Wed, 16 Dec 2015 01:10:52 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/#/c/17334/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/17334/&lt;/a&gt; is the patch&lt;/p&gt;</comment>
                            <comment id="136495" author="pjones" created="Wed, 16 Dec 2015 01:52:17 +0000"  >&lt;p&gt;Bobi&lt;/p&gt;

&lt;p&gt;LLNL will need a fix targeted on b2_5_fe&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="136497" author="bobijam" created="Wed, 16 Dec 2015 01:59:31 +0000"  >&lt;p&gt;port for b2_5_fe &lt;a href=&quot;http://review.whamcloud.com/17624&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17624&lt;/a&gt; &lt;/p&gt;</comment>
                            <comment id="143012" author="marc@llnl.gov" created="Fri, 19 Feb 2016 19:04:54 +0000"  >&lt;p&gt;Chris, can you pull in the latest patch please?&lt;/p&gt;</comment>
                            <comment id="143043" author="morrone" created="Fri, 19 Feb 2016 21:46:18 +0000"  >&lt;p&gt;There are no reviews on the backported patch, and it failed automated testing.&lt;/p&gt;</comment>
                            <comment id="143044" author="marc@llnl.gov" created="Fri, 19 Feb 2016 21:51:07 +0000"  >&lt;p&gt;Peter, please add this to the work queue.  When a patch is ready for us, please add the &quot;llnlfixready&quot; label.&lt;/p&gt;</comment>
                            <comment id="146949" author="morrone" created="Fri, 25 Mar 2016 18:30:56 +0000"  >&lt;p&gt;We need a master branch version of this patch as well.&lt;/p&gt;</comment>
                            <comment id="146996" author="gerrit" created="Sat, 26 Mar 2016 14:39:18 +0000"  >&lt;p&gt;Bobi Jam (bobijam@hotmail.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/19162&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/19162&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7198&quot; title=&quot;vvp_io.c:701:vvp_io_fault_start()) binary changed while waiting for the page fault lock&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7198&quot;&gt;&lt;del&gt;LU-7198&lt;/del&gt;&lt;/a&gt; clio: remove mtime check in vvp_io_fault_start()&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: f35f69b866566ce8b7852facf8c914da297e9be0&lt;/p&gt;</comment>
                            <comment id="149978" author="gerrit" created="Mon, 25 Apr 2016 04:16:34 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/19162/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/19162/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7198&quot; title=&quot;vvp_io.c:701:vvp_io_fault_start()) binary changed while waiting for the page fault lock&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7198&quot;&gt;&lt;del&gt;LU-7198&lt;/del&gt;&lt;/a&gt; clio: remove mtime check in vvp_io_fault_start()&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: d62f58c32074bc54055801ed0f919bfd5c277f6d&lt;/p&gt;</comment>
                            <comment id="150101" author="pjones" created="Mon, 25 Apr 2016 20:25:34 +0000"  >&lt;p&gt;Landed for 2.9&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                                        </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="22232">LU-4308</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxofj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>