<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:01:23 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6573] multiple tests: client evicted, Input/output error</title>
                <link>https://jira.whamcloud.com/browse/LU-6573</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for wangdi &amp;lt;di.wang@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/f9a7140c-f404-11e4-9769-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/f9a7140c-f404-11e4-9769-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_10b failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;dd /mnt/lustre2
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;10:05:09:Lustre: DEBUG MARKER: == sanityn test 10b: write of file with sub-page size on multiple mounts == 10:04:38 (1430906678)
10:05:09:LustreError: 11-0: lustre-OST0002-osc-ffff88007b4d1400: operation ldlm_enqueue to node 10.1.5.213@tcp failed: rc = -107
10:05:09:Lustre: lustre-OST0002-osc-ffff88007b4d1400: Connection to lustre-OST0002 (at 10.1.5.213@tcp) was lost; in progress operations using this service will wait for recovery to complete
10:05:09:LustreError: 167-0: lustre-OST0002-osc-ffff88007b4d1400: This client was evicted by lustre-OST0002; in progress operations using this service will fail.
10:05:09:Lustre: lustre-OST0002-osc-ffff88007b4d1400: Connection restored to lustre-OST0002 (at 10.1.5.213@tcp)
10:05:09:Lustre: DEBUG MARKER: /usr/sbin/lctl mark  sanityn test_10b: @@@@@@ FAIL: dd \/mnt\/lustre2 
10:05:09:Lustre: DEBUG MARKER: sanityn test_10b: @@@@@@ FAIL: dd /mnt/lustre2
10:05:09:Lustre: DEBUG MARKER: /usr/sbin/lctl dk &amp;gt; /logdir/test_logs/2015-05-06/lustre-reviews-el6_6-x86_64--review-dne-part-1--2_8_1__32016__-69851820759220-072240/sanityn.test_10b.debug_log.$(hostname -s).1430906678.log;
10:05:09:         dmesg &amp;gt; /logdir/test_logs/2015-05-06/lustre-reviews-el
10:05:09:Lustre: DEBUG MARKER: /usr/sbin/lctl mark == sanityn test 11: execution of file opened for write should return error ====== 10:04:40 \(1430906680\)&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

</description>
                <environment></environment>
        <key id="29885">LU-6573</key>
            <summary>multiple tests: client evicted, Input/output error</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="niu">Niu Yawei</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Wed, 6 May 2015 17:10:08 +0000</created>
                <updated>Fri, 1 Jul 2016 18:51:49 +0000</updated>
                            <resolved>Fri, 22 May 2015 17:49:08 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                                    <fixVersion>Lustre 2.8.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>9</watches>
                                                                            <comments>
                            <comment id="114416" author="adilger" created="Wed, 6 May 2015 19:00:14 +0000"  >&lt;p&gt;Looks like the OST evicted the client before test_10b started:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: == sanityn test 9: append of file with sub-page size on multiple mounts == 10:04:29 (1430906669)
Lustre: lustre-OST0002: haven&apos;t heard from client 40fe07c8-ede6-5a1d-89d2-dbf51d83c46a (at 10.1.5.215@tcp) in 48 seconds. I think it&apos;s dead, and I am evicting it. exp ffff8800690a9800, cur 1430906674 expire 1430906644 last 1430906626
Lustre: DEBUG MARKER: == sanityn test 10a: write of file with sub-page size on multiple mounts == 10:04:34 (1430906674)
Lustre: DEBUG MARKER: == sanityn test 10b: write of file with sub-page size on multiple mounts == 10:04:38 (1430906678)
Lustre: DEBUG MARKER: sanityn test_10b: @@@@@@ FAIL: dd /mnt/lustre2
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This might be due to slowness of the VM host or any number of other issues. It seems that this is failing randomly across a wide variety of tests.&lt;/p&gt;</comment>
                            <comment id="114618" author="adilger" created="Thu, 7 May 2015 17:47:33 +0000"  >&lt;p&gt;I wonder if this failure and &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6558&quot; title=&quot;replay-single: test_61c, test_90 timeout: nrs_orr_res_get() accessed NULL pointer&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6558&quot;&gt;&lt;del&gt;LU-6558&lt;/del&gt;&lt;/a&gt; are fallout from landing &lt;a href=&quot;http://review.whamcloud.com/9286&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/9286&lt;/a&gt; &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3266&quot; title=&quot;Regression tests for NRS policies&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3266&quot;&gt;&lt;del&gt;LU-3266&lt;/del&gt;&lt;/a&gt; test: regression tests for nrs policies&quot;, which is enabling NRS in sanity.sh test_77 but it seems that it isn&apos;t disabling NRS afterward?&lt;/p&gt;</comment>
                            <comment id="114708" author="niu" created="Fri, 8 May 2015 09:19:29 +0000"  >&lt;p&gt;I don&apos;t see why lots of replay-single test_58c failures were set as associated:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/03f97924-f11a-11e4-9e14-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/03f97924-f11a-11e4-9e14-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/75a610ae-f143-11e4-bb65-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/75a610ae-f143-11e4-bb65-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/0d145610-f2d1-11e4-aad2-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/0d145610-f2d1-11e4-aad2-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/08da0d34-f442-11e4-a594-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/08da0d34-f442-11e4-a594-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/ff57f6e2-f476-11e4-ac9e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/ff57f6e2-f476-11e4-ac9e-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;I think they were caused by &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4913&quot; title=&quot;mgc import reconnect race&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4913&quot;&gt;&lt;del&gt;LU-4913&lt;/del&gt;&lt;/a&gt; and should have been fixed by &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5420&quot; title=&quot;Failure on test suite sanity test_17m: mount MDS failed, Input/output error&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5420&quot;&gt;&lt;del&gt;LU-5420&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="114799" author="adilger" created="Sat, 9 May 2015 03:14:04 +0000"  >&lt;p&gt;Seen on the server:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: lustre-MDT0000: Denying connection for new client lustre-MDT0000-lwp-OST0001_UUID(at 192.168.5.153@o2ib), waiting for 3 known clients (0 recovered, 0 in progress, and 0 evicted) to recover in 0:59
Lustre: Skipped 25 previous similar messages
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;So it seems that the LWP connection is being blocked during recovery, which probably isn&apos;t right, since letting it connect won&apos;t affect recovery.&lt;/p&gt;</comment>
                            <comment id="114826" author="niu" created="Mon, 11 May 2015 02:47:26 +0000"  >&lt;p&gt;The denying LWP connection problem could be caused by 34a9bf694a584437e4bf7a037dea32a6271e93d3&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;Author: Mikhail Pershin &amp;lt;mike.pershin@intel.com&amp;gt;
Date:   Tue Dec 3 17:30:15 2013 +0400

    LU-4214 target: LWP failover should create new export

    Upon reconnection from new NID LWP client need to create new
    export and destroy old one like that is done for MDS connection
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="114841" author="niu" created="Mon, 11 May 2015 10:16:14 +0000"  >&lt;p&gt;Looks something went wrong in the eviction mechanism. &lt;/p&gt;

&lt;p&gt;Take this failure &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/50e05d66-f589-11e4-91fd-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/50e05d66-f589-11e4-91fd-5254006e85c2&lt;/a&gt; as an example:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;00000100:00100000:0.0:1431048749.096032:0:24985:0:(service.c:2075:ptlrpc_server_handle_request()) Handling RPC pname:cluuid+ref:pid:xid:nid:opc mdt00_002:e6747c7f-1ec5-b75c-2bcb-cd4c06ffb96a+8:9457:x1500561708378636:12345-10.1.4.112@tcp:400
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The MDT received a PING from client 10.1.4.112 at time 1431048749, and the PING was processed.&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;00000100:00080000:0.0:1431048756.180583:0:25594:0:(service.c:1098:ptlrpc_update_export_timer()) lustre-MDT0000: Think about evicting 10.1.4.112@tcp from 1431048725
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;However, at time 1431048756, the MDT thinks the last request from 10.1.4.112 was at time 1431048725, which is much older than 1431048749.&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;00000100:00080000:1.0:1431048773.266900:0:24981:0:(pinger.c:637:ping_evictor_main()) evicting all exports of obd lustre-MDT0000 older than 1431048743
00000100:02000400:1.0:1431048773.266911:0:24981:0:(pinger.c:663:ping_evictor_main()) lustre-MDT0000: haven&apos;t heard from client 8beae9f3-b969-37e0-7b9b-eb08f4d2c9ef (at 10.1.4.112@tcp) in 48 seconds. I think it&apos;s dead, and I am evicting it. exp ffff880072a0b800, cur 1431048773 expire 1431048743 last 1431048725
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Finally, at time 1431048773, client 10.1.4.112 was evicted.&lt;/p&gt;
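
&lt;p&gt;For reference, a minimal sketch (my reconstruction for illustration, not the actual kernel code) of the evictor arithmetic behind the messages above, assuming the usual derivation PING_INTERVAL = obd_timeout / 4 and PING_EVICT_TIMEOUT = 6 * PING_INTERVAL:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;/* Sketch of the ping-evictor decision, simplified from
 * ptlrpc_update_export_timer()/ping_evictor_main(). The constants are
 * assumptions based on the default autotest obd_timeout of 20 seconds. */
#include &amp;lt;stdio.h&amp;gt;

#define OBD_TIMEOUT        20                   /* seconds */
#define PING_INTERVAL      (OBD_TIMEOUT / 4)    /* 5s client ping cadence */
#define PING_EVICT_TIMEOUT (PING_INTERVAL * 6)  /* 30s eviction window */

int main(void)
{
        long cur  = 1431048773;  /* evictor wakeup, from the log above */
        long last = 1431048725;  /* stale exp_last_request_time */
        long expire = cur - PING_EVICT_TIMEOUT;  /* 1431048743 */

        if (last &amp;lt; expire)
                printf(&quot;evicting: no request in %ld seconds (window %d)\n&quot;,
                       cur - last, PING_EVICT_TIMEOUT);
        return 0;
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Had the PING at 1431048749 been recorded, last would have been newer than expire and the client would not have been evicted.&lt;/p&gt;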

&lt;p&gt;It seems the last request time for 10.1.4.112 was somehow not updated properly.&lt;/p&gt;</comment>
                            <comment id="114981" author="niu" created="Tue, 12 May 2015 03:29:45 +0000"  >&lt;p&gt;Oops, 8beae9f3-b969-37e0-7b9b-eb08f4d2c9ef and 8beae9f3-b969-37e0-7b9b-eb08f4d2c9ef are different mounts from same NID.&lt;/p&gt;</comment>
                            <comment id="115022" author="gerrit" created="Tue, 12 May 2015 14:59:17 +0000"  >&lt;p&gt;Niu Yawei (yawei.niu@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/14777&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/14777&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6573&quot; title=&quot;multiple tests: client evicted, Input/output error&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6573&quot;&gt;&lt;del&gt;LU-6573&lt;/del&gt;&lt;/a&gt; ptlrpc: debug patch&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: eb866c9274821fb6a818d6ec42c2b486b324d0cc&lt;/p&gt;</comment>
                            <comment id="115023" author="niu" created="Tue, 12 May 2015 15:04:16 +0000"  >&lt;p&gt;I doubt that client doesn&apos;t send PINGs as expected, which caused server evict client at the end. The debug patch &lt;a href=&quot;http://review.whamcloud.com/#/c/14777/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/14777/&lt;/a&gt; provides more verbose messages on client handling PINGs, hope we can reproduce the problem with the patch applied.&lt;/p&gt;</comment>
                            <comment id="115320" author="bogl" created="Thu, 14 May 2015 14:53:25 +0000"  >&lt;p&gt;I think this is another instance, seen in master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/5265d50a-fa23-11e4-a7e2-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/5265d50a-fa23-11e4-a7e2-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="115639" author="niu" created="Mon, 18 May 2015 10:13:38 +0000"  >&lt;p&gt;I saw in some failures that ping interval is 1 second (the default obd_timeout for autotest is 20 seconds, and the default ping interval should be 5 seconds), I think that sanity test_224c could be the culprit: it changed obd_timeout to 5 seconds during testing, it did change back to default value, but it didn&apos;t wait to it take effect. I fixed this in: &lt;a href=&quot;http://review.whamcloud.com/14844&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/14844&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;In most ZFS test failures the ping interval is the default 5 seconds; I&apos;m wondering if we should use a longer obd_timeout/ping_interval for ZFS testing?&lt;/p&gt;</comment>
                            <comment id="115641" author="gerrit" created="Mon, 18 May 2015 10:16:49 +0000"  >&lt;p&gt;Niu Yawei (yawei.niu@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/14844&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/14844&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6573&quot; title=&quot;multiple tests: client evicted, Input/output error&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6573&quot;&gt;&lt;del&gt;LU-6573&lt;/del&gt;&lt;/a&gt; test: fix sanity test_224c&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 942195be07082d82fca7dee225e9ddb0fe311290&lt;/p&gt;</comment>
                            <comment id="116113" author="gerrit" created="Thu, 21 May 2015 16:38:05 +0000"  >&lt;p&gt;Andreas Dilger (andreas.dilger@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/14844/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/14844/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6573&quot; title=&quot;multiple tests: client evicted, Input/output error&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6573&quot;&gt;&lt;del&gt;LU-6573&lt;/del&gt;&lt;/a&gt; test: fix sanity test_224c&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 551dc77e6ecd590d35fe7759124b98961642e831&lt;/p&gt;</comment>
                            <comment id="116231" author="pjones" created="Fri, 22 May 2015 17:49:08 +0000"  >&lt;p&gt;Landed for 2.8&lt;/p&gt;</comment>
                            <comment id="116431" author="adilger" created="Tue, 26 May 2015 18:00:15 +0000"  >&lt;p&gt;This was introduced by patch &lt;a href=&quot;http://review.whamcloud.com/14399&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/14399&lt;/a&gt; &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6441&quot; title=&quot;OST problems following router node crash, inactive threads, clients continuously reconnecting&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6441&quot;&gt;&lt;del&gt;LU-6441&lt;/del&gt;&lt;/a&gt; ptlrpc: ptlrpc_bulk_abort unlink all entries in bd_mds&quot;.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="30089">LU-6604</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="30290">LU-6623</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="29428">LU-6441</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="25727">LU-5407</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxcmn:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>