<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:34:19 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-10356] CLIO simplification broke a direct IO sometimes</title>
                <link>https://jira.whamcloud.com/browse/LU-10356</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;3.10 and some early kernels (RHEL6 likely also has it) have&lt;br/&gt;
a buffered-write fallback if direct IO fails after some offset.&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;                written = generic_file_direct_write(iocb, iov, &amp;amp;nr_segs, pos,
                                                        ppos, count, ocount);
                /*
                 * If the write stopped &lt;span class=&quot;code-object&quot;&gt;short&lt;/span&gt; of completing, fall back to
                 * buffered writes.  Some filesystems &lt;span class=&quot;code-keyword&quot;&gt;do&lt;/span&gt; &lt;span class=&quot;code-keyword&quot;&gt;this&lt;/span&gt; &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; writes to
                 * holes, &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; example.  For DAX files, a buffered write will
                 * not succeed (even &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; it did, DAX does not handle dirty
                 * page-cache pages correctly).
                 */
                &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (written &amp;lt; 0 || written == count || IS_DAX(inode))
                        &lt;span class=&quot;code-keyword&quot;&gt;goto&lt;/span&gt; out;
                pos += written;
                count -= written;
                written_buffered = generic_file_buffered_write(iocb, iov,
                                                nr_segs, pos, ppos, count,
                                                written);
                /*
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;it causes a situation where the buffered IO is sent long after the direct IO has finished for this segment&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
00000080:00200000:1.0:1512485492.000543:0:6427:0:(rw26.c:496:ll_direct_IO()) VFS Op:inode=[0x200000407:0x15:0x0](ffff88007a96b690), size=65536 (max 1426063360), offset=393216=60000  pages 16 (max 348160)
00000080:00008000:1.0:1512485492.003959:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 1.
00000080:00008000:1.0:1512485492.003984:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 2.
00000080:00008000:1.0:1512485492.004008:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 3.
00000080:00008000:1.0:1512485492.004031:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 4.
00000080:00008000:1.0:1512485492.004054:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 5.
00000080:00008000:1.0:1512485492.004077:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 6.
00000080:00008000:1.0:1512485492.004147:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 7.
00000080:00008000:1.0:1512485492.004172:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 8.
00000080:00008000:1.0:1512485492.004195:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 9.
00000080:00008000:1.0:1512485492.004218:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 10.
00000080:00008000:1.0:1512485492.004241:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 11.
00000080:00008000:1.0:1512485492.004264:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 12.
00000080:00008000:1.0:1512485492.004287:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 13.
00000080:00008000:1.0:1512485492.004311:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 14.
00000080:00008000:1.0:1512485492.004327:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 15.
00000080:00008000:1.0:1512485492.004343:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 16.
00000080:00008000:1.0:1512485492.004491:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 17.
00000080:00008000:1.0:1512485492.004506:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 18.
00000080:00008000:1.0:1512485492.004522:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 19.
00000080:00008000:1.0:1512485492.004537:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 20.
00000080:00008000:1.0:1512485492.004553:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 21.
00000080:00008000:1.0:1512485492.004568:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 22.
00000080:00008000:1.0:1512485492.004583:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 23.
00000080:00008000:1.0:1512485492.004599:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 24.
00000080:00008000:1.0:1512485492.004614:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 25.
00000080:00008000:1.0:1512485492.004629:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 26.
00000080:00008000:1.0:1512485492.004653:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 27.
00000080:00008000:1.0:1512485492.004676:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 28.
00000080:00008000:1.0:1512485492.004699:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 29.
00000080:00008000:1.0:1512485492.004722:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 30.
00000080:00008000:1.0:1512485492.004745:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 31.
00000080:00008000:1.0:1512485492.004767:0:6427:0:(rw26.c:770:ll_write_end()) queued page: 32.
00000080:00200000:1.0:1512485492.004969:0:6427:0:(rw26.c:496:ll_direct_IO()) VFS Op:inode=[0x200000407:0x15:0x0](ffff88007a96b690), size=65536 (max 1426063360), offset=589824=90000, pages 16 (max 348160)
00000080:00200000:1.0:1512485492.007839:0:6427:0:(rw26.c:496:ll_direct_IO()) VFS Op:inode=[0x200000407:0x15:0x0](ffff88007a96b690), size=65536 (max 1426063360), offset=655360=a0000, pages 16 (max 348160)
00000080:00200000:1.0:1512485492.010532:0:6427:0:(rw26.c:496:ll_direct_IO()) VFS Op:inode=[0x200000407:0x15:0x0](ffff88007a96b690), size=65536 (max 1426063360), offset=720896=b0000, pages 16 (max 348160)
00000080:00200000:1.0:1512485492.013680:0:6427:0:(rw26.c:496:ll_direct_IO()) VFS Op:inode=[0x200000407:0x15:0x0](ffff88007a96b690), size=65536 (max 1426063360), offset=786432=c0000, pages 16 (max 348160)
00000080:00200000:1.0:1512485492.016534:0:6427:0:(rw26.c:496:ll_direct_IO()) VFS Op:inode=[0x200000407:0x15:0x0](ffff88007a96b690), size=65536 (max 1426063360), offset=851968=d0000, pages 16 (max 348160)
00000080:00000001:1.0:1512485492.024648:0:6427:0:(file.c:3093:cl_sync_file_range()) &lt;span class=&quot;code-object&quot;&gt;Process&lt;/span&gt; entered
00000080:00200000:1.0:1512485492.024686:0:6427:0:(vvp_io.c:943:vvp_io_write_commit()) commit ffff8800792bd438/ffff88003cbd14a0 async pages: 128, from 0, to 4096
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;it causes two problems.&lt;br/&gt;
1) a non-optimal RPC is generated for the buffered part: the DIO segment was sent and committed to the OST while the buffered part was still in the client cache and was sent only as part of cl_sync_file_range, so an extra seek is needed.&lt;br/&gt;
2) potential data corruption, as the first page in this queue isn&apos;t the same as the expected offset for this operation.&lt;/p&gt;</description>
                <environment>RHEL7 / 2.10 ; RHEL7 / master; # ./diotest5 -b 65536 -i 1000 -f /mnt/lustre/file2; test taken from LTP suite (need set a full debug to easy hit for me).</environment>
        <key id="49657">LU-10356</key>
            <summary>CLIO simplification broke a direct IO sometimes</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bobijam">Zhenyu Xu</assignee>
                                    <reporter username="shadow">Alexey Lyashkov</reporter>
                        <labels>
                    </labels>
                <created>Fri, 8 Dec 2017 11:37:47 +0000</created>
                <updated>Sat, 29 May 2021 07:40:35 +0000</updated>
                            <resolved>Tue, 27 Feb 2018 04:26:15 +0000</resolved>
                                    <version>Lustre 2.10.1</version>
                                    <fixVersion>Lustre 2.11.0</fixVersion>
                    <fixVersion>Lustre 2.10.4</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="215821" author="pjones" created="Fri, 8 Dec 2017 18:56:43 +0000"  >&lt;p&gt;Bobijam&lt;/p&gt;

&lt;p&gt;Can you please advise?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="217037" author="shadow" created="Fri, 22 Dec 2017 05:09:20 +0000"  >&lt;p&gt;Peter,&lt;/p&gt;

&lt;p&gt;as I right understand - it&apos;s backside effect of CLIO simplification. Before this task - kernel was create a single page queue where DIO code was put pages and it was send to the CLIO stack once, but now it will be send as continues region. But CLIO simplification broke this way by removing transient pages in common queue.&lt;/p&gt;

&lt;p&gt;as about second part - cray have a patch and Vladimir Savelev should submit it, but it not a best solution from performance perspective.&lt;/p&gt;</comment>
                            <comment id="217191" author="vsaveliev" created="Tue, 26 Dec 2017 19:44:35 +0000"  >&lt;p&gt;The direct IO write may proceed as a mixture of segments written via direct IO and segments written via page cache.&lt;/p&gt;

&lt;p&gt;Suppose a test interleaves direct IO write and buffered read (originated from ltp/testcases/kernel/io/direct_io/diotest5.c).&lt;/p&gt;

&lt;p&gt;The buffered read creates 20 pages in order to do readahead. Linux kernel batches multiple page additions to lru list via page vector of 14 (PAGEVEC_SIZE) pages (see&#160;__lru_cache_add). Due to that after the read there are 14 pages in lru list and 6 pages in page vector with increased reference counter.&lt;/p&gt;

&lt;p&gt;When direct IO write comes to write page #0 it tries to invalidate the page and to drain lru add page vector:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;__generic_file_aio_write() {
    &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (O_DIRECT) {
        generic_file_direct_write() {
            filemap_write_and_wait_range();
            invalidate_inode_pages2_range() {
                invalidate_complete_page2() {
                ll_releasepage();
                pagevec_release() {
                    lru_add_drain() {
                        lru_add_drain_cpu();
                    }
                }
            }
            mapping-&amp;gt;a_ops-&amp;gt;direct_IO();
        }
    }
}

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;The pages from #0 to #13 are releasable, and pagevec_release() is supposed to drain remaining 6 pages from page vector to lru list. The page vector is a per-cpu variable, and if the preceding buffered read ran on different cpu, this time pagevec_release() makes no effect of the pages #14..#19 and they remain in page vector associated with that cpu with increased reference counter and therefore are not releasable, as ll_releasepage() fails on page ref count check:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;ll_releasepage()
...
&#160; &#160; &#160; &#160;&#160;&lt;span class=&quot;code-comment&quot;&gt;/* 1 &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; caller, 1 &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; cl_page and 1 &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; page cache */&lt;/span&gt;
&#160; &#160; &#160; &#160; &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (page_count(vmpage) &amp;gt; 3)
&#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &lt;span class=&quot;code-keyword&quot;&gt;return&lt;/span&gt; 0;
...

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;The subsequent calls to invalidate_complete_page2() for pages from #14 to #19 fail. __generic_file_aio_write() performs buffered write for those pages.&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;__generic_file_aio_write() {
    &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (O_DIRECT) {
        generic_file_direct_write() {
        }
&lt;span class=&quot;code-comment&quot;&gt;/* after ll_releasepage() failures, direct IO resorts to buffered write */&lt;/span&gt;
        generic_file_buffered_write();
        filemap_write_and_wait_range();
        invalidate_mapping_pages();
    }
}

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Pages written via buffered write remain queued by ll_write_end() for async commit until vvp_io_write_commit() comes in. That is why filemap_write_and_wait_range(); invalidate_mapping_pages(); makes no effect on those 6 pages.&lt;/p&gt;

&lt;p&gt;On the next round buffered read creates only 14 pages in page vector (#0..#13), as pages #14..#19 still exist. The page vector is not drained so all 14 pages are not in the lru list yet and therefore are not releasable.&lt;br/&gt;
 Direct IO write tries to release page #0, fails and processes via buffered write. However, pages #1..#13 are made&#160;releasable by invalidate_inode_pages2_range()&lt;del&gt;&amp;gt;pagevec_release()&lt;/del&gt;&amp;gt;lru_add_drain() called for page #0 and get written by direct IO. Pages #14..#19 are still not releasable and get written via buffered write.&lt;/p&gt;

&lt;p&gt;So, on this round pages #0, #14..#19 are queued for async commit which hits the below assertion which requires only subsequent pages to be queued.&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;vvp_io_write_commit()
...
   LASSERT(page_list_sanity_check(obj, queue));
...

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="217192" author="gerrit" created="Tue, 26 Dec 2017 19:57:03 +0000"  >&lt;p&gt;Vladimir Saveliev (c17830@cray.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/30659&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30659&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10356&quot; title=&quot;CLIO simplification broke a direct IO sometimes&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10356&quot;&gt;&lt;del&gt;LU-10356&lt;/del&gt;&lt;/a&gt; llite: have ll_write_end to sync for DIO&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 168c0a6bbec75370e14a8ef4cfc25559ecdb9221&lt;/p&gt;</comment>
                            <comment id="217606" author="jay" created="Fri, 5 Jan 2018 18:03:41 +0000"  >&lt;p&gt;It&apos;s a known issue to read/write the same file with mixed cached and direct IO because it would cause data corruption. We could land the patch as you proposed but I don&apos;t think there would be any further actions required.&lt;/p&gt;</comment>
                            <comment id="217607" author="shadow" created="Fri, 5 Jan 2018 18:12:28 +0000"  >&lt;p&gt;Jay,&lt;/p&gt;

&lt;p&gt;it not a mixed IO types from user perspective. It will be good if DIO code will put a pages into single queue and send all data in once. It unlikely to be data corruption - as pages will be locked all time and will removed from mapping immediately after move to the submit queue.&lt;br/&gt;
Other solution - uses same way as NFS does. MM have a launder_page callback to solve problem with read page vs write vs invalidate page race.&lt;/p&gt;

</comment>
                            <comment id="221738" author="gerrit" created="Tue, 27 Feb 2018 03:41:56 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/30659/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30659/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10356&quot; title=&quot;CLIO simplification broke a direct IO sometimes&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10356&quot;&gt;&lt;del&gt;LU-10356&lt;/del&gt;&lt;/a&gt; llite: have ll_write_end to sync for DIO&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 6ea9171769db602b3a2b34419bdafacd38454cb4&lt;/p&gt;</comment>
                            <comment id="221769" author="pjones" created="Tue, 27 Feb 2018 04:26:15 +0000"  >&lt;p&gt;Landed for 2.11&lt;/p&gt;</comment>
                            <comment id="221813" author="gerrit" created="Tue, 27 Feb 2018 15:57:52 +0000"  >&lt;p&gt;Minh Diep (minh.diep@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/31432&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31432&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10356&quot; title=&quot;CLIO simplification broke a direct IO sometimes&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10356&quot;&gt;&lt;del&gt;LU-10356&lt;/del&gt;&lt;/a&gt; llite: have ll_write_end to sync for DIO&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 0cbb48ce77e8463a3b10a16b665a5bd2d2cac9e6&lt;/p&gt;</comment>
                            <comment id="225239" author="gerrit" created="Thu, 5 Apr 2018 19:59:11 +0000"  >&lt;p&gt;John L. Hammond (john.hammond@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/31432/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31432/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10356&quot; title=&quot;CLIO simplification broke a direct IO sometimes&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10356&quot;&gt;&lt;del&gt;LU-10356&lt;/del&gt;&lt;/a&gt; llite: have ll_write_end to sync for DIO&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 98dbdb50a9f2bd7ddcd61588106703c4df961c0a&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                                        </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzp0v:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>