<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:56:46 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-12916] allow small/unaligned writes into writeback cache if not O_SYNC</title>
                <link>https://jira.whamcloud.com/browse/LU-12916</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Allow small/unaligned writes to be placed directly into pagecache on the OST without starting IO.  This should help IOPS for small writes where we are not limited by pagecache performance as is the case for large writes (where patch &lt;a href=&quot;https://review.whamcloud.com/34422&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34422&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12071&quot; title=&quot;bypass pagecache for large files&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12071&quot;&gt;&lt;del&gt;LU-12071&lt;/del&gt;&lt;/a&gt; osd-ldiskfs: bypass pagecache if requested&lt;/tt&gt;&quot; is trying to do the opposite and &lt;b&gt;avoid&lt;/b&gt; page cache for large writes).&lt;/p&gt;

&lt;p&gt;A reasonable starting point would be 64KiB writes, since this is the upper limit for short writes (data embedded into the BRW RPC request), and beyond this we get good performance for larger writes already (per &lt;a href=&quot;https://www.eofs.eu/_media/events/lad19/05_andreas_dilger-lustre_2.14_and_beyond.pdf&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;testing of flash at different io sizes&lt;/a&gt;, page 8).&lt;/p&gt;</description>
                <environment></environment>
        <key id="57265">LU-12916</key>
            <summary>allow small/unaligned writes into writeback cache if not O_SYNC</summary>
                <type id="4" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11310&amp;avatarType=issuetype">Improvement</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="1" iconUrl="https://jira.whamcloud.com/images/icons/statuses/open.png" description="The issue is open and ready for the assignee to start work on it.">Open</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="bzzz">Alex Zhuravlev</assignee>
                                    <reporter username="adilger">Andreas Dilger</reporter>
                        <labels>
                    </labels>
                <created>Tue, 29 Oct 2019 23:03:20 +0000</created>
                <updated>Mon, 22 Jan 2024 15:49:53 +0000</updated>
                                                                                <due></due>
                            <votes>0</votes>
                                    <watches>8</watches>
                                                                            <comments>
                            <comment id="267928" author="adilger" created="Sat, 18 Apr 2020 04:36:24 +0000"  >&lt;p&gt;Alex Zhuravlev (bzzz@whamcloud.com) uploaded a patch: &lt;a href=&quot;https://review.whamcloud.com/36596&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36596&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12916&quot; title=&quot;allow small/unaligned writes into writeback cache if not O_SYNC&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12916&quot;&gt;LU-12916&lt;/a&gt; osd: use writeback cache in ldiskfs&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 18&lt;br/&gt;
Commit: eb24c28a61953800c5dd9f382620d573e99f1f43&lt;/p&gt;</comment>
                            <comment id="271408" author="gerrit" created="Thu, 28 May 2020 13:44:46 +0000"  >&lt;p&gt;Alex Zhuravlev (bzzz@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/38747&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/38747&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12916&quot; title=&quot;allow small/unaligned writes into writeback cache if not O_SYNC&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12916&quot;&gt;LU-12916&lt;/a&gt; osd: send data to journal&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: f767bc5689bbdd53ae6ef03e660c7ac18bb4e5a7&lt;/p&gt;</comment>
                            <comment id="275397" author="paf0186" created="Tue, 14 Jul 2020 22:53:55 +0000"  >&lt;p&gt;I thought the original driver for turning off the page cache was maximum IOPS.&#160; I can&apos;t tag him (Only WC users can tag people in WC Jira, for some reason...), but I think Ihara did the benchmarking at the time.&#160; Can someone tag him here?&lt;/p&gt;</comment>
                            <comment id="275413" author="adilger" created="Wed, 15 Jul 2020 03:12:27 +0000"  >&lt;p&gt;Patrick, there are two different goals/needs here. There is a need to write directly into page cache for poorly formed workloads like IO500 ior-hard-write, because the writes are not page aligned, and doing synchronous read-modify-write for sub-page IO size is very expensive, even with a block-level write cache in front of NVMe. &lt;/p&gt;

&lt;p&gt;We&apos;ve already instituted a dynamic cache at the OST level, where large aligned writes are never cached, and page cache is disabled for non-rotational devices, but we need to re-enable page cache for those badly-formed writes. &lt;/p&gt;</comment>
                            <comment id="369653" author="qian_wc" created="Mon, 17 Apr 2023 14:40:08 +0000"  >&lt;p&gt;Hi Alex,&lt;br/&gt;
I am studying your patch (&lt;a href=&quot;https://review.whamcloud.com/36596&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36596&lt;/a&gt;) recently. It seems that your patch used delayed allocation with write-back in order to obtain better performance.&lt;/p&gt;

&lt;p&gt;According to the description in URL: &lt;a href=&quot;https://lwn.net/Articles/322823/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://lwn.net/Articles/322823/&lt;/a&gt;&lt;/p&gt;
&lt;blockquote&gt;&lt;p&gt;&quot;Delayed allocation&quot; means that the filesystem tries to delay the allocation of physical disk blocks for written data for as long as possible. This policy brings some important performance benefits. Many files are short-lived; delayed allocation can keep the system from writing fleeting temporary files to disk at all. And, for longer-lived files, delayed allocation allows the kernel to accumulate more data and to allocate the blocks for data contiguously, speeding up both the write and any subsequent reads of that data. It&apos;s an important optimization which is found in most contemporary filesystems.&lt;/p&gt;&lt;/blockquote&gt;
&lt;blockquote&gt;&lt;p&gt;But, if blocks have not been allocated for a file, there is no need to write them quickly as a security measure. Since the blocks do not yet exist, it is not possible to read somebody else&apos;s data from them. So ext4 will not (cannot) write out unallocated blocks as part of the next journal commit cycle. Those blocks will, instead, wait until the kernel decides to flush them out; at that point, physical blocks will be allocated on disk and the data will be made persistent. The kernel doesn&apos;t like to let file data sit unwritten for too long, but it can still take a minute or so (with the default settings) for that data to be flushed - far longer than the five seconds normally seen with ext3. And that is why a crash can cause the loss of quite a bit more data when ext4 is being used.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Delayed allocation is a good feature: &quot;delayed allocation allows the kernel to accumulate more data and to allocate the blocks for data contiguously, speeding up both the write&quot;&lt;br/&gt;
But I am afraid we can not use delayed allocation in write-back support for our OSD-ldiskfs...&lt;br/&gt;
&quot;Ext4 will not (cannot) write out unallocated blocks as part of the next journal commit cycle...&quot;&lt;br/&gt;
Thus during the journal commit, the page with unallocated blocks is not committed into the stable disk...&lt;br/&gt;
If the last_committed &amp;gt; transno of the bulk write on the client, we may wrongly release the unstable page of the write RPC on the client which may be not stable on the storage on the server...&lt;/p&gt;

&lt;p&gt;So I think we still need to allocate blocks for each write page on OST, after that we can write each page in write-back way with ordered-journal mode.&lt;br/&gt;
I have no idea how it will help with the performance.&lt;br/&gt;
But I expect it can improve the performance of ior-hard-write (combined with Patrick&apos;s unaligned DIO patch) and mdtest-hard-write(combined with my patch &lt;a href=&quot;https://review.whamcloud.com/#/c/fs/lustre-release/+/49342/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/fs/lustre-release/+/49342/&lt;/a&gt;).&lt;/p&gt;

&lt;p&gt;I could help to refine your patch if you are busy. However, I am not very familiar with Ext4, you are the expert in ldiskfs, could you please give your professional opinion?&lt;/p&gt;

&lt;p&gt;Thanks,&lt;br/&gt;
Qian&lt;/p&gt;

</comment>
                            <comment id="369861" author="gerrit" created="Wed, 19 Apr 2023 07:43:49 +0000"  >&lt;p&gt;&quot;Qian Yingjin &amp;lt;qian@ddn.com&amp;gt;&quot; uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/50687&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/50687&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12916&quot; title=&quot;allow small/unaligned writes into writeback cache if not O_SYNC&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12916&quot;&gt;LU-12916&lt;/a&gt; osd: use writeback for small writes in ldiskfs&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b6c208f1bd2c220e926f555ad78d4331b911231c&lt;/p&gt;</comment>
                            <comment id="371919" author="gerrit" created="Thu, 11 May 2023 09:47:35 +0000"  >&lt;p&gt;&quot;Qian Yingjin &amp;lt;qian@ddn.com&amp;gt;&quot; uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/50940&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/50940&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12916&quot; title=&quot;allow small/unaligned writes into writeback cache if not O_SYNC&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12916&quot;&gt;LU-12916&lt;/a&gt; osd-ldiskfs: check and submit good full write&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: a4f26530367f040ba148acecd198a6ea13541701&lt;/p&gt;</comment>
                            <comment id="372547" author="gerrit" created="Wed, 17 May 2023 08:54:21 +0000"  >&lt;p&gt;&quot;Qian Yingjin &amp;lt;qian@ddn.com&amp;gt;&quot; uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/51033&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/51033&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12916&quot; title=&quot;allow small/unaligned writes into writeback cache if not O_SYNC&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12916&quot;&gt;LU-12916&lt;/a&gt; osd-ldiskfs: add delayed allocation support&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: a3eae66329bc90bf6b00a1ba45b69ac63c1157f7&lt;/p&gt;</comment>
                            <comment id="372930" author="gerrit" created="Fri, 19 May 2023 15:43:05 +0000"  >&lt;p&gt;&quot;Qian Yingjin &amp;lt;qian@ddn.com&amp;gt;&quot; uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/51063&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/51063&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12916&quot; title=&quot;allow small/unaligned writes into writeback cache if not O_SYNC&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12916&quot;&gt;LU-12916&lt;/a&gt; osd-ldiskfs: detect good extent via extent tree&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 8365ad9fec435b712d61469102b6f52261898a1e&lt;/p&gt;</comment>
                            <comment id="373771" author="gerrit" created="Tue, 30 May 2023 09:31:04 +0000"  >&lt;p&gt;&quot;Qian Yingjin &amp;lt;qian@ddn.com&amp;gt;&quot; uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/51159&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/51159&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12916&quot; title=&quot;allow small/unaligned writes into writeback cache if not O_SYNC&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12916&quot;&gt;LU-12916&lt;/a&gt; osd-ldiskfs: use workqueue to write good extent&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 5f1f2e1dfbd5a18233302fdc7254649c461f5dfc&lt;/p&gt;</comment>
                            <comment id="393383" author="sihara" created="Fri, 17 Nov 2023 10:51:47 +0000"  >&lt;p&gt;master&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[RESULT] ior-hard-write 5.368100 GiB/s : time 313.086 seconds
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;master + &lt;a href=&quot;https://review.whamcloud.com/#/c/fs/lustre-release/+/51159/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/fs/lustre-release/+/51159/&lt;/a&gt; (patch set6)&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[RESULT] ior-hard-write 8.027850 GiB/s : time 319.704 seconds
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;the patch demonstrated 50% performance improvements today. After patch, we are seeing very good aligned 1M IOs to disks, but it causes memory pressures on OSSs. When it happens, no more &#160;aligned IOs, and the performance down in the end.&lt;br/&gt;
I will give more detailed stats to compare memory pressure vs performance, but the patch still needs to release and reclaim after flushing dirty data to disks. It should be able to keep nice aligned IOs to disks as much as possible.&lt;/p&gt;</comment>
                            <comment id="393456" author="adilger" created="Sat, 18 Nov 2023 06:48:25 +0000"  >&lt;p&gt;i wrote in email:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;I was thinking that it would be possible to drop the pages from page cache right after a large IO was submitted. That would leave more RAM available to aggregate small writes in cache until they can form a complete write, and avoid memory pressure. &lt;/p&gt;

&lt;p&gt;We might be able to do this at the osd-ldiskfs level, but it might need to patch the write completion callback to drop the pages from cache? That should be conditional on some flag on the inode or something so that we can control whether the writes are cached or not (eg. by size or tunable parameter).&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Yingjin replied:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;We have already implemented this for full stripe write in the patch (&lt;a href=&quot;https://review.whamcloud.com/51159/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/51159/&lt;/a&gt;):&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
osd_da_full_write_workfn()
       &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (osd-&amp;gt;od_range_delalloc_drop_cache &amp;amp;&amp;amp; rc == 0 &amp;amp;&amp;amp;
&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;    nr_pages == mpd.locked_index + 1) {
&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;pgoff_t end = gstart + nr_pages - 1;
&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; err;

&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;rc = filemap_fdatawait_range(mapping, gstart &amp;lt;&amp;lt; PAGE_SHIFT,
&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;      end &amp;lt;&amp;lt; PAGE_SHIFT);
&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (rc &amp;lt; 0) {
&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;CERROR(&lt;span class=&quot;code-quote&quot;&gt;&quot;Wait writeback range failed: rc = %d\n&quot;&lt;/span&gt;, rc);
&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;GOTO(out_iput, rc);
&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;}

&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;err = invalidate_inode_pages2_range(mapping, gstart, end);
&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (err &amp;lt; 0)
&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;CERROR(&lt;span class=&quot;code-quote&quot;&gt;&quot;failed to invalidate pages: rc = %d\n&quot;&lt;/span&gt;, err);
&#8194;&#8194;&#8194;&#8194;&#8194;&#8194;}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;We use Ext4 extent_status tree to track the delayed allocation extents for a file.&lt;br/&gt;
When a write in the I/O service thread with delayed allocation can form a good full stripe extent I/O, which means the extent_status extent after merge contains a full I/O extent where the offset and size in this full I/O extent are all 1MiB aligned, then we will launch a extra work queue thread to do dirty page flush in this I/O range.&lt;br/&gt;
In this work queue thread, it will first flush this full stripe I/O. And if the osd-&lt;tt&gt;ldiskfs.&amp;#42;.range_delalloc_drop_cache=1&lt;/tt&gt;, then it will wait the full stripe I/O to finish and discard the pages from cache.&lt;/p&gt;

&lt;p&gt;However, under memory pressure with writeback enabled on OSD, the kernel will trigger page reclaim. It will call ext4|ldiskfs_writepages() to write out the dirty page, which may destroy the full extent I/O, resulting in lots of small I/O to the RAID disk system.&lt;br/&gt;
Thus I think we may still need to patch the ldiskfs in ext4/ldiskfs_writepages(), add a flag to the inode with delalloc to indicate that it needs to wait for writeback to finish and drop the cache to mitigate the memory pressure, thus avoid the small I/O caused by kernel page reclaim mechanism as much as possible, reduce its impact on the performance.&lt;/p&gt;&lt;/blockquote&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                                        </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="59962">LU-13786</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00opz:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                </customfields>
    </item>
</channel>
</rss>