<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:16:34 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-1431] Support for larger than 1MB sequential I/O RPCs</title>
                <link>https://jira.whamcloud.com/browse/LU-1431</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Currently Lustre maximum buffer size for a RPC sending I/O is 1MB. This work looks to change the amount of data transfer to allow the data sent to be a size to achieve peak performance with large I/O transfers to the back end disk. Also an additional benefit is the reduction in the round trip time to send the data.&lt;/p&gt;</description>
                <environment></environment>
        <key id="14521">LU-1431</key>
            <summary>Support for larger than 1MB sequential I/O RPCs</summary>
                <type id="2" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11311&amp;avatarType=issuetype">New Feature</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="pjones">Peter Jones</assignee>
                                    <reporter username="simmonsja">James A Simmons</reporter>
                        <labels>
                            <label>performance</label>
                    </labels>
                <created>Tue, 22 May 2012 11:55:10 +0000</created>
                <updated>Tue, 8 Dec 2020 05:50:17 +0000</updated>
                            <resolved>Tue, 14 May 2013 15:34:52 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                                    <fixVersion>Lustre 2.4.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>19</watches>
                                                                            <comments>
                            <comment id="39203" author="simmonsja" created="Tue, 22 May 2012 12:35:06 +0000"  >&lt;p&gt;Initial patch at &lt;a href=&quot;http://review.whamcloud.com/#change,2872&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,2872&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="39235" author="nrutman" created="Tue, 22 May 2012 16:14:50 +0000"  >&lt;p&gt;Thanks James.  I have asked Sergii to keep this bug updated with our progress.&lt;/p&gt;</comment>
                            <comment id="46011" author="adilger" created="Thu, 4 Oct 2012 14:41:12 +0000"  >&lt;p&gt;Nathan, Shadow,&lt;br/&gt;
any update on this ticket?  The last time we spoke about this, the patch from James wasn&apos;t working properly due to bad interaction with the 1MB bulk readdir RPCs on the MDS, and some other issues.&lt;/p&gt;

&lt;p&gt;If you have a patch that allows larger than 1MB bulk RPCs, it would be great to refresh &lt;a href=&quot;http://review.whamcloud.com/2872&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/2872&lt;/a&gt; with a working patch.  In order to avoid potential issues with performance decrease under some workloads, it might make sense to land the patch initially with the default maximum bulk RPC size still at 1MB.  This will allow users to test with large RPCs (load test and performance) and provide feedback, with minimal risk.  Alternately, if you have some performance metrics that compare 1MB/4MB performance under different loads (FPP, SSF, single client, many clients) that would be great.&lt;/p&gt;</comment>
                            <comment id="46058" author="nrutman" created="Fri, 5 Oct 2012 14:06:56 +0000"  >&lt;p&gt;Xyratex &lt;a href=&quot;http://jira-nss.xy01.xyratex.com:8080/browse/MRP-319&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;MRP-319&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="46060" author="nrutman" created="Fri, 5 Oct 2012 14:26:40 +0000"  >&lt;p&gt;This should be finished up in the next two weeks.&lt;/p&gt;

&lt;p&gt;We split it into a second cleanup patch:&lt;br/&gt;
    MRP-687 PTLRPC_BRW_MAX_SIZE usage cleanup.&lt;/p&gt;

&lt;p&gt;    Clean-up the layering in current code by eliminating direct usage of&lt;br/&gt;
    PTLRPC_BRW_MAX_SIZE macro outside of the ptlrpc module. This should help&lt;br/&gt;
    us achieve &quot;floating&quot; max brw size value across the cluster, which in&lt;br/&gt;
    turn should help with 4MB IO task.&lt;/p&gt;</comment>
                            <comment id="46062" author="adilger" created="Fri, 5 Oct 2012 15:01:04 +0000"  >&lt;p&gt;Nathan, thanks for the update.&lt;/p&gt;

&lt;p&gt;Splitting the patch up definitely makes sense, since I recall there are a number of places that don&apos;t differentiate between PTLRPC_BRW_MAX_SIZE and cl_max_pages_per_rpc properly.&lt;/p&gt;

&lt;p&gt;I assume you know the MRP-nnn URLs don&apos;t work outside of the Xyratex intranet, which is fine as long as the LU ticket gets updated with relevant information when the patches are submitted.&lt;/p&gt;</comment>
                            <comment id="46903" author="deen" created="Thu, 25 Oct 2012 11:02:46 +0000"  >&lt;p&gt;Andreas,&lt;/p&gt;

&lt;p&gt;For the second patch (the actual one that changes BRW size to 4MB) we need a new connect flag OBD_CONNECT_MULTIBULK. Comment in lustre_idl.h suggests that such changes must be approved by senior engineers before even sending the patch that reserves it for future use. So, I&apos;m asking you for the approval and will push review request right after it. Thanks.&lt;/p&gt;</comment>
                            <comment id="46948" author="adilger" created="Fri, 26 Oct 2012 04:40:34 +0000"  >&lt;p&gt;Sergii, could you please explain more fully why the new connect flag is needed.  I thought there was already an existing OBD_CONNECT_BRW_SIZE feature which allows the client and server to negotiate the maximum BRW RPC size already...&lt;/p&gt;

&lt;p&gt;As for requesting a flag assignment, this is to avoid conflicting users of OBD_CONNECT flags, which would render the feature bits useless.&lt;/p&gt;</comment>
                            <comment id="47637" author="simmonsja" created="Fri, 9 Nov 2012 09:24:54 +0000"  >&lt;p&gt;Will this patch be submitted to Gerrit soon for inspection and testing?&lt;/p&gt;</comment>
                            <comment id="48918" author="adilger" created="Fri, 7 Dec 2012 13:44:25 +0000"  >&lt;p&gt;Could you please update this bug with the current status of the patch.  &lt;/p&gt;</comment>
                            <comment id="48944" author="shadow" created="Sat, 8 Dec 2012 12:18:49 +0000"  >&lt;p&gt;We have finished internal inspection - I will ask deen to upload last version in Monday.&lt;/p&gt;</comment>
                            <comment id="49490" author="pjones" created="Thu, 20 Dec 2012 10:16:06 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/#change,4876&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,4876&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="49491" author="deen" created="Thu, 20 Dec 2012 10:19:31 +0000"  >&lt;p&gt;PTLRPC_MAX_BRW_SIZE cleanup: &lt;a href=&quot;http://review.whamcloud.com/#change,4876&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,4876&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;1. Instead of using one PTLRPC_MAX_BRW_SIZE all over the place, introduce MD_MAX_BRW_SIZE, OSC_MAX_BRW_SIZE, FILTER_MAX_BRW_SIZE (should it be OFD_MAX_BRW_SIZE now?) and auxiliary ONE_MB_BRW_SIZE.&lt;br/&gt;
ptlrpc still uses its own PTLRPC_MAX_BRW_SIZE, while other subsystems now use their corresponding macros. The actual 4MB IO patch will change only PTLRPC_MAX_BRW_SIZE and OSC_MAX_BRW_SIZE to 4MB, leaving other subsystems intact.&lt;/p&gt;

&lt;p&gt;2. From the original 4MB IO patch (bz16900), take in code which embeds obd_connect_data into obd_export in order to store the other node&apos;s ocd_brw_size value. The idea is to have a &quot;floating&quot; brw size across the cluster: during connection, nodes need to decide on the suitable brw size for both of them, which is min(node1_brw_size, node2_brw_size).&lt;/p&gt;</comment>
                            <comment id="49552" author="deen" created="Fri, 21 Dec 2012 11:46:56 +0000"  >&lt;p&gt;Updated patch has been pushed to Gerrit.&lt;/p&gt;</comment>
                            <comment id="50298" author="deen" created="Thu, 10 Jan 2013 18:39:23 +0000"  >&lt;p&gt;The actual 4MB I/O patch: &lt;a href=&quot;http://review.whamcloud.com/#change,4993&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,4993&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="50299" author="deen" created="Thu, 10 Jan 2013 18:44:23 +0000"  >&lt;p&gt;As part of the task, I&apos;ve developed a couple of tests. The first one, test_231a, checks that in case of a single 4MB transfer only one BRW RPC is sent. It depends on the information from /proc/fs/lustre/obdfilter/$OST/brw_stats and it seems that there are some issues with these statistics in your codebase, because despite the fact of the I/O the file is empty (only rows/columns names are shown). I haven&apos;t looked into this yet, so maybe you will have some ideas. Thanks.&lt;/p&gt;</comment>
                            <comment id="50406" author="deen" created="Mon, 14 Jan 2013 08:38:40 +0000"  >&lt;p&gt;Andreas, thank you for the hint regarding osd-ldiskfs. I&apos;ve fixed the test and have just pushed an updated patch to the Gerrit. It works on my local setup, so I think there will be no problems.&lt;br/&gt;
So, the current state of the task is that both patches successfully pass all the tests (Well, to be 100% sure we need to wait for the test results for the latest push, but I&apos;m pretty sure that there will be no issues).  &lt;/p&gt;</comment>
                            <comment id="51468" author="green" created="Wed, 30 Jan 2013 13:27:14 +0000"  >&lt;p&gt;I see that these patches fail testing at random places that never failed before, so this is worrisome.&lt;br/&gt;
What sort of testing did these patches see on your end I wonder?&lt;/p&gt;</comment>
                            <comment id="51475" author="deen" created="Wed, 30 Jan 2013 15:13:01 +0000"  >&lt;p&gt;I don&apos;t think that these patches fail at random places completely. The latest test runs for both patches fail in common places: replay-single and sanity-quota.&lt;/p&gt;

&lt;p&gt;As for the testing, the thing is that both patches differ from our original ones due to requests from Andreas, so I don&apos;t think that our testing results for the original patches are relevant.&lt;/p&gt;</comment>
                            <comment id="51489" author="nrutman" created="Wed, 30 Jan 2013 18:44:54 +0000"  >&lt;p&gt;Sergii, please see if you can reproduce the failures; it&apos;s our responsibility to fix any problems here.&lt;/p&gt;</comment>
                            <comment id="51550" author="adilger" created="Thu, 31 Jan 2013 12:33:22 +0000"  >&lt;p&gt;I noticed that the llite readahead window increment (RAS_INCREASE_STEP) was also based on PTLRPC_MAX_BRW_SIZE, but this is too large for PTLRPC_MAX_BRW_SIZE of 32MB when the actual cl_max_pages_per_rpc is only 1MB.  Instead, limit the readahead window growth to match the inode-&amp;gt;i_blkbits (current default min(PTLRPC_MAX_BRW_SIZE * 2, 4MB)), which is still reasonable regardless of the blocksize.  This also allows tuning the readahead on a per-inode basis in the future, depending on which OSTs the file is striped over, by fixing the i_blkbits value.&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/5230&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/5230&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="51777" author="adilger" created="Tue, 5 Feb 2013 04:16:14 +0000"  >&lt;p&gt;Deen, sorry about the problems with the 32MB RPC size.  That would have allowed us much better flexibility for testing and updated hardware in the future.  As stated in the patch, there are a number of issues that still need to be addressed:&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Sorry, it seems that the 32MB RPC size is too large to handle with the current code. Sorry for the confusion. It makes sense to revert PTLRPC_MAX_BRW_SIZE to 4MB for this patch, and we can resolve the problems with larger RPC size in follow-on patches.&lt;/p&gt;

&lt;p&gt;There are a number of problems found at 32MB that can be fixed independently:&lt;/p&gt;

&lt;p&gt;    osd_thread_info.osd_iobuf.dr_blocks[] is 512kB&lt;br/&gt;
    osd_thread_info.osd_iobuf.dr_pages[] is 64kB&lt;br/&gt;
    osd_thread_info.oti_created[] is 32kB and is unused and can be removed&lt;br/&gt;
    oti_thread_info uses OBD_ALLOC() instead of OBD_ALLOC_LARGE()&lt;br/&gt;
    all OST RPC threads allocate the same large OST_MAXREQSIZE buffers, but this is only needed for the OST_IO_PORTAL&lt;br/&gt;
    osd_thread_info.osd_iobuf is only needed for OST_IO_PORTAL and does not need to be allocated for other threads&lt;br/&gt;
    with the larger RPC buffers, there should be fewer total buffers allocated, see comments in &lt;a href=&quot;http://review.whamcloud.com/4940&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/4940&lt;/a&gt;&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;The current test result for this patch shows good improvement on FPP write, but a net loss for other IO loads.&lt;br/&gt;
Do you have any IO performance data that confirms or contradicts the below results?&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;IOR 	Single-shared file			
Date	RPC size	clients	write	read
2013/02/03	4MB	105	7153	8200
2013/02/03	1mb	105	7996	9269
				
				
IOR 	File-per-process			
Date	RPC size	clients	write	read
2013/02/03	4MB	105	9283	6000
2013/02/03	1mb	106	7233	6115
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;If this is the case, we could still e.g. default to sending 4MB write RPCs if there is enough data in cache and the client holds an exclusive DLM lock.&lt;/p&gt;</comment>
                            <comment id="58456" author="adilger" created="Tue, 14 May 2013 15:34:52 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LUDOC-80&quot; title=&quot;4MB RPC Doc Changes&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LUDOC-80&quot;&gt;&lt;del&gt;LUDOC-80&lt;/del&gt;&lt;/a&gt; landed, closing bug.&lt;/p&gt;</comment>
                            <comment id="60021" author="artem_blagodarenko" created="Wed, 5 Jun 2013 11:59:06 +0000"  >&lt;p&gt;Our testing system shows that the test replay-ost-single.test_5 failed&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;Lustre: DEBUG MARKER: == replay-ost-single test 5: Fail OST during iozone == 21:21:13 (1369851673)
Lustre: Failing over lustre-OST0000
LustreError: 11-0: an error occurred &lt;span class=&quot;code-keyword&quot;&gt;while&lt;/span&gt; communicating with 0@lo. The ost_write operation failed with -19
LustreError: Skipped 1 previous similar message
Lustre: lustre-OST0000-osc-ffff8800514d3400: Connection to lustre-OST0000 (at 0@lo) was lost; in progress operations using &lt;span class=&quot;code-keyword&quot;&gt;this&lt;/span&gt; service will wait &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; recovery to complete
Lustre: Skipped 1 previous similar message
Lustre: lustre-OST0000: shutting down &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; failover; client state will be preserved.
Lustre: OST lustre-OST0000 has stopped.
Lustre: server umount lustre-OST0000 complete
LustreError: 137-5: UUID &lt;span class=&quot;code-quote&quot;&gt;&apos;lustre-OST0000_UUID&apos;&lt;/span&gt; is not available &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; connect (no target)
LustreError: Skipped 1 previous similar message
LDISKFS-fs (loop1): mounted filesystem with ordered data mode. Opts: 
LDISKFS-fs (loop1): mounted filesystem with ordered data mode. Opts: 
Lustre: 16962:0:(ldlm_lib.c:2195:target_recovery_init()) RECOVERY: service lustre-OST0000, 2 recoverable clients, last_transno 1322
Lustre: lustre-OST0000: Now serving lustre-OST0000 on /dev/loop1 with recovery enabled
Lustre: 2398:0:(ldlm_lib.c:1021:target_handle_connect()) lustre-OST0000: connection from lustre-MDT0000-mdtlov_UUID@0@lo recovering/t0 exp ffff88005ca19c00 cur 1369851700 last 1369851697
Lustre: 2398:0:(ldlm_lib.c:1021:target_handle_connect()) Skipped 3 previous similar messages
Lustre: lustre-OST0000: Will be in recovery &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; at least 1:00, or until 2 clients reconnect
Lustre: lustre-OST0000: Recovery over after 0:01, of 2 clients 2 recovered and 0 were evicted.
Lustre: lustre-OST0000-osc-MDT0000: Connection restored to lustre-OST0000 (at 0@lo)
Lustre: Skipped 1 previous similar message
LustreError: 1716:0:(osc_request.c:1232:check_write_rcs()) Unexpected # bytes transferred: 65536 (requested 32768)
LustreError: 1716:0:(osc_request.c:1232:check_write_rcs()) Unexpected # bytes transferred: 2097152 (requested 1048576)
Lustre: lustre-OST0000: received MDS connection from 0@lo
Lustre: MDS mdd_obd-lustre-MDT0000: lustre-OST0000_UUID now active, resetting orphans
Lustre: DEBUG MARKER: iozone rc=1
Lustre: DEBUG MARKER: replay-ost-single test_5: @@@@@@ FAIL: iozone failed
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;These messages look related to the 4MB IO patch&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;LustreError: 1716:0:(osc_request.c:1232:check_write_rcs()) Unexpected # bytes transferred: 65536 (requested 32768)
LustreError: 1716:0:(osc_request.c:1232:check_write_rcs()) Unexpected # bytes transferred: 2097152 (requested 1048576)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;I believe that this test also fails in Intel&apos;s master branch, but they skip it as SLOW during testing&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/dd033a98-7264-11e2-aad1-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/dd033a98-7264-11e2-aad1-52540035b04c&lt;/a&gt;&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;test_5	SKIP	0	0	skipping SLOW test 5
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Could you please run this test (it is marked as SLOW) and check whether it fails?&lt;/p&gt;</comment>
                            <comment id="60022" author="pjones" created="Wed, 5 Jun 2013 12:11:18 +0000"  >&lt;p&gt;Artem&lt;/p&gt;

&lt;p&gt;Could you please open a new ticket for this failure so we can track it?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="60025" author="artem_blagodarenko" created="Wed, 5 Jun 2013 12:19:02 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3438&quot; title=&quot;replay-ost-single test_5 failed with error int check_write_rcs() &amp;quot;Unexpected # bytes transferred&amp;quot;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3438&quot;&gt;&lt;del&gt;LU-3438&lt;/del&gt;&lt;/a&gt; is created.&lt;/p&gt;</comment>
                            <comment id="60026" author="pjones" created="Wed, 5 Jun 2013 12:21:39 +0000"  >&lt;p&gt;Thanks Artem!&lt;/p&gt;</comment>
                            <comment id="60645" author="artem_blagodarenko" created="Fri, 14 Jun 2013 12:26:18 +0000"  >&lt;p&gt;Xyratex-bug-id: &lt;a href=&quot;http://jira-nss.xy01.xyratex.com:8080/browse/MRP-687&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;MRP-687 &lt;/a&gt;&lt;br/&gt;
Xyratex-bug-id: &lt;a href=&quot;http://jira-nss.xy01.xyratex.com:8080/browse/MRP-319&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;MRP-319 &lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="15918">LUDOC-80</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="16844">LU-2424</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="22860">LU-4533</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="18787">LU-3308</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="17522">LU-2791</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="17582">LU-2816</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="17127">LU-2598</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="17521">LU-2790</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="19289">LU-3438</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="17439">LU-2748</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="15918">LUDOC-80</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                            <subtask id="17340">LU-2702</subtask>
                            <subtask id="17454">LU-2756</subtask>
                            <subtask id="17439">LU-2748</subtask>
                    </subtasks>
                <customfields>
                                                                                                                                    <customfield id="customfield_10020" key="com.atlassian.jira.plugin.system.customfieldtypes:float">
                        <customfieldname>Bugzilla ID</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>16900.0</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10040" key="com.atlassian.jira.plugin.system.customfieldtypes:labels">
                        <customfieldname>Epic</customfieldname>
                        <customfieldvalues>
                                        <label>lnet</label>
    
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzv3br:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>4038</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                </customfields>
    </item>
</channel>
</rss>