<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:19:25 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-1757] Short I/O support</title>
                <link>https://jira.whamcloud.com/browse/LU-1757</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Perform short I/O (requests &amp;lt;= 4k) w/o bulk RPC.&lt;/p&gt;</description>
                <environment></environment>
        <key id="15506">LU-1757</key>
            <summary>Short I/O support</summary>
                <type id="4" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11310&amp;avatarType=issuetype">Improvement</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="paf">Patrick Farrell</assignee>
                                    <reporter username="aboyko">Alexander Boyko</reporter>
                        <labels>
                            <label>patch</label>
                    </labels>
                <created>Thu, 16 Aug 2012 04:42:02 +0000</created>
                <updated>Sat, 2 Nov 2019 14:02:48 +0000</updated>
                            <resolved>Fri, 22 Dec 2017 12:45:41 +0000</resolved>
                                                    <fixVersion>Lustre 2.11.0</fixVersion>
                    <fixVersion>Lustre 2.12.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>15</watches>
                                                                            <comments>
                            <comment id="43319" author="aboyko" created="Thu, 16 Aug 2012 05:12:03 +0000"  >&lt;p&gt;req &lt;a href=&quot;http://review.whamcloud.com/3690&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/3690&lt;/a&gt;&lt;br/&gt;
Test results (seconds, less is better)&lt;/p&gt;

&lt;div class=&apos;table-wrap&apos;&gt;
&lt;table class=&apos;confluenceTable&apos;&gt;&lt;tbody&gt;
&lt;tr&gt;
&lt;th class=&apos;confluenceTh&apos;&gt;Test case&lt;/th&gt;
&lt;th class=&apos;confluenceTh&apos;&gt;Test script essence&lt;/th&gt;
&lt;th class=&apos;confluenceTh&apos;&gt;Short I/O&lt;/th&gt;
&lt;th class=&apos;confluenceTh&apos;&gt;non Short I/O&lt;/th&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;Write in each page&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;dd of=$TARGET bs=4096 count=100000 oflag=direct&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;48.1s&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;52.1s&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;mmap I/O&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;multiop $TARGET OsMRUc&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;98s&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;99.8s&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;Non-paged read&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;dd if=$TARGET bs=2048 count=100 skip=$offset&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;32.4s&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;34.8s&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;&lt;/table&gt;
&lt;/div&gt;
</comment>
                            <comment id="43725" author="adilger" created="Fri, 24 Aug 2012 05:29:29 +0000"  >&lt;p&gt;Thanks, I was just looking in this bug to see if there were any kind of performance results.&lt;/p&gt;

&lt;p&gt;The improvement isn&apos;t quite as good as I was hoping to see (i.e. only a few percent faster instead of 2-3x faster).  Do you have any idea on where there are other performance bottlenecks for this use case?  What is the performance of these tests on the local OST filesystem?&lt;/p&gt;</comment>
                            <comment id="43730" author="aboyko" created="Fri, 24 Aug 2012 08:06:02 +0000"  >&lt;p&gt;(52.1-48.1)*100/52.1=7.677543186 ~ 7% from total dd operation. I think this is not so bad. May be we need to compare ost_brw_write by time stamps to exclude other Lustre overhead. For short io bulk transfer was changed to memcpy at both sides client/server so we have bulk vs memcpy time.&lt;/p&gt;</comment>
                            <comment id="45245" author="pjones" created="Thu, 20 Sep 2012 00:43:07 +0000"  >&lt;p&gt;Landed for 2.3 and 2.4&lt;/p&gt;</comment>
                            <comment id="45253" author="adilger" created="Thu, 20 Sep 2012 04:53:55 +0000"  >&lt;p&gt;Peter, only the reservation of the feature flag has landed, not the actual code to implement it. &lt;/p&gt;</comment>
                            <comment id="46390" author="pjones" created="Thu, 11 Oct 2012 13:32:08 +0000"  >&lt;p&gt;Landed for 2.4&lt;/p&gt;</comment>
                            <comment id="46497" author="jfilizetti" created="Fri, 12 Oct 2012 20:05:21 +0000"  >&lt;p&gt;Peter, the cherry picked patch that was added b2_1, b2_3 and master was only for the connect flags to reserve them, the full patch still doesn&apos;t appear to be landed.  If it is can you provide the commit because I can&apos;t find it?&lt;/p&gt;</comment>
                            <comment id="46505" author="pjones" created="Sat, 13 Oct 2012 00:55:03 +0000"  >&lt;p&gt;Ah yes I think that you are right Jeremy - thanks!&lt;/p&gt;</comment>
                            <comment id="46535" author="adilger" created="Sun, 14 Oct 2012 16:21:19 +0000"  >&lt;p&gt;Jeremy, if you (or someone you know) have the ability to do so, it would be great to get some performance benchmarks on this patch over high-latency links.  As it stands, getting only a few percent improvement for small IO sizes (7.7MB/s to 8.3MB/s) isn&apos;t compelling.&lt;/p&gt;

&lt;p&gt;Alexander, what was the back-end storage used for this test?  If it was a disk, then the IOPS rate would be the limiting factor, though 100000k writes in 52s is about 2000 IOPS, so probably a RAID-10 array or SSD?  While I think that this could help the performance, I suspect that a closer investigation of where the actual overhead lies would help.  Is there a need for more RPCs in flight with small IOs?  Is the latency in the server stack or RPC handling?&lt;/p&gt;</comment>
                            <comment id="46587" author="adilger" created="Mon, 15 Oct 2012 14:17:33 +0000"  >&lt;p&gt;Eric, I recall you having some thoughts about this.  The current patch limits the bulk request size to be &amp;lt;= one page of data (+ overhead), which isn&apos;t out of line with MDS requests which can have up to 4kB for a symlink or other pathname component.&lt;/p&gt;

&lt;p&gt;I think it is unavoidable that if we want low latency small IOs that they be done without extra round trips, but I would have thought the performance improvement was much better than a few percent...  Perhaps testing against a ramdisk OST would give us a better idea of the upper limit of performance for this patch?&lt;/p&gt;</comment>
                            <comment id="46862" author="aboyko" created="Tue, 23 Oct 2012 13:46:27 +0000"  >&lt;p&gt;I got new test result from ramfs.&lt;/p&gt;

&lt;p&gt;IOR shortio (1 client, IB, ost and mds on the ramfs, 10 runs, average result)&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Command line used: IOR -a POSIX -t 4k -b 1G -B -o /mnt/lustre/mmap/mmap
Machine: Linux mrpcli9

Summary:
        api                = POSIX
        test filename      = /mnt/lustre/mmap/mmap
        access             = single-shared-file
        ordering in a file = sequential offsets
        ordering inter file= no tasks offsets
        clients            = 1 (1 per node)
        repetitions        = 1
        xfersize           = 4096 bytes
        blocksize          = 1 GiB
        aggregate filesize = 1 GiB

Operation  Max (MiB)  Min (MiB)  Mean (MiB)   Std Dev  Max (OPs)  Min (OPs)  Mean (OPs)   Std Dev  Mean (s)
---------  ---------  ---------  ----------   -------  ---------  ---------  ----------   -------  --------
write          10.59      10.59       10.59      0.00    2709.96    2709.96     2709.96      0.00  96.73352   EXCEL
read           14.00      14.00       14.00      0.00    3584.71    3584.71     3584.71      0.00  73.12840   EXCEL

Max Write: 10.59 MiB/sec (11.10 MB/sec)
Max Read:  14.00 MiB/sec (14.68 MB/sec)

Run finished: Mon Oct 22 10:31:36 2012

real    2m49.891s
user    0m0.537s
sys     1m12.616s
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;IOR without short IO (1 client, IB, ost and mds on the ramfs, 10 runs, average result)&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Command line used: IOR -a POSIX -t 4k -b 1G -B -o /mnt/lustre/mmap/mmap
Machine: Linux mrpcli9

Summary:
        api                = POSIX
        test filename      = /mnt/lustre/mmap/mmap
        access             = single-shared-file
        ordering in a file = sequential offsets
        ordering inter file= no tasks offsets
        clients            = 1 (1 per node)
        repetitions        = 1
        xfersize           = 4096 bytes
        blocksize          = 1 GiB
        aggregate filesize = 1 GiB

Operation  Max (MiB)  Min (MiB)  Mean (MiB)   Std Dev  Max (OPs)  Min (OPs)  Mean (OPs)   Std Dev  Mean (s)
---------  ---------  ---------  ----------   -------  ---------  ---------  ----------   -------  --------
write          10.36      10.36       10.36      0.00    2651.19    2651.19     2651.19      0.00  98.87794   EXCEL
read           12.64      12.64       12.64      0.00    3235.79    3235.79     3235.79      0.00  81.01380   EXCEL

Max Write: 10.36 MiB/sec (10.86 MB/sec)
Max Read:  12.64 MiB/sec (13.25 MB/sec)

Run finished: Tue Oct 23 02:12:21 2012

real    2m59.920s
user    0m0.512s
sys     1m9.490s
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;dd if=/dev/zero of=$FILE bs=4096 count=300000 oflag=direct (1 client, IB, ost and mds on the ramfs)&lt;br/&gt;
short IO: 113.5-116.0 sec&lt;br/&gt;
no short IO: 116.5-118.5 sec&lt;br/&gt;
multiop $TARGET OsMRUc on 1.2 GB target file (1 client, IB, ost and mds on the ramfs, 10 iterations)&lt;br/&gt;
short IO: 195.6 sec&lt;br/&gt;
no short IO: 199.2 sec&lt;/p&gt;</comment>
                            <comment id="47203" author="adilger" created="Wed, 31 Oct 2012 12:10:30 +0000"  >&lt;p&gt;In talking with Eric, one concern with using inline bulk data is that this can increase the request size enough to cause the routers to use 1MB buffers for handling the short IO requests, and potentially cause the routers to run out of buffers.&lt;/p&gt;

&lt;p&gt;With RDMA RPCs the number of inflight bulk requests is limited by the number if service threads (typically 512*num_osts), but with the inline bulk data the number of inflight requests is much larger (8*num_clients*num_osts).&lt;/p&gt;

&lt;p&gt;In order to avoid consuming all of the large buffers on the routers, either a third pool for 8kB requests is needed (in addition to the 4kB and 1MB pools) or the small request (4kB) pool should be modified to use an 8kB buffer size. &lt;/p&gt;</comment>
                            <comment id="47233" author="morrone" created="Wed, 31 Oct 2012 18:31:59 +0000"  >&lt;p&gt;Alex, if you want to run a test 10 times and get the average, I recommend ior&apos;s &quot;-i&quot; option.  Otherwise its less obvious to others that you did more than eye-ball the numbers and pick a psuedo-average.  With a write performance difference of only 2%, and overall throughput numbers that are so low, it is hard to tell if the results are statistically significant.&lt;/p&gt;</comment>
                            <comment id="48214" author="nrutman" created="Wed, 21 Nov 2012 17:13:47 +0000"  >&lt;p&gt;Xyratex-bug-id: &lt;a href=&quot;http://jira-nss.xy01.xyratex.com:8080/browse/MRP-320&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;MRP-320&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="48412" author="jfilizetti" created="Tue, 27 Nov 2012 10:04:33 +0000"  >&lt;p&gt;I&apos;ve tried several times over the past couple weeks to test this patch with master over the WAN but every time I do direct IO read or write I get an LBUG:&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@test tmp&amp;#93;&lt;/span&gt;# dd if=test of=/dev/null bs=4k iflag=direct&lt;/p&gt;

&lt;p&gt;Message from syslogd@test at Nov 27 03:44:12 ...&lt;br/&gt;
 kernel:LustreError: 19403:0:(rw26.c:483:ll_direct_IO_26()) ASSERTION( obj-&amp;gt;cob_transient_pages == 0 ) failed: &lt;/p&gt;

&lt;p&gt;Is this an already known issue with direct IO on master?&lt;/p&gt;
</comment>
                            <comment id="51958" author="shadow" created="Thu, 7 Feb 2013 07:55:49 +0000"  >&lt;p&gt;&amp;gt; In talking with Eric, one concern with using inline bulk data is that this can increase the request size enough to cause the routers to use 1MB buffers for handling the short IO requests, and potentially cause the routers to run out of buffers.&lt;/p&gt;

&lt;p&gt;That is too bad for routers. Routers should be have more then two sizes for request size, anyway we have send a transfer size as part of lnet header. &lt;/p&gt;</comment>
                            <comment id="60719" author="adilger" created="Fri, 14 Jun 2013 22:35:35 +0000"  >&lt;p&gt;I was thinking of another potential area where this short IO could improve performance significantly (and give a good reason to land it), is when many clients are writing to the same object.  Is it possible for you to run a test with multiple clients IOR writing &amp;lt;= 4kB interleaved chunks to the same 1-stripe file?  Ideally this would use server-side locking for the writes, so that there is very minimal contention.  It might even be that submitting smaller IOs (say 32 bytes) would give even more of a boost to this patch, since the client does not need to do read-modify-write for the full-page writes as it does today.&lt;/p&gt;

&lt;p&gt;If this feature can show some significant performance improvements (say 3-4 times faster, though I&apos;d expect possibly much more) then I would be happy to work on getting this this feature landed.&lt;/p&gt;</comment>
                            <comment id="69012" author="adilger" created="Tue, 15 Oct 2013 19:00:49 +0000"  >&lt;p&gt;Alexander, I saw that the patch for this feature is abandoned, however small writes is definitely an area that Lustre could use a considerable amount of improvement.  I&apos;m still hopeful that there may be some workloads that this feature could show significant performance improvements on, or at least show what other work needs to be done in addition to this patch.  The Data-on-MDT work is more concerned with small &lt;em&gt;files&lt;/em&gt; and is definitely orthogonal to this small &lt;em&gt;write&lt;/em&gt; patch which is intended to improve small write RPCs to a potentially very large file.&lt;/p&gt;

&lt;p&gt;It may be that we need to make additional changes in order to see the overall improvement of small files.  Some areas to investigate to see why this patch isn&apos;t showing the expected improvements:&lt;/p&gt;
&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;what is the improvement when the writes are smaller than a single disk block?&lt;/li&gt;
	&lt;li&gt;what is the improvement when multiple clients are doing interleaved writes to the same file?  This can be tested relatively easily with IOR and multiple client nodes (&quot;&lt;tt&gt;ior -w -b 32 -t 32 -s 65536 -N 8 -i 10 -o /mnt/lustre/testfile&lt;/tt&gt;&quot; runs on 8 clients and does 65536 interleaved 32-byte writes per client).&lt;/li&gt;
	&lt;li&gt;what impact does NRS object-based round-robin (ORR) have when doing small writing to a single file?  This should sort the writes by file offset, but it may be that short writes also need to be cached on the OST so that they can avoid synchronous read-modify-write on the disk.  This might be more easily tested with a ZFS OSD, which already does write caching, while the ldiskfs OSD would need changes to the IO path in order to cache small writes.&lt;/li&gt;
	&lt;li&gt;in the shared single interleaved write case, are the clients doing lockless writes?  If not, the lock contention and overhead of doing LDLM enqueue/cancel for each write may easily dominate over the improvement from the small write patch.  For sub-page writes, it might also be that there needs to be some IO fastpath that bypasses the client page cache so that it can avoid read-modify-write for the local page.&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;I suspect that if there were multiple clients doing small writes&lt;/p&gt;</comment>
                            <comment id="70754" author="aboyko" created="Tue, 5 Nov 2013 17:52:08 +0000"  >&lt;p&gt;I want to reserve OBDO flag for short io&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/8182&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/8182&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Andreas, right now, I have no time and resources to check your suggestion, and the short io patch is outdated for master and required reworks. Data-on-MDT looks very good for short io and need another patch also.&lt;br/&gt;
We did not do the lockless test for shared files, but single client pages write with oflags=direct  did not show significant improvement.  &lt;/p&gt;</comment>
                            <comment id="199879" author="gerrit" created="Wed, 21 Jun 2017 19:44:43 +0000"  >&lt;p&gt;Patrick Farrell (paf@cray.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/27767&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27767&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1757&quot; title=&quot;Short I/O support&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1757&quot;&gt;&lt;del&gt;LU-1757&lt;/del&gt;&lt;/a&gt; brw: add short io osc/ost transfer.&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 01056e12846a73c041da92d8a4f216f2641ca1cc&lt;/p&gt;</comment>
                            <comment id="199880" author="paf" created="Wed, 21 Jun 2017 19:47:00 +0000"  >&lt;p&gt;I&apos;ve resurrected this patch and ported to current master.  Some simple testing here suggests A) it&apos;s working fine, and B) it gives about a 30% performance improvement for direct I/O of appropriate size (I upped the limit to 3 pages, that&apos;s what fits in the RPC, I believe), when I&apos;m reading from a fast storage device (RAM or flash).  When I&apos;m doing small I/O to/from a spinning disk, I see no real improvement - But that&apos;s probably because network latency is not the primary driver of I/O performance there.&lt;/p&gt;</comment>
                            <comment id="200030" author="adilger" created="Thu, 22 Jun 2017 21:06:26 +0000"  >&lt;p&gt;Patrick, have you tested aligned or unaligned reads/writes?  I expect with unaligned multi-client writes and server-side locking that this could also improve performance significantly.&lt;/p&gt;

&lt;p&gt;There could also be a big benefit from bypassing the client-side aggregation and caching mechanisms completely in that case, and just dump the chunks to the OST as fast as possible, and use something like NRS ORR to aggregate the IOs properly on the server, or at least avoid read-modify-write for small writes over the network.&lt;/p&gt;</comment>
                            <comment id="200031" author="paf" created="Thu, 22 Jun 2017 21:25:07 +0000"  >&lt;p&gt;Andreas,&lt;/p&gt;

&lt;p&gt;Aligned, mostly.  Can&apos;t really test unaligned without getting aggregation or readahead, except for random reads.  (Those do well.)&lt;/p&gt;

&lt;p&gt;Server side locking...  How is that achieved, other than with the patch from &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4198&quot; title=&quot;Improve IO performance when using DIRECT IO using libaio&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4198&quot;&gt;&lt;del&gt;LU-4198&lt;/del&gt;&lt;/a&gt;, and that only for direct i/o? (&lt;a href=&quot;https://review.whamcloud.com/#/c/8201/20&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/8201/20&lt;/a&gt;)&lt;br/&gt;
And since it&apos;s direct i/o, it has to be aligned.  (Trying to update &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-247&quot; title=&quot;Lustre client slow performance on BG/P IONs: unaligned DIRECT_IO&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-247&quot;&gt;&lt;del&gt;LU-247&lt;/del&gt;&lt;/a&gt; (unaligned dio) is my next project.)&lt;/p&gt;

&lt;p&gt;Also, other than with direct i/o, I&apos;m not sure how to actually achieve small i/os (write aggregation or read ahead will prevent them), except for random reads.  (Which do see a benefit - I didn&apos;t mention that, but they see the same sort of improvement.)&lt;/p&gt;

&lt;p&gt;So, I suppose I would say:&lt;br/&gt;
I want to try all of that, I agree that it would likely benefit enormously (you pointed this out in your earlier comments to this LU) but I believe I need &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-247&quot; title=&quot;Lustre client slow performance on BG/P IONs: unaligned DIRECT_IO&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-247&quot;&gt;&lt;del&gt;LU-247&lt;/del&gt;&lt;/a&gt; to make it really possible, since direct i/o is the only way I know of to A) skip the page cache, B) force small i/o, and C) move the locking to the server (I can fake the effect of that by doing my i/o from one node.)&lt;/p&gt;

&lt;p&gt;Is there some easier route I&apos;ve missed?&lt;/p&gt;</comment>
                            <comment id="213273" author="gerrit" created="Thu, 9 Nov 2017 20:06:52 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/27767/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27767/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1757&quot; title=&quot;Short I/O support&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1757&quot;&gt;&lt;del&gt;LU-1757&lt;/del&gt;&lt;/a&gt; brw: add short io osc/ost transfer.&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 70f092a0587866662735e1a6eaf27701a576370d&lt;/p&gt;</comment>
                            <comment id="213284" author="mdiep" created="Thu, 9 Nov 2017 20:40:56 +0000"  >&lt;p&gt;Landed for 2.11&lt;/p&gt;</comment>
                            <comment id="214772" author="adilger" created="Tue, 28 Nov 2017 03:32:58 +0000"  >&lt;p&gt;This was landed for 2.11, but Data-on-MDT landed at the same time. The MDS connection does not support SHORTIO yet, but it should. &lt;/p&gt;</comment>
                            <comment id="215581" author="gerrit" created="Thu, 7 Dec 2017 18:04:58 +0000"  >&lt;p&gt;Patrick Farrell (paf@cray.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/30435&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30435&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1757&quot; title=&quot;Short I/O support&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1757&quot;&gt;&lt;del&gt;LU-1757&lt;/del&gt;&lt;/a&gt; brw: Fix short i/o and enable for mdc&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 8efc38861cb224d69c012862f6e8ae453b890d17&lt;/p&gt;</comment>
                            <comment id="215583" author="paf" created="Thu, 7 Dec 2017 18:05:58 +0000"  >&lt;p&gt;Original patch did not actually enable this functionality.&lt;/p&gt;</comment>
                            <comment id="217043" author="gerrit" created="Fri, 22 Dec 2017 06:48:52 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/30435/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30435/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1757&quot; title=&quot;Short I/O support&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1757&quot;&gt;&lt;del&gt;LU-1757&lt;/del&gt;&lt;/a&gt; brw: Fix short i/o and enable for mdc&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 3483e195314bddb8d72594ebb10307c83a4bb860&lt;/p&gt;</comment>
                            <comment id="217108" author="pjones" created="Fri, 22 Dec 2017 12:45:41 +0000"  >&lt;p&gt;Second time lucky?&lt;/p&gt;</comment>
                            <comment id="233576" author="gerrit" created="Sat, 15 Sep 2018 18:48:59 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/33173&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/33173&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1757&quot; title=&quot;Short I/O support&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1757&quot;&gt;&lt;del&gt;LU-1757&lt;/del&gt;&lt;/a&gt; osc: clarify short_io_bytes is maximum value&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 8e3e67f0cfdec0bb0a96f9e4fc1793fef7558867&lt;/p&gt;</comment>
                            <comment id="234864" author="gerrit" created="Fri, 12 Oct 2018 23:50:10 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/33173/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/33173/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1757&quot; title=&quot;Short I/O support&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1757&quot;&gt;&lt;del&gt;LU-1757&lt;/del&gt;&lt;/a&gt; osc: clarify short_io_bytes is maximum value&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: b90812a674f6ebaa9de592a4a4d97a35ed38a24e&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="49050">LU-10176</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="18725">LU-3285</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="57147">LU-12856</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="49410">LU-10264</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="49456">LU-10289</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="45743">LU-9409</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvqav:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>8137</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                </customfields>
    </item>
</channel>
</rss>