<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:20:26 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-8775] ZFS OST has an extra 128 byte for each IO</title>
                <link>https://jira.whamcloud.com/browse/LU-8775</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;It seems that, when ZFS is used as the backend file system for Lustre OST, there is an additional 128 bytes for each IO. &lt;/p&gt;

&lt;p&gt;For example, write 1GB write to a ZFS based file system. &lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
# dd if=/dev/zero of=File_1GB count=1024 bs=1M
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB) copied, 0.869192 s, 1.2 GB/s
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@kapollo01 ~]# lctl get_param obdfilter.edafs-OST0001.brw_stats
obdfilter.edafs-OST0001.brw_stats=
snapshot_time:         1477281605.706719 (secs.usecs)

                           read      |     write
pages per bulk r/w     rpcs  % cum % |  rpcs        % cum %
1:                       0   0   0   | 1024  50  50
2:                       0   0   0   |    0   0  50
4:                       0   0   0   |    0   0  50
8:                       0   0   0   |    0   0  50
16:                      0   0   0   |    0   0  50
32:                      0   0   0   |    0   0  50
64:                      0   0   0   |    0   0  50
128:                     0   0   0   |    0   0  50
256:                     0   0   0   | 1024  50 100

                           read      |     write
discontiguous pages    rpcs  % cum % |  rpcs        % cum %
0:                       0   0   0   | 1024  50  50
1:                       0   0   0   |    0   0  50
2:                       0   0   0   |    0   0  50
3:                       0   0   0   |    0   0  50
4:                       0   0   0   |    0   0  50
5:                       0   0   0   |    0   0  50
6:                       0   0   0   |    0   0  50
7:                       0   0   0   |    0   0  50
8:                       0   0   0   |    0   0  50
9:                       0   0   0   |    0   0  50
10:                      0   0   0   |    0   0  50
11:                      0   0   0   |    0   0  50
12:                      0   0   0   |    0   0  50
13:                      0   0   0   |    0   0  50
14:                      0   0   0   |    0   0  50
15:                      0   0   0   |    0   0  50
16:                      0   0   0   |    0   0  50
17:                      0   0   0   |    0   0  50
18:                      0   0   0   |    0   0  50
19:                      0   0   0   |    0   0  50
20:                      0   0   0   |    0   0  50
21:                      0   0   0   |    0   0  50
22:                      0   0   0   |    0   0  50
23:                      0   0   0   |    0   0  50
24:                      0   0   0   |    0   0  50
25:                      0   0   0   |    0   0  50
26:                      0   0   0   |    0   0  50
27:                      0   0   0   |    0   0  50
28:                      0   0   0   |    0   0  50
29:                      0   0   0   |    0   0  50
30:                      0   0   0   |    0   0  50
31:                      0   0   0   | 1024  50 100

                           read      |     write
disk I/Os in flight    ios   % cum % |  ios         % cum %
1:                       0   0   0   | 2039  99  99
2:                       0   0   0   |    9   0 100

                           read      |     write
I/O time (1/1000s)     ios   % cum % |  ios         % cum %

                           read      |     write
disk I/O size          ios   % cum % |  ios         % cum %
128:                     0   0   0   | 1024  50  50
256:                     0   0   0   |    0   0  50
512:                     0   0   0   |    0   0  50
1K:                      0   0   0   |    0   0  50
2K:                      0   0   0   |    0   0  50
4K:                      0   0   0   |    0   0  50
8K:                      0   0   0   |    0   0  50
16K:                     0   0   0   |    0   0  50
32K:                     0   0   0   |    0   0  50
64K:                     0   0   0   |    0   0  50
128K:                    0   0   0   |    0   0  50
256K:                    0   0   0   |    0   0  50
512K:                    0   0   0   |    0   0  50
1M:                      0   0   0   | 1024  50 100
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;On the other side, such an additional 128-byte IO does not exist. &lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@kcli01 ost01]# dd if=/dev/zero of=File_1GB count=1024 bs=1M
1024+0 records in
1024+0 records out
1073741824 bytes (1.1 GB) copied, 1.79642 s, 598 MB/s
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@koss02 ~]# lctl get_param obdfilter.krakenfs-OST0001.brw_stats
obdfilter.krakenfs-OST0001.brw_stats=
snapshot_time:         1477297530.705071 (secs.usecs)

                           read      |     write
pages per bulk r/w     rpcs  % cum % |  rpcs        % cum %
1K:                      0   0   0   |  256 100 100

                           read      |     write
discontiguous pages    rpcs  % cum % |  rpcs        % cum %
0:                       0   0   0   |  256 100 100

                           read      |     write
discontiguous blocks   rpcs  % cum % |  rpcs        % cum %
0:                       0   0   0   |  256 100 100

                           read      |     write
disk fragmented I/Os   ios   % cum % |  ios         % cum %
4:                       0   0   0   |  255  99  99
5:                       0   0   0   |    0   0  99
6:                       0   0   0   |    1   0 100

                           read      |     write
disk I/Os in flight    ios   % cum % |  ios         % cum %
1:                       0   0   0   |    4   0   0
2:                       0   0   0   |    5   0   0
3:                       0   0   0   |    5   0   1
4:                       0   0   0   |    5   0   1
5:                       0   0   0   |    2   0   2
6:                       0   0   0   |    2   0   2
7:                       0   0   0   |    1   0   2
8:                       0   0   0   |    1   0   2
9:                       0   0   0   |    1   0   2
10:                      0   0   0   |    1   0   2
11:                      0   0   0   |    1   0   2
12:                      0   0   0   |    1   0   2
13:                      0   0   0   |    3   0   3
14:                      0   0   0   |   11   1   4
15:                      0   0   0   |   23   2   6
16:                      0   0   0   |   44   4  10
17:                      0   0   0   |   81   7  18
18:                      0   0   0   |  118  11  30
19:                      0   0   0   |  144  14  44
20:                      0   0   0   |  133  12  57
21:                      0   0   0   |  104  10  67
22:                      0   0   0   |   72   7  74
23:                      0   0   0   |   45   4  78
24:                      0   0   0   |   40   3  82
25:                      0   0   0   |   42   4  86
26:                      0   0   0   |   42   4  90
27:                      0   0   0   |   37   3  94
28:                      0   0   0   |   29   2  97
29:                      0   0   0   |   16   1  98
30:                      0   0   0   |    8   0  99
31:                      0   0   0   |    5   0 100

                           read      |     write
I/O time (1/1000s)     ios   % cum % |  ios         % cum %
4:                       0   0   0   |    3   1   1
8:                       0   0   0   |    0   0   1
16:                      0   0   0   |   13   5   6
32:                      0   0   0   |  172  67  73
64:                      0   0   0   |   54  21  94
128:                     0   0   0   |   14   5 100

                           read      |     write
disk I/O size          ios   % cum % |  ios         % cum %
512K:                    0   0   0   |    2   0   0
1M:                      0   0   0   | 1024  99 100
 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>ZFS-Lustre: lustre 2.8.57_62_g919224d, zfs-0.7.0&lt;br/&gt;
ldiskfs-lustre: lustre 2.7.16.7</environment>
        <key id="40974">LU-8775</key>
            <summary>ZFS OST has an extra 128 byte for each IO</summary>
                <type id="4" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11310&amp;avatarType=issuetype">Improvement</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bzzz">Alex Zhuravlev</assignee>
                                    <reporter username="zhiqi">Zhiqi Tao</reporter>
                        <labels>
                    </labels>
                <created>Mon, 24 Oct 2016 05:31:40 +0000</created>
                <updated>Tue, 28 Mar 2017 21:26:08 +0000</updated>
                            <resolved>Sat, 29 Oct 2016 00:23:55 +0000</resolved>
                                                    <fixVersion>Lustre 2.9.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>8</watches>
                                                                            <comments>
                            <comment id="170706" author="adilger" created="Mon, 24 Oct 2016 07:26:11 +0000"  >&lt;p&gt;There is a ZFS patch that AFAIK Alex was working on to do I/O queue plugging, so that ZFS didn&apos;t submit 1MB writes to disk without giving them a chance to merge.&lt;/p&gt;</comment>
                            <comment id="170707" author="gabriele.paciucci" created="Mon, 24 Oct 2016 07:38:56 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=zhiqi&quot; class=&quot;user-hover&quot; rel=&quot;zhiqi&quot;&gt;zhiqi&lt;/a&gt; I think this is the reason why we suggest 9+2 instead of 8+2 ?&lt;/p&gt;</comment>
                            <comment id="170710" author="jay" created="Mon, 24 Oct 2016 07:56:19 +0000"  >&lt;p&gt;From what I can see in osd_zfs code, it (wrongly) does statistic in osd_write(), which is supposed to be used to update internal data structures such as last_rcvd, llog, etc. ldiskfs only records I/O statistic from client side.&lt;/p&gt;</comment>
                            <comment id="170818" author="zhiqi" created="Mon, 24 Oct 2016 21:02:25 +0000"  >&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@kcli01 ost01]# dd if=/dev/zero of=File_2 count=1024 bs=100K
1024+0 records in
1024+0 records out
104857600 bytes (105 MB) copied, 0.111276 s, 942 MB/s

[root@kcli01 ost01]# ls -lh File_2
-rw-r--r-- 1 root root 100M Oct 24 14:51 File_2

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;It appears that the 128 bytes are associated with each 1MB instead of being associated with individual IOs. It&apos;s nice to see that Lustre ZFS merged data together and committed together.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
                           read      |     write
disk I/Os in flight    ios   % cum % |  ios         % cum %
1:                       0   0   0   |  201 100 100

                           read      |     write
I/O time (1/1000s)     ios   % cum % |  ios         % cum %

                           read      |     write
disk I/O size          ios   % cum % |  ios         % cum %
128:                     0   0   0   |  101  50  50
256:                     0   0   0   |    0   0  50
512:                     0   0   0   |    0   0  50
1K:                      0   0   0   |    0   0  50
2K:                      0   0   0   |    0   0  50
4K:                      0   0   0   |    0   0  50
8K:                      0   0   0   |    0   0  50
16K:                     0   0   0   |    0   0  50
32K:                     0   0   0   |    0   0  50
64K:                     0   0   0   |    0   0  50
128K:                    0   0   0   |    0   0  50
256K:                    0   0   0   |    0   0  50
512K:                    0   0   0   |    0   0  50
1M:                      0   0   0   |  100  49 100
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="170819" author="zhiqi" created="Mon, 24 Oct 2016 21:04:09 +0000"  >&lt;p&gt;Ldiskfs obviously does the same merge and commit. &lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;                           read      |     write
I/O time (1/1000s)     ios   % cum % |  ios         % cum %
4:                       0   0   0   |    1   4   4
8:                       0   0   0   |    0   0   4
16:                      0   0   0   |    0   0   4
32:                      0   0   0   |    1   4   8
64:                      0   0   0   |    1   4  12
128:                     0   0   0   |   12  48  60
256:                     0   0   0   |    2   8  68
512:                     0   0   0   |    7  28  96
1K:                      0   0   0   |    1   4 100

                           read      |     write
disk I/O size          ios   % cum % |  ios         % cum %
512K:                    0   0   0   |    4   3   3
1M:                      0   0   0   |   99  96 100
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="170821" author="zhiqi" created="Mon, 24 Oct 2016 21:11:43 +0000"  >&lt;p&gt;Actually the merge happened on the client side. &lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;                        read                    write
pages per rpc         rpcs   % cum % |       rpcs   % cum %
1:                       0   0   0   |          0   0   0
2:                       0   0   0   |          0   0   0
4:                       0   0   0   |          0   0   0
8:                       0   0   0   |          0   0   0
16:                      0   0   0   |          0   0   0
32:                      0   0   0   |          0   0   0
64:                      0   0   0   |          0   0   0
128:                     0   0   0   |          0   0   0
256:                     0   0   0   |          0   0   0
512:                     0   0   0   |          0   0   0
1024:                    0   0   0   |         25 100 100
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="170824" author="zhiqi" created="Mon, 24 Oct 2016 21:33:33 +0000"  >&lt;p&gt;With sync, LDISKFS&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@kcli01 ost01]# dd if=/dev/zero of=File_2 count=1024 bs=100K oflag=sync
1024+0 records in
1024+0 records out
104857600 bytes (105 MB) copied, 2.58155 s, 40.6 MB/s

[root@kcli01 ost01]# lctl get_param osc.krakenfs-OST0001*.rpc_stats
osc.krakenfs-OST0001-osc-ffff880035853800.rpc_stats=
snapshot_time:         1477343197.889076 (secs.usecs)
read RPCs in flight:  0
write RPCs in flight: 0
pending write pages:  0
pending read pages:   0

                        read                    write
pages per rpc         rpcs   % cum % |       rpcs   % cum %
1:                       0   0   0   |          0   0   0
2:                       0   0   0   |          0   0   0
4:                       0   0   0   |          0   0   0
8:                       0   0   0   |          0   0   0
16:                      0   0   0   |          0   0   0
32:                      0   0   0   |       1024 100 100

                        read                    write
rpcs in flight        rpcs   % cum % |       rpcs   % cum %
0:                       0   0   0   |          0   0   0
1:                       0   0   0   |       1024 100 100

[root@koss02 ~]# lctl get_param obdfilter.krakenfs-OST0001.brw_stats
obdfilter.krakenfs-OST0001.brw_stats=
snapshot_time:         1477353369.955434 (secs.usecs)

                           read      |     write
pages per bulk r/w     rpcs  % cum % |  rpcs        % cum %
32:                      0   0   0   | 1024 100 100

                           read      |     write
discontiguous pages    rpcs  % cum % |  rpcs        % cum %
0:                       0   0   0   | 1024 100 100

                           read      |     write
discontiguous blocks   rpcs  % cum % |  rpcs        % cum %
0:                       0   0   0   | 1024 100 100

                           read      |     write
disk fragmented I/Os   ios   % cum % |  ios         % cum %
1:                       0   0   0   | 1024 100 100

                           read      |     write
disk I/Os in flight    ios   % cum % |  ios         % cum %
1:                       0   0   0   | 1024 100 100

                           read      |     write
I/O time (1/1000s)     ios   % cum % |  ios         % cum %
1:                       0   0   0   | 1023  99  99
2:                       0   0   0   |    0   0  99
4:                       0   0   0   |    1   0 100

                           read      |     write
disk I/O size          ios   % cum % |  ios         % cum %
128K:                    0   0   0   | 1024 100 100

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;With Sync, ZFS OST&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@kcli01 ost01]# dd if=/dev/zero of=File_2 count=1024 bs=100K oflag=sync
1024+0 records in
1024+0 records out
104857600 bytes (105 MB) copied, 905.785 s, 116 kB/s

[root@kcli01 ost01]# lctl get_param osc.edafs-OST0001*.rpc_stats
osc.edafs-OST0001-osc-ffff880fc7dce800.rpc_stats=
snapshot_time:         1477344403.356697 (secs.usecs)
read RPCs in flight:  0
write RPCs in flight: 0
pending write pages:  0
pending read pages:   0

                        read                    write
pages per rpc         rpcs   % cum % |       rpcs   % cum %
1:                       0   0   0   |          0   0   0
2:                       0   0   0   |          0   0   0
4:                       0   0   0   |          0   0   0
8:                       0   0   0   |          0   0   0
16:                      0   0   0   |          0   0   0
32:                      0   0   0   |       1024 100 100

                        read                    write
rpcs in flight        rpcs   % cum % |       rpcs   % cum %
0:                       0   0   0   |          0   0   0
1:                       0   0   0   |       1024 100 100

                        read                    write
offset                rpcs   % cum % |       rpcs   % cum %
0:                       0   0   0   |          1   0   0
1:                       0   0   0   |          0   0   0
2:                       0   0   0   |          0   0   0
4:                       0   0   0   |          0   0   0
8:                       0   0   0   |          0   0   0
16:                      0   0   0   |          1   0   0
32:                      0   0   0   |          1   0   0
64:                      0   0   0   |          3   0   0
128:                     0   0   0   |          5   0   1
256:                     0   0   0   |         10   0   2
512:                     0   0   0   |         20   1   4
1024:                    0   0   0   |         41   4   8
2048:                    0   0   0   |         82   8  16
4096:                    0   0   0   |        164  16  32
8192:                    0   0   0   |        328  32  64
16384:                   0   0   0   |        368  35 100

[root@kapollo01 ~]# lctl get_param obdfilter.edafs-OST0001.brw_stats
obdfilter.edafs-OST0001.brw_stats=
snapshot_time:         1477362848.412812 (secs.usecs)

                           read      |     write
pages per bulk r/w     rpcs  % cum % |  rpcs        % cum %
1:                       0   0   0   | 1025  50  50
2:                       0   0   0   |    0   0  50
4:                       0   0   0   |    0   0  50
8:                       0   0   0   |    0   0  50
16:                      0   0   0   |    0   0  50
32:                      0   0   0   | 1024  49 100

                           read      |     write
discontiguous pages    rpcs  % cum % |  rpcs        % cum %
0:                       0   0   0   | 1025  50  50
1:                       0   0   0   |    0   0  50
2:                       0   0   0   |    0   0  50
3:                       0   0   0   |    0   0  50
4:                       0   0   0   |    0   0  50
5:                       0   0   0   |    0   0  50
6:                       0   0   0   |    0   0  50
7:                       0   0   0   |    0   0  50
8:                       0   0   0   |    0   0  50
9:                       0   0   0   |    0   0  50
10:                      0   0   0   |    0   0  50
11:                      0   0   0   |    0   0  50
12:                      0   0   0   |    0   0  50
13:                      0   0   0   |    0   0  50
14:                      0   0   0   |    0   0  50
15:                      0   0   0   |    0   0  50
16:                      0   0   0   |    0   0  50
17:                      0   0   0   |    0   0  50
18:                      0   0   0   |    0   0  50
19:                      0   0   0   |    0   0  50
20:                      0   0   0   |    0   0  50
21:                      0   0   0   |    0   0  50
22:                      0   0   0   |    0   0  50
23:                      0   0   0   |    0   0  50
24:                      0   0   0   | 1024  49 100

                           read      |     write
disk I/Os in flight    ios   % cum % |  ios         % cum %
1:                       0   0   0   | 2049 100 100

                           read      |     write
I/O time (1/1000s)     ios   % cum % |  ios         % cum %

                           read      |     write
disk I/O size          ios   % cum % |  ios         % cum %
128:                     0   0   0   | 1025  50  50
256:                     0   0   0   |    0   0  50
512:                     0   0   0   |    0   0  50
1K:                      0   0   0   |    0   0  50
2K:                      0   0   0   |    0   0  50
4K:                      0   0   0   |    0   0  50
8K:                      0   0   0   |    0   0  50
16K:                     0   0   0   |    0   0  50
32K:                     0   0   0   |    0   0  50
64K:                     0   0   0   |    0   0  50
128K:                    0   0   0   | 1024  49 100
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;So this 128 byte is actually associated with each disk IO.&lt;/p&gt;</comment>
                            <comment id="170827" author="adilger" created="Mon, 24 Oct 2016 21:43:38 +0000"  >&lt;p&gt;I&apos;d agree with Jinshan - &lt;tt&gt;brw_stats&lt;/tt&gt; should only contain information about the BRW data read/write operations.  If the administrator wants to monitor all writes going to a device then they can use &lt;tt&gt;iostat&lt;/tt&gt; or similar.&lt;/p&gt;

&lt;p&gt;Zhiqi, your latest data shows that the client is only sending 128KB RPCs, but the osd-zfs code is incorrectly accounting the extra 128-byte local write (suspect &lt;tt&gt;last_rcvd&lt;/tt&gt;) in the &lt;tt&gt;brw_stats&lt;/tt&gt; when it shouldn&apos;t.  I don&apos;t know if this is easily fixed or not.&lt;/p&gt;

&lt;p&gt;Alex, could you please take a quick look?&lt;/p&gt;</comment>
                            <comment id="170950" author="gerrit" created="Tue, 25 Oct 2016 14:29:41 +0000"  >&lt;p&gt;Alex Zhuravlev (alexey.zhuravlev@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/23363&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/23363&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8775&quot; title=&quot;ZFS OST has an extra 128 byte for each IO&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8775&quot;&gt;&lt;del&gt;LDEV-521&lt;/del&gt;&lt;/a&gt; osd: do not report special writes in brw stats&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: a68320ec32de1a3e765cc5fa0e7610128135e019&lt;/p&gt;</comment>
                            <comment id="171520" author="zhiqi" created="Fri, 28 Oct 2016 05:23:09 +0000"  >&lt;p&gt;I tried this build and confirm that the 128 bytes are no longer there. &lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@kcli05 ~]# dd if=/dev/zero of=/mnt/lustre/File_2 count=100 bs=101K oflag=sync
100+0 records in
100+0 records out
10342400 bytes (10 MB) copied, 14.1616 s, 730 kB/s
[root@kcli05 ~]# lctl get_param obdfilter.lustre-OST0001.brw_stats
obdfilter.lustre-OST0001.brw_stats=
snapshot_time:         1477631083.556473 (secs.usecs)

                           read      |     write
pages per bulk r/w     rpcs  % cum % |  rpcs        % cum %
32:                      0   0   0   |  100 100 100

                           read      |     write
discontiguous pages    rpcs  % cum % |  rpcs        % cum %
25:                      0   0   0   |  100 100 100

                           read      |     write
disk I/Os in flight    ios   % cum % |  ios         % cum %
1:                       0   0   0   |  100 100 100

                           read      |     write
I/O time (1/1000s)     ios   % cum % |  ios         % cum %

                           read      |     write
disk I/O size          ios   % cum % |  ios         % cum %
128K:                    0   0   0   |  100 100 100
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="171676" author="gerrit" created="Fri, 28 Oct 2016 23:50:21 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/23363/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/23363/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8775&quot; title=&quot;ZFS OST has an extra 128 byte for each IO&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8775&quot;&gt;&lt;del&gt;LU-8775&lt;/del&gt;&lt;/a&gt; osd: do not report special writes in brw stats&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: ff9ac8af1e5bb009f0353e28fcea63902cff0066&lt;/p&gt;</comment>
                            <comment id="171694" author="pjones" created="Sat, 29 Oct 2016 00:23:55 +0000"  >&lt;p&gt;Landed for 2.9&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                                        </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzyt4v:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                </customfields>
    </item>
</channel>
</rss>