<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:26:35 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2600] lustre metadata performance is very slow on zfs</title>
                <link>https://jira.whamcloud.com/browse/LU-2600</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;mds-survey show create and unlink are very slow&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds01 mds01&amp;#93;&lt;/span&gt;# tests_str=&quot;create lookup destroy&quot; thrlo=192 thrhi=192 file_count=3840000 mds-survey &lt;br/&gt;
Wed Jan  2 16:43:00 PST 2013 /usr/bin/mds-survey from mds01&lt;br/&gt;
mdt 1 file 3840000 dir  192 thr  192 create 2220.02 [   0.00,19997.98] lookup 9429.79 [   0.00,41998.40] destroy 1545.46 [   0.00,15998.32] &lt;br/&gt;
done!&lt;/p&gt;
</description>
                <environment>mdt is a zpool with 3 sata drives&lt;br/&gt;
&lt;br/&gt;
&amp;nbsp;&amp;nbsp;pool: pool2&lt;br/&gt;
&amp;nbsp;state: ONLINE&lt;br/&gt;
&amp;nbsp;scan: none requested&lt;br/&gt;
config:&lt;br/&gt;
&lt;br/&gt;
&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;NAME        STATE     READ WRITE CKSUM&lt;br/&gt;
&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;pool2       ONLINE       0     0     0&lt;br/&gt;
&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;sdb       ONLINE       0     0     0&lt;br/&gt;
&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;sdg       ONLINE       0     0     0&lt;br/&gt;
&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;sdh       ONLINE       0     0     0&lt;br/&gt;
</environment>
        <key id="17129">LU-2600</key>
            <summary>lustre metadata performance is very slow on zfs</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bzzz">Alex Zhuravlev</assignee>
                                    <reporter username="mdiep">Minh Diep</reporter>
                        <labels>
                            <label>JL</label>
                            <label>performance</label>
                            <label>prz</label>
                            <label>zfs</label>
                    </labels>
                <created>Thu, 10 Jan 2013 02:52:28 +0000</created>
                <updated>Thu, 9 Jan 2020 07:23:54 +0000</updated>
                            <resolved>Thu, 9 Jan 2020 07:23:54 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>16</watches>
                                                                            <comments>
                            <comment id="50249" author="bzzz" created="Thu, 10 Jan 2013 02:55:14 +0000"  >&lt;p&gt;if possible it&apos;d be interesting to try with:&lt;br/&gt;
1) quota accounting disabled (comment two zap_increment_int() in osd_object_create() in osd-zfs)&lt;br/&gt;
2) lma setting disabled (comment osd_init_lma() in tat osd_object_create())&lt;/p&gt;</comment>
                            <comment id="50427" author="mdiep" created="Mon, 14 Jan 2013 12:20:02 +0000"  >&lt;p&gt;1) quota accounting disabled (comment two zap_increment_int() in osd_object_create() in osd-zfs)&lt;/p&gt;

&lt;p&gt;zfs&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds01 mds01&amp;#93;&lt;/span&gt;# dir_count=192 thrlo=192 thrhi=192 file_count=3840000 mds-survey&lt;br/&gt;
Tue Jan 15 08:46:01 PST 2013 /usr/bin/mds-survey from mds01&lt;br/&gt;
mdt 1 file 3840000 dir  192 thr  192 create 3033.82 [   0.00,20997.59] lookup 8937.11 [   0.00,38997.93] md_getattr 2386.50 [   0.00,19996.00] setxattr 2788.85 [   0.00,16998.22] destroy 1572.63 [   0.00,15998.16] &lt;br/&gt;
done!&lt;/p&gt;

&lt;p&gt;ldiskfs&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds01 mds01&amp;#93;&lt;/span&gt;# dir_count=192 thrlo=192 thrhi=192 file_count=3840000 mds-survey&lt;br/&gt;
Tue Jan 15 08:27:37 PST 2013 /usr/bin/mds-survey from mds01&lt;br/&gt;
mdt 1 file 3840000 dir  192 thr  192 create 12324.83 [   0.00,191982.34] lookup 2082165.28 &lt;span class=&quot;error&quot;&gt;&amp;#91;2082165.28,2082165.28&amp;#93;&lt;/span&gt; md_getattr 849267.98 &lt;span class=&quot;error&quot;&gt;&amp;#91;807941.83,807941.83&amp;#93;&lt;/span&gt; setxattr 13708.98 [   0.00,191982.53] destroy 15192.13 [   0.00,191980.23] &lt;br/&gt;
done!&lt;/p&gt;


&lt;p&gt;2) lma setting disabled (comment osd_init_lma() in tat osd_object_create()) + quota acct disable&lt;/p&gt;

&lt;p&gt;zfs&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds01 mds01&amp;#93;&lt;/span&gt;# dir_count=192 thrlo=192 thrhi=192 file_count=3840000 mds-survey&lt;br/&gt;
Mon Jan 14 22:17:39 PST 2013 /usr/bin/mds-survey from mds01&lt;br/&gt;
mdt 1 file 3840000 dir  192 thr  192 create 6278.19 [   0.00,20997.61] lookup 60380.25 [   0.00,128983.49] md_getattr 57846.24 [   0.00,179987.40] setxattr&lt;br/&gt;
3235.05 [   0.00,57991.13] destroy 2234.80 [   0.00,14998.41] &lt;br/&gt;
done!&lt;/p&gt;

&lt;p&gt;ldiskfs&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds01 mds01&amp;#93;&lt;/span&gt;# dir_count=192 thrlo=192 thrhi=192 file_count=3840000 mds-survey                                                                                        Mon Jan 14 23:26:56 PST 2013 /usr/bin/mds-survey from mds01&lt;br/&gt;
mdt 1 file 3840000 dir  192 thr  192 create 13928.46 [   0.00,191979.27] lookup 2028421.14 &lt;span class=&quot;error&quot;&gt;&amp;#91;2028421.14,2028421.14&amp;#93;&lt;/span&gt; md_getattr 829627.73 &lt;span class=&quot;error&quot;&gt;&amp;#91;809944.92,809944.92&amp;#93;&lt;/span&gt; setxattr 16770.19 [   0.00,191985.22] destroy 14236.86 [   0.00,183984.18] &lt;br/&gt;
done!&lt;/p&gt;</comment>
                            <comment id="50637" author="bzzz" created="Thu, 17 Jan 2013 02:41:52 +0000"  >&lt;p&gt;please, collect:&lt;/p&gt;

&lt;p&gt;1) /proc/spl/kstat/zfs/dmu_tx - just before the run and right after&lt;br/&gt;
   to see how often txgs are overflowed&lt;/p&gt;

&lt;p&gt;2) /proc/spl/kstat/zfs/txgs-*-mdt1 - few times during the run&lt;br/&gt;
   to see amount of reads/writes, lifetime of txgs&lt;/p&gt;

&lt;p&gt;also, on my local setup (4GB RAM) I observed with 16+ threads txgs are overflowed all the time&lt;br/&gt;
and only 8 threads let txg to go well. but with 8 threads overall performance isn&apos;t that greast.&lt;br/&gt;
please try with different number of threads, collect (1) and (2)&lt;/p&gt;
</comment>
                            <comment id="58994" author="cliffw" created="Tue, 21 May 2013 17:50:12 +0000"  >&lt;p&gt;agb5 -MDS/MGS, iwc client, dit29 -OSS&lt;/p&gt;</comment>
                            <comment id="58996" author="pjones" created="Tue, 21 May 2013 17:57:29 +0000"  >&lt;p&gt;Alex&lt;/p&gt;

&lt;p&gt;Could you please pass comment on this?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="59019" author="cliffw" created="Tue, 21 May 2013 22:23:13 +0000"  >&lt;p&gt;Oprofile data from second mdtest run. MDS/MGS = agb5, OSS = agb14, client = iwc44&lt;/p&gt;</comment>
                            <comment id="59020" author="cliffw" created="Tue, 21 May 2013 22:32:34 +0000"  >&lt;p&gt;We have done further testing with oprofile on Hyperion. We see ZFS performance about 1/2 to 1/3 of ldiskfs performance on the same hardware. &lt;br/&gt;
Sample ZFS run of mdtest 64 clients:&lt;br/&gt;
MDTEST RESULTS&lt;br/&gt;
0000: SUMMARY: (of 2 iterations)&lt;br/&gt;
0000:    Operation                  Max        Min       Mean    Std Dev&lt;br/&gt;
0000:    ---------                  &amp;#8212;        &amp;#8212;       ----    -------&lt;br/&gt;
0000:    Directory creation:   6763.114   6344.339   6553.727    209.388&lt;br/&gt;
0000:    Directory stat    :  90793.846  81346.095  86069.971   4723.875&lt;br/&gt;
0000:    Directory removal :   8081.377   7495.103   7788.240    293.137&lt;br/&gt;
0000:    File creation     :   5954.588   4746.843   5350.716    603.872&lt;br/&gt;
0000:    File stat         : 106938.996 106643.378 106791.187    147.809&lt;br/&gt;
0000:    File removal      :   6058.458   5910.333   5984.395     74.062&lt;br/&gt;
0000:    Tree creation     :      4.561      1.916      3.239      1.323&lt;br/&gt;
0000:    Tree removal      :      4.861      4.851      4.856      0.005&lt;br/&gt;
ldiskfs:&lt;br/&gt;
0000: SUMMARY: (of 5 iterations)&lt;br/&gt;
0000:    Operation                  Max        Min       Mean    Std Dev&lt;br/&gt;
0000:    ---------                  &amp;#8212;        &amp;#8212;       ----    -------&lt;br/&gt;
0000:    Directory creation:  23037.696  13215.305  17736.740   4389.854&lt;br/&gt;
0000:    Directory stat    : 109887.449 108488.532 109053.155    461.796&lt;br/&gt;
0000:    Directory removal :  29968.170  18835.706  26369.453   4035.133&lt;br/&gt;
0000:    File creation     :  25026.952  21532.197  23428.767   1151.335&lt;br/&gt;
0000:    File stat         : 109340.749 107623.272 108451.866    724.519&lt;br/&gt;
0000:    File removal      :  31242.455  19217.523  24616.635   4673.904&lt;br/&gt;
0000:    Tree creation     :     25.988     22.719     24.484      1.050&lt;br/&gt;
0000:    Tree removal      :     16.938     13.962     15.872      1.069&lt;br/&gt;
----------&lt;br/&gt;
oprofile of MDS, one client and one OSS attached.&lt;/p&gt;
</comment>
                            <comment id="59112" author="keith" created="Wed, 22 May 2013 21:01:44 +0000"  >&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;vma      samples  %        image name               app name                 symbol name
ffffffff812d3cd0 89202     8.2903  vmlinux                  vmlinux                  intel_idle
ffffffff8127f7d0 37003     3.4390  vmlinux                  vmlinux                  format_decode
ffffffff812811b0 26557     2.4682  vmlinux                  vmlinux                  vsnprintf
ffffffff8127f3f0 24524     2.2792  vmlinux                  vmlinux                  number
0000000000040540 24463     2.2736  zfs.ko                   zfs.ko                   lzjb_decompress
ffffffff812834c0 20643     1.9185  vmlinux                  vmlinux                  memcpy
ffffffff812d93e0 19812     1.8413  vmlinux                  vmlinux                  port_inb
ffffffff8150f460 17173     1.5960  vmlinux                  vmlinux                  mutex_lock
ffffffff81059540 14331     1.3319  vmlinux                  vmlinux                  find_busiest_group
ffffffff81169160 14164     1.3164  vmlinux                  vmlinux                  kfree
ffffffff81415450 13765     1.2793  vmlinux                  vmlinux                  poll_idle
ffffffff8150f1a0 13696     1.2729  vmlinux                  vmlinux                  mutex_unlock
0000000000040610 13003     1.2085  zfs.ko                   zfs.ko                   lzjb_compress
ffffffff81283780 11341     1.0540  vmlinux                  vmlinux                  memset
0000000000003090 10486     0.9746  spl.ko                   spl.ko                   taskq_thread
ffffffff8150db90 9346      0.8686  vmlinux                  vmlinux                  schedule
ffffffff8127eb70 9255      0.8602  vmlinux                  vmlinux                  strrchr
ffffffff81052130 9172      0.8524  vmlinux                  vmlinux                  mutex_spin_on_owner
ffffffff8127ec90 8590      0.7983  vmlinux                  vmlinux                  strlen
ffffffff8109b960 8120      0.7547  vmlinux                  vmlinux                  __hrtimer_start_range_ns
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Why are print functions so high?&lt;/p&gt;</comment>
                            <comment id="59140" author="bzzz" created="Thu, 23 May 2013 03:51:15 +0000"  >&lt;p&gt;in my local testing it&apos;s EAs which were pretty expensive. also, I&apos;d expect quota accouting contributes to this as well.&lt;/p&gt;</comment>
                            <comment id="59166" author="liang" created="Thu, 23 May 2013 14:40:55 +0000"  >&lt;p&gt;I would say printf is fine, CDEBUG should contribute most of them.&lt;br/&gt;
Actually I cannot get too much information from the oprofile output, everything looks reasonable, which means there could be heavy operations protected by mutex/semaphore.&lt;/p&gt;</comment>
                            <comment id="59171" author="keith" created="Thu, 23 May 2013 15:41:25 +0000"  >&lt;p&gt;Cliff I wonder do you have any iostat data or Lustre /proc stats?&lt;/p&gt;</comment>
                            <comment id="59349" author="bzzz" created="Mon, 27 May 2013 05:28:55 +0000"  >&lt;p&gt;I remember Brian B. said it&apos;s doing OK locally. would you mind to try few createmany in parallel with locally mounted ZFS, please? so we have some basic numbers for pure ZFS? Lustre is doing much more than that (OI, few EAs, etc), but still the numbers would give us some idea.&lt;/p&gt;</comment>
                            <comment id="62433" author="cliffw" created="Tue, 16 Jul 2013 22:23:39 +0000"  >&lt;p&gt;We may be able to do this in the next test session. Kieth, there are no brw_stats available under ZFS &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/sad.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/p&gt;</comment>
                            <comment id="62435" author="keith" created="Tue, 16 Jul 2013 22:41:08 +0000"  >&lt;p&gt;Yea lets test this the next session. &lt;/p&gt;

&lt;p&gt;I will help setup some basic iostat so we can get a little better picture of the data rates to the disk themselves. &lt;/p&gt;
</comment>
                            <comment id="65623" author="adilger" created="Tue, 3 Sep 2013 16:35:20 +0000"  >&lt;p&gt;The patch from &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3671&quot; title=&quot;why are permission changes synchronous?&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3671&quot;&gt;&lt;del&gt;LU-3671&lt;/del&gt;&lt;/a&gt; (&lt;a href=&quot;http://review.whamcloud.com/7257&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/7257&lt;/a&gt; &quot;mdd: sync perm for dir and perm reduction only&quot;) may help this a little bit, but there are still other issues that need to be worked on.&lt;/p&gt;</comment>
                            <comment id="67122" author="adilger" created="Fri, 20 Sep 2013 15:55:43 +0000"  >&lt;p&gt;Actually, my previous comment is incorrect. That patch may help with some real-world workloads like untar, but would not help mds-survey or similar that are not doing chown/chmod. &lt;/p&gt;

&lt;p&gt;In &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2476&quot; title=&quot;poor OST file creation rate performance with zfs backend&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2476&quot;&gt;&lt;del&gt;LU-2476&lt;/del&gt;&lt;/a&gt; Alex posted a link to &lt;a href=&quot;http://review.whamcloud.com/7157&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/7157&lt;/a&gt; &quot;a proto for optimized object accounting&quot; which I think is actually more relevant to this bug.  It batches the quota accounting updates, which was part of the change in Minh&apos;s first test that doubled the ZFS performance. However it wasn&apos;t clear if it was the quota zap or the LMA/LIV xattrs that were the main bottleneck, so it would be good to test those separately. &lt;/p&gt;</comment>
                            <comment id="68112" author="adilger" created="Tue, 1 Oct 2013 20:53:29 +0000"  >&lt;p&gt;Some improvements have been made to ZFS performance, but this is still an ongoing issue so move this to 2.5.1 along with &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2476&quot; title=&quot;poor OST file creation rate performance with zfs backend&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2476&quot;&gt;&lt;del&gt;LU-2476&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="68568" author="thomas.stibor" created="Tue, 8 Oct 2013 08:50:20 +0000"  >&lt;p&gt;I did some benchmarking with Lustre-ZFS vs. Lustre-LDISKFS and ZFS vs. EXT4 with mdtest. The results suggests that the slow metadata performance is probably due to ZFS rather than to Lustre. The following setup is used:&lt;/p&gt;

&lt;p&gt;1 MGS/MDT server, formatted with ldiskfs(ext4) or ZFS &lt;span class=&quot;error&quot;&gt;&amp;#91;build:  2.4.0-RC2-gd3f91c4-PRISTINE-2.6.32-358.6.2.el6_lustre.g230b174.x86_64&amp;#93;&lt;/span&gt;&lt;br/&gt;
1 OSS/OST server, formatted with ZFS &lt;span class=&quot;error&quot;&gt;&amp;#91;build:  v2_4_92_0-ge089a51-CHANGED-3.6.11-lustre-tstibor-build&amp;#93;&lt;/span&gt;&lt;br/&gt;
1 Client &lt;span class=&quot;error&quot;&gt;&amp;#91;build: v2_4_92_0-ge089a51-CHANGED-3.6.11-lustre-tstibor-build&amp;#93;&lt;/span&gt;&lt;br/&gt;
(lustre mountpoint /mnt)&lt;/p&gt;

&lt;p&gt;The benchmark is performed on the client&lt;br/&gt;
and gives the following results:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;** Setup, single MDT0 with ZFS, OSS/OST with ZFS and mdtest executed on the client
-- started at 10/07/2013 16:43:48 --

mdtest-1.9.1 was launched with 1 total task(s) on 1 node(s)
Command line used: ./mdtest -i 20 -b 2 -I 80 -z 5 -d /mnt/mdtest/
Path: /mnt/mdtest
FS: 98.7 TiB   Used FS: 0.0%   Inodes: 0.5 Mi   Used Inodes: 0.0%

1 tasks, 5040 files/directories

SUMMARY: (of 20 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   Directory creation:       1948.194       1717.011       1814.171         58.454
   Directory stat    :       8550.010       7276.497       8112.847        415.032
   Directory removal :       2045.658       1892.629       1963.691         46.917
   File creation     :       1188.975       1118.650       1152.378         18.880
   File stat         :       3398.468       3222.576       3328.069         53.387
   File read         :       8630.149       8034.409       8421.248        151.027
   File removal      :       1393.756       1296.246       1340.168         28.650
   Tree creation     :       1853.699        713.171       1713.243        234.610
   Tree removal      :       1811.968       1600.404       1734.573         42.491

-- finished at 10/07/2013 16:49:14 --
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;** Setup, single MDT0 with ldiskfs (ext4), OSS/OST with ZFS and mdtest executed on the client
-- started at 10/07/2013 15:17:41 --
mdtest-1.9.1 was launched with 1 total task(s) on 1 node(s)
Command line used: ./mdtest -i 20 -b 2 -I 80 -z 5 -d /mnt/mdtest/
Path: /mnt/mdtest
FS: 98.7 TiB   Used FS: 0.0%   Inodes: 32.0 Mi   Used Inodes: 0.0%

1 tasks, 5040 files/directories

SUMMARY: (of 20 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   Directory creation:       3797.437       3241.010       3581.207        179.154
   Directory stat    :       8885.475       8488.148       8680.477         89.058
   Directory removal :       3815.363       3292.796       3638.044        159.870
   File creation     :       2451.821       2284.533       2364.546         49.688
   File stat         :       3532.868       3284.716       3426.642         68.167
   File read         :       8745.646       7888.261       8479.615        199.443
   File removal      :       2659.047       2475.945       2573.788         64.199
   Tree creation     :       3522.699        797.295       3290.452        578.813
   Tree removal      :       3246.246       2869.909       3151.856         75.039

-- finished at 10/07/2013 15:20:52 --
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Roughly speaking ldiskfs is nearly twice as fast as ZFS on the&lt;br/&gt;
artificial metadata tests except on stat calls and read.&lt;/p&gt;

&lt;p&gt;Repeating the experiment, however, this time on plain formated ext4 and ZFS filesystems (no Lustre involved).&lt;br/&gt;
The underlying hardware is the original MGS/MDT Server results in:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;*** EXT4
-- started at 10/08/2013 10:26:55 --

mdtest-1.9.1 was launched with 1 total task(s) on 1 node(s)
Command line used: ./mdtest -i 20 -b 2 -I 80 -z 5 -d /ext4/mdtest
Path: /ext4
FS: 63.0 GiB   Used FS: 0.3%   Inodes: 4.0 Mi   Used Inodes: 0.0%

1 tasks, 5040 files/directories

SUMMARY: (of 20 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   Directory creation:      40562.779      30483.751      35626.407       3019.069
   Directory stat    :     146904.697     144106.646     145177.353        735.623
   Directory removal :      45658.402      18579.207      42666.602       7721.446
   File creation     :      55150.631      54306.775      54710.376        272.139
   File stat         :     145148.567     142614.316     143752.697        712.729
   File read         :     118738.722     115982.356     117299.713        677.185
   File removal      :      74535.433      72932.338      73898.577        552.812
   Tree creation     :      45488.234      19224.529      30160.072       8360.361
   Tree removal      :      21829.091      21270.317      21597.907        166.265

-- finished at 10/08/2013 10:27:06 --
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;*** ZFS
-- started at 10/08/2013 10:24:13 --

mdtest-1.9.1 was launched with 1 total task(s) on 1 node(s)
Command line used: ./mdtest -i 20 -b 2 -I 80 -z 5 -d /zfs/mdtest
Path: /zfs
FS: 63.0 GiB   Used FS: 0.0%   Inodes: 126.0 Mi   Used Inodes: 0.0%

1 tasks, 5040 files/directories

SUMMARY: (of 20 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   Directory creation:      17430.759       3494.324      13857.069       3667.221
   Directory stat    :     126509.106     124125.352     125720.502        641.879
   Directory removal :      17380.099       1341.726      16070.861       3468.179
   File creation     :      19416.201       1946.750      14450.802       4466.843
   File stat         :     126687.275     124279.327     125842.726        602.232
   File read         :     109161.802     106555.834     107863.681        674.730
   File removal      :      18087.791       1073.455      15315.115       5133.140
   Tree creation     :      19085.674       3313.867      17736.690       3428.476
   Tree removal      :      11679.683       1222.614      10843.046       2247.838

-- finished at 10/08/2013 10:24:58 --
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Of course one can question how well such a metadata benchmark are reflecting true working sets, however, by just observing the plain ZFS vs. ext4 one could conclude that the slow metadata performance is NOT due to Lustre.&lt;/p&gt;

&lt;p&gt;Thomas.&lt;/p&gt;</comment>
                            <comment id="83635" author="utopiabound" created="Fri, 9 May 2014 15:27:59 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/#/c/7157/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/7157/&lt;/a&gt; was reverted to fix &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4968&quot; title=&quot;Test failure sanity test_132: umount /mnt/ost2&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4968&quot;&gt;&lt;del&gt;LU-4968&lt;/del&gt;&lt;/a&gt; if this is resubmitted please include the changs that happened for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4944&quot; title=&quot;build fails with latest zfs source&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4944&quot;&gt;&lt;del&gt;LU-4944&lt;/del&gt;&lt;/a&gt; (&lt;a href=&quot;http://review.whamcloud.com/#/c/10064/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/10064/&lt;/a&gt;)&lt;/p&gt;</comment>
                            <comment id="96083" author="isaac" created="Thu, 9 Oct 2014 21:35:00 +0000"  >&lt;p&gt;Just a note that if a patch that uses dsl_sync_task is landed again, we&apos;d need to patch zfs so as not to increase async writes when there&apos;s only nowaiter sync tasks pending. See:&lt;br/&gt;
&lt;a href=&quot;https://github.com/zfsonlinux/zfs/pull/2716#issuecomment-58540555&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/zfsonlinux/zfs/pull/2716#issuecomment-58540555&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="129667" author="bzzz" created="Wed, 7 Oct 2015 07:28:39 +0000"  >&lt;p&gt;ZAP prefetching at object creation should improve metadata performance.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10120">
                    <name>Blocker</name>
                                                                <inwardlinks description="is blocked by">
                                        <issuelink>
            <issuekey id="24642">LU-5041</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="32397">LU-7235</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="12567">LU-2476</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="21440">LU-4108</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="24432">LU-4968</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="23416">LU-4696</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="12890" name="oprofile.tar.gz" size="1849722" author="cliffw" created="Tue, 21 May 2013 17:50:12 +0000"/>
                            <attachment id="12893" name="oprofile2.tar.gz" size="1057900" author="cliffw" created="Tue, 21 May 2013 22:23:13 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvexj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>6060</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>