<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:32:45 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-3305] Quotas affect Metadata performance</title>
                <link>https://jira.whamcloud.com/browse/LU-3305</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;We performed a comparison between 2.3.0, 2.1.5 and current Lustre. We saw a regression in metadata performance compared to 2.3.0. Spreadsheet attached.  &lt;/p&gt;</description>
                <environment>Hyperion/LLNL</environment>
        <key id="18779">LU-3305</key>
            <summary>Quotas affect Metadata performance</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="niu">Niu Yawei</assignee>
                                    <reporter username="cliffw">Cliff White</reporter>
                        <labels>
                    </labels>
                <created>Thu, 9 May 2013 16:31:48 +0000</created>
                <updated>Tue, 17 Sep 2013 04:54:51 +0000</updated>
                            <resolved>Thu, 5 Sep 2013 22:46:24 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                                    <fixVersion>Lustre 2.4.1</fixVersion>
                    <fixVersion>Lustre 2.5.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>20</watches>
                                                                            <comments>
                            <comment id="58033" author="keith" created="Thu, 9 May 2013 17:41:34 +0000"  >&lt;p&gt;Do you have any other data from the runs to share?  Did you confirm the storage was ok for all 3 runs? What did the io stats look like on the MDS?&lt;/p&gt;

&lt;p&gt;What sort of regular variation are you seeing in runs? The &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3281&quot; title=&quot;IO Fails - client stack overrun&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3281&quot;&gt;&lt;del&gt;LU-3281&lt;/del&gt;&lt;/a&gt; and 2.3.64 runs should be about the same. &lt;/p&gt;

&lt;p&gt;What did the Lustre stats in /proc look like?&lt;/p&gt;

&lt;p&gt;Is there any oprofile data from the MDS?&lt;/p&gt;</comment>
                            <comment id="58039" author="adilger" created="Thu, 9 May 2013 17:58:24 +0000"  >&lt;p&gt;Cliff, is it possible for you to easily run the same &quot;mds-survey&quot; test that you ran for 2.3.63 DNE testing on the same hardware with 2.3.0, so that we can compare MDS-local performance between the two versions?  That would help us isolate whether the performance degradation is on the MDS side, or if it relates to the object creation.&lt;/p&gt;

&lt;p&gt;Failing that, is it possible to get Minh to run mds-survey (with and without object creation) on 2.3.0 and 2.3.63 for comparison.&lt;/p&gt;</comment>
                            <comment id="58043" author="cliffw" created="Thu, 9 May 2013 18:09:17 +0000"  >&lt;p&gt;The test itself is not difficult, it&apos;s a question of availability/timing vs other requests for Hyperion. &lt;/p&gt;</comment>
                            <comment id="58045" author="cliffw" created="Thu, 9 May 2013 18:12:44 +0000"  >&lt;p&gt;Keith, the runs are an average of 5 iterations for IOR.  The storage was fine for all three runs. We&apos;ve stopped using the problematic nodes for these tests. There is no oprofile data from the MDS. &lt;/p&gt;</comment>
                            <comment id="58046" author="rread" created="Thu, 9 May 2013 18:17:22 +0000"  >&lt;p&gt;Do you mean mdtest instead of IOR?&lt;/p&gt;</comment>
                            <comment id="58399" author="cliffw" created="Mon, 13 May 2013 21:26:33 +0000"  >&lt;p&gt;Keith was referring to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3281&quot; title=&quot;IO Fails - client stack overrun&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3281&quot;&gt;&lt;del&gt;LU-3281&lt;/del&gt;&lt;/a&gt;, which was a bug involving IOR. &lt;br/&gt;
We also run 5 iterations for mdtest. &lt;/p&gt;</comment>
                            <comment id="58464" author="mdiep" created="Tue, 14 May 2013 16:59:58 +0000"  >&lt;p&gt;Opensfs cluster run&lt;/p&gt;

&lt;p&gt;2.3.65&lt;br/&gt;
mdtest fpp: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/697bfbb2-bca3-11e2-b6d1-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/697bfbb2-bca3-11e2-b6d1-52540035b04c&lt;/a&gt;&lt;br/&gt;
mdtest ssf: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/6b79a220-bca3-11e2-8441-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/6b79a220-bca3-11e2-8441-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2.3.0&lt;br/&gt;
mdtest fpp: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/0206a48c-bcb7-11e2-8441-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/0206a48c-bcb7-11e2-8441-52540035b04c&lt;/a&gt;&lt;br/&gt;
mdtest ssf: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/054d925e-bcb7-11e2-8441-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/054d925e-bcb7-11e2-8441-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="58543" author="liang" created="Wed, 15 May 2013 06:19:01 +0000"  >&lt;p&gt;2.3/2.4 mdtest performance data&lt;/p&gt;</comment>
                            <comment id="58545" author="liang" created="Wed, 15 May 2013 06:22:24 +0000"  >&lt;p&gt;As we can see from these graphs, the major difference is about directory per process creation performance, but stddev of all creation tests are very high, for example, 2.3 directory per process creation, stddev value is above 50% of average value. One possible reason could be journal size, I think our default journal size is still 1GB? I would suggest to have 2G internal journal at least, and repeat 10 or more times.&lt;br/&gt;
Another thing I cannot explain is, why shared directory file stat performance is so much worse than dir per process?&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;image-wrap&quot; style=&quot;&quot;&gt;&lt;img src=&quot;https://jira.hpdd.intel.com/secure/attachment/12826/Screen%20Shot%202013-05-15%20at%202.18.52%20PM.png&quot; style=&quot;border: 0px solid black&quot; /&gt;&lt;/span&gt;&lt;/p&gt;</comment>
                            <comment id="58655" author="ihara" created="Thu, 16 May 2013 14:13:23 +0000"  >&lt;p&gt;I also hit same regression on the latest master commit and started to find where retrogression started. &quot;git bisect&quot; would help. I will post soon which commit causes this regressions.&lt;/p&gt;</comment>
                            <comment id="58773" author="mdiep" created="Fri, 17 May 2013 16:45:06 +0000"  >&lt;p&gt;comparison between 2.3 and 2.4.50 RC1&lt;/p&gt;</comment>
                            <comment id="58791" author="ihara" created="Fri, 17 May 2013 17:33:12 +0000"  >&lt;p&gt;Unique directory&lt;/p&gt;
&lt;div class=&apos;table-wrap&apos;&gt;
&lt;table class=&apos;confluenceTable&apos;&gt;&lt;tbody&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;version&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;commit&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;Dir creation&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;Dir stat&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;Dir removal&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;File creation&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;File stat&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;File removal&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;v2_3_50_0-143&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;9ddf386035767a96b54e21559f3ea0be126dc8cd&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;22167&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;194845&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;&lt;font color=&quot;red&quot;&gt; 32910&lt;/font&gt;&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;50090&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;132461&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;&lt;font color=&quot;red&quot;&gt;36762&lt;/font&gt;&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;v2_3_50_0-142&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;c61d09e9944ae47f68eb159224af7c5456cc180a&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;21835&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;203123&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;46178&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;45941&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;147281&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;67253&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;v2_3_50_0-141&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;48bad5d9db9baa7bca093de5c54294adf1cf8303&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;20542&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;202014&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;48271&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;46264&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;142450&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;66267&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;2.3.50&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;04ec54ff56b83a9114f7a25fbd4aa5f65e68ef7a&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;24727&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;226128&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;34064&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;27859&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;124521&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;34591&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;2.3&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;ee695bf0762f5dbcb2ac6d96354f8d01ad764903&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;23565&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;222482&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;44421&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;48333&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;125765&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;76709&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;&lt;/table&gt;
&lt;/div&gt;


&lt;p&gt;It seems that there are several regression points between 2.3 and 2.4, but at least on my small client testing(tested mdtest on 16 clients, 32 process), commit 9ddf386035767a96b54e21559f3ea0be126dc8cd might be one of regression point for unlink operation. (I collected more data at commit points, but this is one of point big difference between commits.&lt;br/&gt;
I&apos;m still working to verify whether this is exactly the point. will run on large number of client environment. &lt;/p&gt;</comment>
                            <comment id="58801" author="pjones" created="Fri, 17 May 2013 20:07:52 +0000"  >&lt;p&gt;Niu&lt;/p&gt;

&lt;p&gt;Are you able to advise on the reported drop due to the quotas landing?&lt;/p&gt;

&lt;p&gt;Alex/Nasf&lt;/p&gt;

&lt;p&gt;Any comments?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="58818" author="adilger" created="Fri, 17 May 2013 23:22:29 +0000"  >&lt;p&gt;This is a previously known problem discussed in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2442&quot; title=&quot;metadata performance degradation on current master&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2442&quot;&gt;&lt;del&gt;LU-2442&lt;/del&gt;&lt;/a&gt;.  The &quot;on-by-default&quot; quota accounting introduced serialization in the quota layer that broke the SMP scaling optimizations done in the 2.3 code.  This wasn&apos;t fixed until v2_3_63_0-35-g6df197d (&lt;a href=&quot;http://review.whamcloud.com/5010&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/5010&lt;/a&gt;), so this will hide any regressions in the metadata performance when testing on a faster system, unless the quota feature is disabled on the MDS (tune2fs -O ^quota /dev/mdt).&lt;/p&gt;

&lt;p&gt;It may well be that there is still a performance impact from the &quot;on-by-default&quot; quota accounting, which is worthwhile to test before trying to find some other cause for this regression.&lt;/p&gt;</comment>
                            <comment id="59081" author="mdiep" created="Wed, 22 May 2013 16:59:55 +0000"  >&lt;p&gt;oprofile data for mds-survey run:&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds03 lu3305&amp;#93;&lt;/span&gt;# file_count=250000 thrlo=256 thrhi=256 /usr/bin/mds-survey&lt;br/&gt;
Wed May 22 09:45:01 PDT 2013 /usr/bin/mds-survey from mds03&lt;br/&gt;
mdt 1 file  250000 dir  256 thr  256 create 39545.05 &lt;span class=&quot;error&quot;&gt;&amp;#91;39545.05,39545.05&amp;#93;&lt;/span&gt; lookup 2577588.87 &lt;span class=&quot;error&quot;&gt;&amp;#91;2577588.87,2577588.87&amp;#93;&lt;/span&gt; md_getattr 1247558.37 &lt;span class=&quot;error&quot;&gt;&amp;#91;1247558.37,1247558.37&amp;#93;&lt;/span&gt; setxattr 57218.08 &lt;span class=&quot;error&quot;&gt;&amp;#91;57218.08,57218.08&amp;#93;&lt;/span&gt; destroy 33400.55 &lt;span class=&quot;error&quot;&gt;&amp;#91;33400.55,33400.55&amp;#93;&lt;/span&gt;&lt;br/&gt;
done!&lt;/p&gt;</comment>
                            <comment id="59108" author="mdiep" created="Wed, 22 May 2013 20:40:56 +0000"  >&lt;p&gt;Here is some info about turn off quota:&lt;/p&gt;

&lt;p&gt;quota on (default)&lt;/p&gt;


&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds03 lu3305&amp;#93;&lt;/span&gt;# file_count=250000 thrlo=256 thrhi=256 /usr/bin/mds-survey&lt;br/&gt;
Wed May 22 13:09:18 PDT 2013 /usr/bin/mds-survey from mds03&lt;br/&gt;
mdt 1 file  250000 dir  256 thr  256 create 44082.00 &lt;span class=&quot;error&quot;&gt;&amp;#91;44082.00,44082.00&amp;#93;&lt;/span&gt; lookup 2355576.51 &lt;span class=&quot;error&quot;&gt;&amp;#91;2355576.51,2355576.51&amp;#93;&lt;/span&gt; md_getattr 1223878.40 &lt;span class=&quot;error&quot;&gt;&amp;#91;1223878.40,1223878.40&amp;#93;&lt;/span&gt; setxattr 29649.06 &lt;span class=&quot;error&quot;&gt;&amp;#91;29649.06,29649.06&amp;#93;&lt;/span&gt; destroy 46921.25 &lt;span class=&quot;error&quot;&gt;&amp;#91;46921.25,46921.25&amp;#93;&lt;/span&gt;&lt;br/&gt;
done!&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds03 lu3305&amp;#93;&lt;/span&gt;# file_count=250000 thrlo=256 thrhi=256 /usr/bin/mds-survey&lt;br/&gt;
Wed May 22 13:10:48 PDT 2013 /usr/bin/mds-survey from mds03&lt;br/&gt;
mdt 1 file  250000 dir  256 thr  256 create 44897.20 &lt;span class=&quot;error&quot;&gt;&amp;#91;44897.20,44897.20&amp;#93;&lt;/span&gt; lookup 2659938.47 &lt;span class=&quot;error&quot;&gt;&amp;#91;2659938.47,2659938.47&amp;#93;&lt;/span&gt; md_getattr 1321129.64 &lt;span class=&quot;error&quot;&gt;&amp;#91;1321129.64,1321129.64&amp;#93;&lt;/span&gt; setxattr 57581.92 &lt;span class=&quot;error&quot;&gt;&amp;#91;57581.92,57581.92&amp;#93;&lt;/span&gt; destroy 35684.58 &lt;span class=&quot;error&quot;&gt;&amp;#91;35684.58,35684.58&amp;#93;&lt;/span&gt;&lt;br/&gt;
done!&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds03 lu3305&amp;#93;&lt;/span&gt;# file_count=250000 thrlo=256 thrhi=256 /usr/bin/mds-survey&lt;br/&gt;
Wed May 22 13:11:32 PDT 2013 /usr/bin/mds-survey from mds03&lt;br/&gt;
mdt 1 file  250000 dir  256 thr  256 create 43014.95 &lt;span class=&quot;error&quot;&gt;&amp;#91;43014.95,43014.95&amp;#93;&lt;/span&gt; lookup 2802301.45 &lt;span class=&quot;error&quot;&gt;&amp;#91;2802301.45,2802301.45&amp;#93;&lt;/span&gt; md_getattr 1348641.14 &lt;span class=&quot;error&quot;&gt;&amp;#91;1348641.14,1348641.14&amp;#93;&lt;/span&gt; setxattr 32394.97 &lt;span class=&quot;error&quot;&gt;&amp;#91;32394.97,32394.97&amp;#93;&lt;/span&gt; destroy 39988.76 &lt;span class=&quot;error&quot;&gt;&amp;#91;39988.76,39988.76&amp;#93;&lt;/span&gt;&lt;br/&gt;
done!&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds03 lu3305&amp;#93;&lt;/span&gt;# file_count=250000 thrlo=256 thrhi=256 /usr/bin/mds-survey&lt;br/&gt;
Wed May 22 13:12:10 PDT 2013 /usr/bin/mds-survey from mds03&lt;br/&gt;
mdt 1 file  250000 dir  256 thr  256 create 42977.31 &lt;span class=&quot;error&quot;&gt;&amp;#91;42977.31,42977.31&amp;#93;&lt;/span&gt; lookup 2828152.94 &lt;span class=&quot;error&quot;&gt;&amp;#91;2828152.94,2828152.94&amp;#93;&lt;/span&gt; md_getattr 1357190.19 &lt;span class=&quot;error&quot;&gt;&amp;#91;1357190.19,1357190.19&amp;#93;&lt;/span&gt; setxattr 34235.71 &lt;span class=&quot;error&quot;&gt;&amp;#91;34235.71,34235.71&amp;#93;&lt;/span&gt; destroy 48435.62 &lt;span class=&quot;error&quot;&gt;&amp;#91;48435.62,48435.62&amp;#93;&lt;/span&gt;&lt;br/&gt;
done!&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds03 lu3305&amp;#93;&lt;/span&gt;# file_count=250000 thrlo=256 thrhi=256 /usr/bin/mds-survey&lt;br/&gt;
Wed May 22 13:12:53 PDT 2013 /usr/bin/mds-survey from mds03&lt;br/&gt;
mdt 1 file  250000 dir  256 thr  256 create 42984.65 &lt;span class=&quot;error&quot;&gt;&amp;#91;42984.65,42984.65&amp;#93;&lt;/span&gt; lookup 2782484.74 &lt;span class=&quot;error&quot;&gt;&amp;#91;2782484.74,2782484.74&amp;#93;&lt;/span&gt; md_getattr 1344989.45 &lt;span class=&quot;error&quot;&gt;&amp;#91;1344989.45,1344989.45&amp;#93;&lt;/span&gt; setxattr 58604.47 &lt;span class=&quot;error&quot;&gt;&amp;#91;58604.47,58604.47&amp;#93;&lt;/span&gt; destroy 34678.90 &lt;span class=&quot;error&quot;&gt;&amp;#91;34678.90,34678.90&amp;#93;&lt;/span&gt;&lt;br/&gt;
done!&lt;/p&gt;

&lt;p&gt;quota off via tune2fs -O ^quota &amp;lt;dev&amp;gt;&lt;/p&gt;


&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds03 lu3305&amp;#93;&lt;/span&gt;# file_count=250000 thrlo=256 thrhi=256 /usr/bin/mds-survey&lt;br/&gt;
Wed May 22 13:16:30 PDT 2013 /usr/bin/mds-survey from mds03&lt;br/&gt;
mdt 1 file  250000 dir  256 thr  256 create 57635.67 &lt;span class=&quot;error&quot;&gt;&amp;#91;57635.67,57635.67&amp;#93;&lt;/span&gt; lookup 2949092.93 &lt;span class=&quot;error&quot;&gt;&amp;#91;2949092.93,2949092.93&amp;#93;&lt;/span&gt; md_getattr 1439917.94 &lt;span class=&quot;error&quot;&gt;&amp;#91;1439917.94,1439917.94&amp;#93;&lt;/span&gt; setxattr 59857.89 &lt;span class=&quot;error&quot;&gt;&amp;#91;59857.89,59857.89&amp;#93;&lt;/span&gt; destroy 58406.76 &lt;span class=&quot;error&quot;&gt;&amp;#91;58406.76,58406.76&amp;#93;&lt;/span&gt; &lt;br/&gt;
done!&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds03 lu3305&amp;#93;&lt;/span&gt;# file_count=250000 thrlo=256 thrhi=256 /usr/bin/mds-survey&lt;br/&gt;
Wed May 22 13:16:53 PDT 2013 /usr/bin/mds-survey from mds03&lt;br/&gt;
mdt 1 file  250000 dir  256 thr  256 create 57729.97 &lt;span class=&quot;error&quot;&gt;&amp;#91;57729.97,57729.97&amp;#93;&lt;/span&gt; lookup 2745549.65 &lt;span class=&quot;error&quot;&gt;&amp;#91;2745549.65,2745549.65&amp;#93;&lt;/span&gt; md_getattr 1450702.84 &lt;span class=&quot;error&quot;&gt;&amp;#91;1450702.84,1450702.84&amp;#93;&lt;/span&gt; setxattr 31255.98 &lt;span class=&quot;error&quot;&gt;&amp;#91;31255.98,31255.98&amp;#93;&lt;/span&gt; destroy 71909.76 &lt;span class=&quot;error&quot;&gt;&amp;#91;71909.76,71909.76&amp;#93;&lt;/span&gt; &lt;br/&gt;
done!&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds03 lu3305&amp;#93;&lt;/span&gt;# file_count=250000 thrlo=256 thrhi=256 /usr/bin/mds-survey&lt;br/&gt;
Wed May 22 13:17:16 PDT 2013 /usr/bin/mds-survey from mds03&lt;br/&gt;
mdt 1 file  250000 dir  256 thr  256 create 57145.08 &lt;span class=&quot;error&quot;&gt;&amp;#91;57145.08,57145.08&amp;#93;&lt;/span&gt; lookup 2610389.07 &lt;span class=&quot;error&quot;&gt;&amp;#91;2610389.07,2610389.07&amp;#93;&lt;/span&gt; md_getattr 1456521.11 &lt;span class=&quot;error&quot;&gt;&amp;#91;1456521.11,1456521.11&amp;#93;&lt;/span&gt; setxattr 31354.29 &lt;span class=&quot;error&quot;&gt;&amp;#91;31354.29,31354.29&amp;#93;&lt;/span&gt; destroy 75608.29 &lt;span class=&quot;error&quot;&gt;&amp;#91;75608.29,75608.29&amp;#93;&lt;/span&gt; &lt;br/&gt;
done!&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds03 lu3305&amp;#93;&lt;/span&gt;# file_count=250000 thrlo=256 thrhi=256 /usr/bin/mds-survey&lt;br/&gt;
Wed May 22 13:17:55 PDT 2013 /usr/bin/mds-survey from mds03&lt;br/&gt;
mdt 1 file  250000 dir  256 thr  256 create 53349.04 &lt;span class=&quot;error&quot;&gt;&amp;#91;53349.04,53349.04&amp;#93;&lt;/span&gt; lookup 2678071.11 &lt;span class=&quot;error&quot;&gt;&amp;#91;2678071.11,2678071.11&amp;#93;&lt;/span&gt; md_getattr 1390181.88 &lt;span class=&quot;error&quot;&gt;&amp;#91;1390181.88,1390181.88&amp;#93;&lt;/span&gt; setxattr 34123.68 &lt;span class=&quot;error&quot;&gt;&amp;#91;34123.68,34123.68&amp;#93;&lt;/span&gt; destroy 148934.27 &lt;span class=&quot;error&quot;&gt;&amp;#91;148934.27,148934.27&amp;#93;&lt;/span&gt; &lt;br/&gt;
done!&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@mds03 lu3305&amp;#93;&lt;/span&gt;# file_count=250000 thrlo=256 thrhi=256 /usr/bin/mds-survey&lt;br/&gt;
Wed May 22 13:18:31 PDT 2013 /usr/bin/mds-survey from mds03&lt;br/&gt;
mdt 1 file  250000 dir  256 thr  256 create 54390.24 &lt;span class=&quot;error&quot;&gt;&amp;#91;54390.24,54390.24&amp;#93;&lt;/span&gt; lookup 2724295.09 &lt;span class=&quot;error&quot;&gt;&amp;#91;2724295.09,2724295.09&amp;#93;&lt;/span&gt; md_getattr 1428483.22 &lt;span class=&quot;error&quot;&gt;&amp;#91;1428483.22,1428483.22&amp;#93;&lt;/span&gt; setxattr 34446.16 &lt;span class=&quot;error&quot;&gt;&amp;#91;34446.16,34446.16&amp;#93;&lt;/span&gt; destroy 154758.66 &lt;span class=&quot;error&quot;&gt;&amp;#91;154758.66,154758.66&amp;#93;&lt;/span&gt; &lt;br/&gt;
done!&lt;/p&gt;</comment>
                            <comment id="59136" author="niu" created="Thu, 23 May 2013 03:05:38 +0000"  >&lt;p&gt;From Minh&apos;s result we can see: because of quota file updating, when testing 256 threads over 256 directories (1 thread per directory, no contention on parent directory updating), create/unlink of w/o quota is faster than create/unlink with quota on. I think the oprofile data confirms it:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;Counted CPU_CLK_UNHALTED events (Clock cycles when not halted) with a unit mask of 0x00 (No unit mask) count 100000
samples  %        image name               app name                 symbol name
2276160  46.2251  vmlinux                  vmlinux                  dqput
963873   19.5747  vmlinux                  vmlinux                  dqget
335277    6.8089  ldiskfs                  ldiskfs                  /ldiskfs
258028    5.2401  vmlinux                  vmlinux                  dquot_mark_dquot_dirty
110819    2.2506  osd_ldiskfs              osd_ldiskfs              /osd_ldiskfs
76925     1.5622  obdclass                 obdclass                 /obdclass
58193     1.1818  mdd                      mdd                      /mdd
41931     0.8516  vmlinux                  vmlinux                  __find_get_block
32408     0.6582  lod                      lod                      /lod
20711     0.4206  jbd2.ko                  jbd2.ko                  jbd2_journal_add_journal_head
18598     0.3777  jbd2.ko                  jbd2.ko                  do_get_write_access
18579     0.3773  vmlinux                  vmlinux                  __find_get_block_slow
18364     0.3729  libcfs                   libcfs                   /libcfs
17833     0.3622  oprofiled                oprofiled                /usr/bin/oprofiled
17472     0.3548  vmlinux                  vmlinux                  mutex_lock
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I&apos;m not sure if we can still improve the performance (with quota) further in this respect, because single quota file updating can always be the bottleneck.&lt;/p&gt;</comment>
                            <comment id="59138" author="niu" created="Thu, 23 May 2013 03:32:31 +0000"  >&lt;p&gt;Look closer into the dqget()/dqput(), I realized that there is still quite a few global locks in quota code: dq_list_lock, dq_state_lock, dq_data_lock. The fix of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2442&quot; title=&quot;metadata performance degradation on current master&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2442&quot;&gt;&lt;del&gt;LU-2442&lt;/del&gt;&lt;/a&gt; only removes the global lock of dqptr_sem, which has the most significant impact on performance. Removing all of  the quota global locks requires lots of changes in VFS code, that isn&apos;t a small project, maybe we should open a new project for further release?&lt;/p&gt;</comment>
                            <comment id="59149" author="adilger" created="Thu, 23 May 2013 07:21:09 +0000"  >&lt;p&gt;I find it strange that dqget() is called 2M times, but it only looks like 20k blocks are being allocated (based on the ldiskfs an jbd2 call counts).  Before trying to optimize the speed of that function, it is probably better to reduce the number of times it is called?&lt;/p&gt;

&lt;p&gt;It is also a case where the same quota entry is being accessed for every call (same UID and GID each time), so I wonder if that common case could be optimized in some way?&lt;/p&gt;

&lt;p&gt;Are any of these issues fixed in the original quota patches?&lt;/p&gt;

&lt;p&gt;Unfortunately, since all of the threads are contending to update the same record, there isn&apos;t an easy way to reduce contention.  The only thing I can think of is to have a journal pre-commit callback that does only a single quota update to disk per transaction, and uses percpu counters for the per-quota-per-transaction updates in memory.  That would certainly avoid contention, and is no less correct in the face of a crash.  No idea how easy that would be to implement. &lt;/p&gt;</comment>
                            <comment id="59152" author="niu" created="Thu, 23 May 2013 09:09:46 +0000"  >&lt;p&gt;dqget()/dqput() is mainly to get/drop reference on the in-memory per-id data of dquot, and it acquires global locks like lock dq_list_lock &amp;amp; dq_state_lock, (since it will lookup the dquot list and do some state checking) so contention on those global locks could be severe in the test case. If we can replace them with RCU or read/write lock, things will be better.&lt;/p&gt;

&lt;p&gt;I heard from Lai that there were some old patches which tried to remove those global locks, but it didn&apos;t gain much interest of community and not reviewed. Lai, could you comment on this?&lt;/p&gt;

&lt;p&gt;Regarding the quota record commit (mark_dquot_dirty() -&amp;gt; ext4_mark_dquot_dirty() -&amp;gt; ext4_write_dquot() -&amp;gt; dquot_commit(), which should happen along with each transaction), it does require global locks: dqio_mutex &amp;amp; dq_list_lock, but surprisingly, I didn&apos;t see it in the top samples of oprofile, it might just because the dqget()/dqput() calls are much more than dquot commit calls? Once we resolved the bottleneck in dqget()/dqput(), the contention in dquot commit could probably come to light.&lt;/p&gt;</comment>
                            <comment id="59227" author="niu" created="Fri, 24 May 2013 03:18:56 +0000"  >&lt;p&gt;Instead of eliminating the global locks entirely, maybe a small fix in dquot_initialize() could relieve the contention caused by dqget()/dqput(): In dquot_initialize(), we&apos;d call dqget() only when i_dquot not initialized, which can avoid 2 pair of dqget()/dqput() in most case. I&apos;ll propose a patch soon.&lt;/p&gt;</comment>
                            <comment id="59315" author="adilger" created="Fri, 24 May 2013 23:00:26 +0000"  >&lt;p&gt;Niu&apos;s patch is at &lt;a href=&quot;http://review.whamcloud.com/6440&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6440&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;Minh, would you be able to run another set of tests with the latest patch applied, and produce a graph like:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.hpdd.intel.com/secure/attachment/12415/mdtest_create.png&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://jira.hpdd.intel.com/secure/attachment/12415/mdtest_create.png&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;so it is easier to see what the differences are?  Presumably with 5 runs it would be useful to plot the standard deviation, since I see from the text results you posted above that the performance can vary dramatically between runs.&lt;/p&gt;</comment>
                            <comment id="59318" author="mdiep" created="Fri, 24 May 2013 23:34:07 +0000"  >&lt;p&gt;yes, will do when the cluster&apos;s IB network is back online next week&lt;/p&gt;</comment>
                            <comment id="59429" author="simmonsja" created="Tue, 28 May 2013 14:25:33 +0000"  >&lt;p&gt;This patch will need to be port to SLES11 SP&lt;span class=&quot;error&quot;&gt;&amp;#91;1/2&amp;#93;&lt;/span&gt; as well. Later in the week I can include it in the patch.&lt;/p&gt;</comment>
                            <comment id="59433" author="adilger" created="Tue, 28 May 2013 14:52:06 +0000"  >&lt;p&gt;James, please submit the SLES changes as a separate patch. Since this doesn&apos;t affect the API, the two changes do not need to be in the same commit.  If the other patch needs to be refreshed for some other reason they can be merged. &lt;/p&gt;</comment>
                            <comment id="59435" author="simmonsja" created="Tue, 28 May 2013 15:27:04 +0000"  >&lt;p&gt;Fine with me.&lt;/p&gt;</comment>
                            <comment id="59998" author="mdiep" created="Tue, 4 Jun 2013 20:51:47 +0000"  >&lt;p&gt;performance data for the patch&lt;/p&gt;</comment>
                            <comment id="60008" author="niu" created="Wed, 5 Jun 2013 03:59:36 +0000"  >&lt;p&gt;Thanks a lot, Minh.&lt;/p&gt;

&lt;p&gt;It looks like the patch improves create/rm performance overall, but there are some strange things in the figure that I don&apos;t understand:&lt;/p&gt;

&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;The stat performance is getting worse with patch (or disable quota), I don&apos;t know how the quota code can affect the read-only operations. Maybe we need to collect some oprofile data to investigate this further. (for both 2.4 &amp;amp; patched 2.4);&lt;/li&gt;
&lt;/ul&gt;


&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;create/rm performance drops a lot on 3 threads;&lt;/li&gt;
&lt;/ul&gt;


&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;For the per proc create/rm with 4 threads, patched 2.4 (and disable quota) is even worse than standard 2.4, looks it&apos;s same with what Siyao discovered in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2442&quot; title=&quot;metadata performance degradation on current master&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2442&quot;&gt;&lt;del&gt;LU-2442&lt;/del&gt;&lt;/a&gt; (unlink getting worse with 32 threads when disabled quota), contention on a global semaphore is better than contention on several spin locks when the contention is heavy enough?&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;BTW: why do we put the create &amp;amp; rm data into the same figure? I think they are two distinct tests, aren&apos;t they?&lt;/p&gt;</comment>
                            <comment id="61872" author="niu" created="Mon, 8 Jul 2013 05:32:22 +0000"  >&lt;p&gt;I did few tests on Rosso cluster, the result is similar to what we got in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2442&quot; title=&quot;metadata performance degradation on current master&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2442&quot;&gt;&lt;del&gt;LU-2442&lt;/del&gt;&lt;/a&gt;, except that the performance drop problem (with 32 threads) showed in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2442&quot; title=&quot;metadata performance degradation on current master&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2442&quot;&gt;&lt;del&gt;LU-2442&lt;/del&gt;&lt;/a&gt; (with &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2442&quot; title=&quot;metadata performance degradation on current master&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2442&quot;&gt;&lt;del&gt;LU-2442&lt;/del&gt;&lt;/a&gt; patch) is resolved:&lt;/p&gt;

&lt;p&gt;patched:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;mdtest-1.8.3 was launched with 32 total task(s) on 1 nodes
Command line used: mdtest -d /mnt/ldiskfs -i 10 -n 25000 -u -F -r
Path: /mnt
FS: 19.7 GiB   Used FS: 17.5%   Inodes: 1.2 Mi   Used Inodes: 4.6%

32 tasks, 800000 files

SUMMARY: (of 10 iterations)
   Operation                  Max        Min       Mean    Std Dev
   ---------                  ---        ---       ----    -------
   File creation     :      0.000      0.000      0.000      0.000
   File stat         :      0.000      0.000      0.000      0.000
   File removal      :   4042.032   1713.613   2713.827    698.243
   Tree creation     :      0.000      0.000      0.000      0.000
   Tree removal      :      2.164      1.861      2.020      0.088

-- finished at 07/07/2013 20:37:36 --
CPU: Intel Sandy Bridge microarchitecture, speed 2.601e+06 MHz (estimated)
Counted CPU_CLK_UNHALTED events (Clock cycles when not halted) with a unit mask of 0x00 (No unit mask) count 100000
samples  %        app name                 symbol name
10826148 11.6911  vmlinux                  schedule
7089656   7.6561  vmlinux                  update_curr
6432166   6.9461  vmlinux                  sys_sched_yield
4384494   4.7348  vmlinux                  __audit_syscall_exit
4088507   4.4152  libc-2.12.so             sched_yield
3441346   3.7163  vmlinux                  system_call_after_swapgs
3337224   3.6038  vmlinux                  put_prev_task_fair
3244213   3.5034  vmlinux                  audit_syscall_entry
2844216   3.0715  vmlinux                  thread_return
2702323   2.9182  vmlinux                  rb_insert_color
2636798   2.8475  vmlinux                  native_read_tsc
2234644   2.4132  vmlinux                  sched_clock_cpu
2182744   2.3571  vmlinux                  native_sched_clock
2175482   2.3493  vmlinux                  hrtick_start_fair
2152807   2.3248  vmlinux                  pick_next_task_fair
2130024   2.3002  vmlinux                  set_next_entity
2099576   2.2673  vmlinux                  rb_erase
1790101   1.9331  vmlinux                  update_stats_wait_end
1777328   1.9193  vmlinux                  mutex_spin_on_owner
1701672   1.8376  vmlinux                  sysret_check
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;unpatched:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;mdtest-1.8.3 was launched with 32 total task(s) on 1 nodes
Command line used: mdtest -d /mnt/ldiskfs -i 10 -n 25000 -u -F -r
Path: /mnt
FS: 19.7 GiB   Used FS: 17.8%   Inodes: 1.2 Mi   Used Inodes: 4.6%

32 tasks, 800000 files

SUMMARY: (of 10 iterations)
   Operation                  Max        Min       Mean    Std Dev
   ---------                  ---        ---       ----    -------
   File creation     :      0.000      0.000      0.000      0.000
   File stat         :      0.000      0.000      0.000      0.000
   File removal      :   2816.345   1673.085   2122.347    342.119
   Tree creation     :      0.000      0.000      0.000      0.000
   Tree removal      :      2.296      0.111      1.361      0.866

-- finished at 07/07/2013 21:11:03 --
CPU: Intel Sandy Bridge microarchitecture, speed 2.601e+06 MHz (estimated)
Counted CPU_CLK_UNHALTED events (Clock cycles when not halted) with a unit mask of 0x00 (No unit mask) count 100000
samples  %        image name               app name                 symbol name
23218790 18.3914  vmlinux                  vmlinux                  dqput
9549739   7.5643  vmlinux                  vmlinux                  __audit_syscall_exit
9116086   7.2208  vmlinux                  vmlinux                  schedule
8290558   6.5669  vmlinux                  vmlinux                  dqget
5576620   4.4172  vmlinux                  vmlinux                  update_curr
5343755   4.2327  vmlinux                  vmlinux                  sys_sched_yield
3251018   2.5751  libc-2.12.so             libc-2.12.so             sched_yield
2907579   2.3031  vmlinux                  vmlinux                  system_call_after_swapgs
2854863   2.2613  vmlinux                  vmlinux                  put_prev_task_fair
2793392   2.2126  vmlinux                  vmlinux                  audit_syscall_entry
2723949   2.1576  vmlinux                  vmlinux                  kfree
2551007   2.0206  vmlinux                  vmlinux                  mutex_spin_on_owner
2406364   1.9061  vmlinux                  vmlinux                  rb_insert_color
2321179   1.8386  vmlinux                  vmlinux                  thread_return
2184031   1.7299  vmlinux                  vmlinux                  native_read_tsc
2002277   1.5860  vmlinux                  vmlinux                  dquot_mark_dquot_dirty
1990135   1.5764  vmlinux                  vmlinux                  native_sched_clock
1970544   1.5608  vmlinux                  vmlinux                  set_next_entity
1967852   1.5587  vmlinux                  vmlinux                  pick_next_task_fair
1966282   1.5575  vmlinux                  vmlinux                  dquot_commit
1966271   1.5575  vmlinux                  vmlinux                  sysret_check
1919524   1.5204  vmlinux                  vmlinux                  unroll_tree_refs
1811281   1.4347  vmlinux                  vmlinux                  sched_clock_cpu
1810278   1.4339  vmlinux                  vmlinux                  rb_erase
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The unlink rate increased by ~28% with 32 threads, and the oprofile data shows that contention on dq_list_lock in dqput() is alleviated a lot.&lt;/p&gt;

&lt;p&gt;I think we should take this patch as a supplement of fix &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2442&quot; title=&quot;metadata performance degradation on current master&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2442&quot;&gt;&lt;del&gt;LU-2442&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="65888" author="pjones" created="Thu, 5 Sep 2013 22:46:24 +0000"  >&lt;p&gt;Landed for 2.4.1 and 2.5&lt;/p&gt;</comment>
                            <comment id="66750" author="prakash" created="Mon, 16 Sep 2013 17:09:04 +0000"  >&lt;p&gt;It&apos;s hard to gather from the prior discussion, but is this the only patch that came out of this issue:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;commit 58d2a322589ec13ee3c585c13b1c83f429d946ce
Author: Niu Yawei &amp;lt;yawei.niu@intel.com&amp;gt;
Date:   Thu May 23 23:49:03 2013 -0400

    LU-3305 quota: avoid unnecessary dqget/dqput calls
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;? Thanks.&lt;/p&gt;</comment>
                            <comment id="66813" author="adilger" created="Tue, 17 Sep 2013 04:54:51 +0000"  >&lt;p&gt;There are two core kernel patches in the RHEL series on master that improve quota performance - both start with &quot;quota&quot;.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10120">
                    <name>Blocker</name>
                                                                <inwardlinks description="is blocked by">
                                        <issuelink>
            <issuekey id="19150">LU-3396</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="12665" name="Hyperion Performance 07 May 2013.xlsx" size="99415" author="cliffw" created="Thu, 9 May 2013 16:31:48 +0000"/>
                            <attachment id="12852" name="Opensfs Metadata Performance RC1.xlsx" size="61930" author="mdiep" created="Fri, 17 May 2013 16:45:06 +0000"/>
                            <attachment id="12993" name="Opensfs Metadata Performance quota patch.xlsx" size="61391" author="mdiep" created="Tue, 4 Jun 2013 20:51:47 +0000"/>
                            <attachment id="12817" name="Opensfs Metadata Performance.xlsx" size="51687" author="mdiep" created="Tue, 14 May 2013 16:56:08 +0000"/>
                            <attachment id="12826" name="Screen Shot 2013-05-15 at 2.18.52 PM.png" size="185107" author="liang" created="Wed, 15 May 2013 06:19:01 +0000"/>
                            <attachment id="12902" name="oprofile.tgz" size="61583" author="mdiep" created="Wed, 22 May 2013 16:59:55 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvqk7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>8187</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>