<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:46:53 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4906] rm -rf triggers too much MDS_READPAGE</title>
                <link>https://jira.whamcloud.com/browse/LU-4906</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;On 2.5.57-79-ge7f99a5 I see that rm -rf on a directory with n regular files causes O&amp;#40;n&amp;#41; MDS_READPAGE requests to be sent. I ran the following on 2.4.3 and master:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;export MOUNT_2=y
llmount.sh

cd /mnt/lustre
echo clear | tee /proc/fs/lustre/mdc/*/stats
mkdir d
touch d/{0..255}
cat /proc/fs/lustre/mdc/*/stats

cd /mnt/lustre2
echo clear | tee /proc/fs/lustre/mdc/*/stats
rm -rf d
cat /proc/fs/lustre/mdc/*/stats
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;On 2.4.3:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;## mkdir and touch
req_waittime              773 samples [usec] 65 55988 339933 3295555639
req_active                773 samples [reqs] 1 1 773 773
mds_close                 256 samples [usec] 65 486 58958 16078566
mds_reint                 257 samples [usec] 93 826 74764 29250026
ldlm_enqueue              259 samples [usec] 191 1423 150223 115570903
seq_query                 1 samples [usec] 55988 55988 55988 3134656144
## rm -rf
snapshot_time             1397509895.872324 secs.usecs
req_waittime              258 samples [usec] 50 1054 32830 6165600
req_active                258 samples [reqs] 1 1 258 258
ldlm_cancel               258 samples [usec] 50 1054 32830 6165600
snapshot_time             1397509895.872354 secs.usecs
req_waittime              524 samples [usec] 45 9854 212336 225452482
req_active                524 samples [reqs] 1 4 876 1660
mds_close                 1 samples [usec] 390 390 390 152100
mds_reint                 257 samples [usec] 331 9854 151751 209416267
mds_readpage              3 samples [usec] 271 323 902 272634
ldlm_enqueue              261 samples [usec] 45 736 59120 15595504
ldlm_cancel               2 samples [usec] 64 109 173 15977
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;On master:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;## mkdir and touch:
snapshot_time             1397507941.992796 secs.usecs
snapshot_time             1397507941.992828 secs.usecs
req_waittime              1282 samples [usec] 50 2674 364172 203043372
req_active                1282 samples [reqs] 1 1 1282 1282
mds_close                 256 samples [usec] 61 640 50251 15984775
mds_reint                 257 samples [usec] 89 1045 60223 21230981
mds_getxattr              256 samples [usec] 50 658 38276 9345158
ldlm_enqueue              513 samples [usec] 82 2674 215422 156482458
## rm -rf
snapshot_time             1397507954.948995 secs.usecs
req_waittime              991 samples [usec] 31 5949 371017 322109413
req_active                991 samples [reqs] 1 9 2132 6404
mds_close                 1 samples [usec] 126 126 126 15876
mds_reint                 257 samples [usec] 168 5949 173654 221727790
mds_readpage              132 samples [usec] 158 2173 44232 21316906
mds_getxattr              60 samples [usec] 31 345 5769 828911
ldlm_enqueue              423 samples [usec] 44 2496 123479 70146809
ldlm_cancel               118 samples [usec] 65 891 23757 8073121
snapshot_time             1397507954.949096 secs.usecs
req_waittime              1 samples [usec] 108 108 108 11664
req_active                1 samples [reqs] 1 1 1 1
ldlm_cancel               1 samples [usec] 108 108 108 11664
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;(If you noticed that ldlm_enqueue and mds_reint are present, it&apos;s because I used &lt;a href=&quot;http://review.whamcloud.com/#/c/6223/1&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/6223/1&lt;/a&gt; which is awesome and has been landable for nearly one year (still on patch set 1), but nobody ever reviews it.)&lt;/p&gt;</description>
                <environment></environment>
        <key id="24209">LU-4906</key>
            <summary>rm -rf triggers too much MDS_READPAGE</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="di.wang">Di Wang</assignee>
                                    <reporter username="jhammond">John Hammond</reporter>
                        <labels>
                            <label>llite</label>
                            <label>mdc</label>
                            <label>readdir</label>
                    </labels>
                <created>Mon, 14 Apr 2014 21:24:23 +0000</created>
                <updated>Thu, 19 Jun 2014 19:44:41 +0000</updated>
                            <resolved>Thu, 19 Jun 2014 17:33:21 +0000</resolved>
                                    <version>Lustre 2.6.0</version>
                                    <fixVersion>Lustre 2.6.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>8</watches>
                                                                            <comments>
                            <comment id="81581" author="adilger" created="Tue, 15 Apr 2014 03:01:47 +0000"  >&lt;p&gt;This is partly related to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3308&quot; title=&quot;large readdir chunk size slows unlink/&amp;quot;rm -r&amp;quot; performance&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3308&quot;&gt;LU-3308&lt;/a&gt;, which is causing too-large readdir() RPCs to be issued and slow down the rm process. It would be allowed for the readdir() of the rm process to cache the entries and not issue more RPCs, either in userspace or in the kernel.&lt;/p&gt;

&lt;p&gt;I&apos;ve never looked at the rm implementation, but I imagine it is complex by its very presence in GNU fileutils. &lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3308&quot; title=&quot;large readdir chunk size slows unlink/&amp;quot;rm -r&amp;quot; performance&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3308&quot;&gt;LU-3308&lt;/a&gt; also describes the mechanism by which the kernel could cache the directory entries or pages on the file descriptor in the kernel even after the MDS revoked the DLM lock. &lt;/p&gt;</comment>
                            <comment id="81649" author="green" created="Tue, 15 Apr 2014 18:06:30 +0000"  >&lt;p&gt;Potentially this is also related to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4367&quot; title=&quot;unlink performance regression on lustre-2.5.52 client&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4367&quot;&gt;&lt;del&gt;LU-4367&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;I think rm was always redoing readdir after every unlink, we had a ticket about this in bugzilla even that I can find if pressed.&lt;br/&gt;
This is all thanks to us not implementing readdir caching on per fd basis, so every time you rm something, the readdir cache is invalidated and next readdir leads to lock refetch.&lt;br/&gt;
There&apos;s some glibc caching (that does getdirents64), but apparently it no longer works or something, no idea why and it&apos;s not affected by lustre at all.&lt;/p&gt;</comment>
                            <comment id="81650" author="adilger" created="Tue, 15 Apr 2014 18:10:48 +0000"  >&lt;p&gt;On 2.4.2 (FC12 userspace) I see strace only does a single readdir:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;strace rm -r /myth/tmp/ttt
:
:
getdents(3, /* 10 entries */, 32768)    = 240
newfstatat(3, &quot;2&quot;, {st_mode=S_IFREG|0664, st_size=0, ...}, AT_SYMLINK_NOFOLLOW) = 0
stat(&quot;/myth/tmp/ttt/2&quot;, {st_mode=S_IFREG|0664, st_size=0, ...}) = 0
access(&quot;/myth/tmp/ttt/2&quot;, W_OK)         = 0
unlinkat(3, &quot;2&quot;, 0)                     = 0
newfstatat(3, &quot;7&quot;, {st_mode=S_IFREG|0664, st_size=0, ...}, AT_SYMLINK_NOFOLLOW) = 0
stat(&quot;/myth/tmp/ttt/7&quot;, {st_mode=S_IFREG|0664, st_size=0, ...}) = 0
access(&quot;/myth/tmp/ttt/7&quot;, W_OK)         = 0
unlinkat(3, &quot;7&quot;, 0)                     = 0
newfstatat(3, &quot;4&quot;, {st_mode=S_IFREG|0664, st_size=0, ...}, AT_SYMLINK_NOFOLLOW) = 0
stat(&quot;/myth/tmp/ttt/4&quot;, {st_mode=S_IFREG|0664, st_size=0, ...}) = 0
access(&quot;/myth/tmp/ttt/4&quot;, W_OK)         = 0
unlinkat(3, &quot;4&quot;, 0)                     = 0
newfstatat(3, &quot;3&quot;, {st_mode=S_IFREG|0664, st_size=0, ...}, AT_SYMLINK_NOFOLLOW) = 0
stat(&quot;/myth/tmp/ttt/3&quot;, {st_mode=S_IFREG|0664, st_size=0, ...}) = 0
access(&quot;/myth/tmp/ttt/3&quot;, W_OK)         = 0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;What is the userspace being tested here, and is it the same for both tests?  What does the strace show for getdents() calls?&lt;/p&gt;</comment>
                            <comment id="81654" author="green" created="Tue, 15 Apr 2014 18:16:07 +0000"  >&lt;p&gt;so I just run strace on rm on lustre (current master) and I see getdents64 caching on the order of 1000 entries per call, btw.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;getdents(3, /* 1001 entries */, 32768)  = 32024
brk(0x15cf000)                          = 0x15cf000
brk(0x15f0000)                          = 0x15f0000
getdents(3, /* 0 entries */, 32768)     = 0
close(3)                                = 0
unlinkat(4, &quot;file221&quot;, 0)               = 0
...
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="81655" author="jhammond" created="Tue, 15 Apr 2014 18:16:18 +0000"  >&lt;p&gt;Oleg, I should have put more detail here. I was running strace alongside and rm -rf calls getdents twice, unlink 256 times, and rmdir once, in that order. It&apos;s the statahead thread that generates all but 2-3 of the readpage requests.&lt;/p&gt;</comment>
                            <comment id="81656" author="green" created="Tue, 15 Apr 2014 18:17:41 +0000"  >&lt;p&gt;ok, so it&apos;s statahead&apos;s fault, I see.&lt;/p&gt;</comment>
                            <comment id="82655" author="adilger" created="Mon, 28 Apr 2014 18:00:44 +0000"  >&lt;p&gt;Fan Yong, it seems that statahead is interacting badly with &quot;rm -r&quot;.  Is there some way to stop it from trying to prefetch the directory entries if the directory lock is being cancelled too often?&lt;/p&gt;</comment>
                            <comment id="82706" author="yong.fan" created="Tue, 29 Apr 2014 00:41:55 +0000"  >&lt;p&gt;The statahead behaviour has been changed after moving the directory page cache from LLITE to MDC for striped directory.&lt;/p&gt;

&lt;p&gt;Originally, the directory page cache is in LLITE, and the ldlm lock is only held when fetching the directory page from MDS. After that the directory ldlm lock will be released. And then the statahead thread will traverse the directory page without the directory ldlm lock; even if someone is performing &quot;rm -rf&quot; and cancels (or ELC) the directory ldlm lock from the client, the statahead still holds the page reference. It is not a serious issue for statahead to get &quot;-ENOENT&quot; because some name entry has been removed, because it is a statahead internal failure and invisible to the applications. The statahead thread still can go ahead to pre-fetch as much as possible. So there is not much directory ldlm lock ping-pong.&lt;/p&gt;

&lt;p&gt;But the situation has changed after moving the directory page to MDC. In the current implementation, when the statahead traverses the directory, it does not hold the page reference; it needs to verify the directory ldlm lock for every name entry. That means there will be directory ldlm lock ping-pong between the statahead thread and the &quot;rm -rf&quot; thread for every name entry.&lt;/p&gt;

&lt;p&gt;Summary, the original directory ldlm lock ping-pong is per page, the current directory ldlm lock ping-pong is per name entry. That is why the bad performance.&lt;/p&gt;</comment>
                            <comment id="82707" author="yong.fan" created="Tue, 29 Apr 2014 00:56:54 +0000"  >&lt;p&gt;Currently, the LLITE cannot be aware of the directory ldlm lock ping-pong which happened in MDC, so the statahead cannot know it should stop itself because of the bad performance unless someone indicates that.&lt;/p&gt;</comment>
                            <comment id="85291" author="adilger" created="Fri, 30 May 2014 19:00:52 +0000"  >&lt;p&gt;Di, it seems this is caused by the DNE2 readdir changes. Could you please look at this?&lt;/p&gt;</comment>
                            <comment id="85887" author="adilger" created="Thu, 5 Jun 2014 18:24:51 +0000"  >&lt;p&gt;I think the easiest and best solution here is to have llite cache the readdir directory pages on the file handle if the lock is cancelled, either until the file handle is closed or until it does rewinddir().  That avoids extra READPAGE RPCs for many reasons.&lt;/p&gt;

&lt;p&gt;From the readdir() and rewinddir() specifications in SUSv2: &lt;a href=&quot;http://pubs.opengroup.org/onlinepubs/007908799/xsh/rewinddir.html&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://pubs.opengroup.org/onlinepubs/007908799/xsh/rewinddir.html&lt;/a&gt;&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;If a file is removed from or added to the directory after the most recent call to opendir() or rewinddir(), whether a subsequent call to readdir_r() returns an entry for that file is unspecified.&lt;/p&gt;

&lt;p&gt;The readdir_r() function may buffer several directory entries per actual read operation; the readdir_r() function marks for update the st_atime field of the directory each time the directory is actually read.&lt;/p&gt;

&lt;p&gt;The rewinddir() function resets the position of the directory stream to which dirp refers to the beginning of the directory. It also causes the directory stream to refer to the current state of the corresponding directory, as a call to opendir() would have done.&lt;/p&gt;&lt;/blockquote&gt;</comment>
                            <comment id="85933" author="di.wang" created="Thu, 5 Jun 2014 22:16:28 +0000"  >&lt;p&gt;Hmm, for non-striped directory, it does not matter cached it on llite or mdc, IMHO. But for striped directory, caching the dir-entry page in MDC layer would help LMV to build the dir-entry page by hash-order from all of stripes.&lt;/p&gt;

&lt;p&gt;In the current readdir implementation, it iterates the dir_entry one by one (i.e. goes through the stack for each entry), which is what causes this performance issue. I am working on a patch to build the dir-entry page by page in the LMV layer, which should resolve the issue. There are still some issues with the patch; I will post the patch soon.&lt;/p&gt;</comment>
                            <comment id="85969" author="di.wang" created="Fri, 6 Jun 2014 04:39:21 +0000"  >&lt;p&gt;With this patch &lt;a href=&quot;http://review.whamcloud.com/#/c/10622/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/10622/&lt;/a&gt;&lt;br/&gt;
rm -rf dir with 10k files goes from 64 seconds to 8 seconds in my local test,&lt;br/&gt;
with the patch&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@mds tests]# ./createmany -o /mnt/lustre/test1/f- 10000
total: 10000 creates in 11.72 seconds: 853.50 creates/second
[root@mds tests]# time rm -rf /mnt/lustre/test1

real	0m8.151s
user	0m0.012s
sys	0m2.242s
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;without patch&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@mds tests]# ./createmany -o /mnt/lustre/test1/f- 10000
total: 10000 creates in 11.98 seconds: 835.02 creates/second
[root@mds tests]# ls /mnt/lustre/test1/ | wc
  10000   10000   68890
[root@mds tests]# time rm -rf /mnt/lustre/test1

real	1m4.496s
user	0m0.014s
sys	0m3.832s
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="86390" author="adilger" created="Wed, 11 Jun 2014 22:39:14 +0000"  >&lt;p&gt;This kind of performance test results should be great to include in the patch commit message.&lt;/p&gt;</comment>
                            <comment id="86406" author="di.wang" created="Thu, 12 Jun 2014 05:59:33 +0000"  >&lt;p&gt;I added another patch  &lt;a href=&quot;http://review.whamcloud.com/10695&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/10695&lt;/a&gt;  based on 10622,  to add striped entry page cache to master inode, please check.&lt;/p&gt;</comment>
                            <comment id="87060" author="jlevi" created="Thu, 19 Jun 2014 17:33:21 +0000"  >&lt;p&gt;Patch landed to Master. Remaining patch will be tracked under a new ticket.&lt;/p&gt;</comment>
                            <comment id="87081" author="bogl" created="Thu, 19 Jun 2014 19:44:41 +0000"  >&lt;p&gt;I very strongly suspect the landing of this fix has broken lustre builds on 3.12 kernels.  See comment in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4416&quot; title=&quot;support for 3.12 linux kernel&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4416&quot;&gt;&lt;del&gt;LU-4416&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="18787">LU-3308</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="22390">LU-4367</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="24199">LU-4902</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="25224">LU-5232</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwk33:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>13548</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>