<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:32:11 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-3240] The link count is not updated after the mkdir</title>
                <link>https://jira.whamcloud.com/browse/LU-3240</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;We sometimes saw incorrect link counts after the mkdir or remove directories.&lt;br/&gt;
The link counts seem not to be updated after create/remove directory.&lt;/p&gt;

&lt;p&gt;We created a simple reproducer for this problem below.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;#!/bin/sh -x 

TESTDIR=/lustre/linkcount_test
[ -d &quot;$TESTDIR&quot; ] &amp;amp;&amp;amp; rm -rf $TESTDIR

mkdir $TESTDIR
cd $TESTDIR
ls
mkdir a b
ls
ls -adl $TESTDIR
rmdir b
ls -ald
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# ./test.sh
+ TESTDIR=/lustre/linkcount_test
+ &apos;[&apos; -d /lustre/linkcount_test &apos;]&apos;
+ rm -rf /lustre/linkcount_test
+ mkdir /lustre/linkcount_test
+ cd /lustre/linkcount_test
+ ls
+ mkdir a b
+ ls
a  b
+ ls -adl /lustre/linkcount_test
drwxr-xr-x 2 root root 4096 Apr 30 02:52 /lustre/linkcount_test
          ^^^
          This should be 4 after two directories are created.
+ rmdir b
+ ls -ald
drwxr-xr-x 3 root root 4096 Apr 30 02:52 .
          ^^^
          Updated after a directory is removed.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This problem happens on b2_1, b2_2 and b2_3, but not happens if both server and clients are running with current master.&lt;br/&gt;
However, master (server) - b2_1(client), we see same problem.&lt;/p&gt;</description>
                <environment>RHEL6.3</environment>
        <key id="18608">LU-3240</key>
            <summary>The link count is not updated after the mkdir</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="utopiabound">Nathaniel Clark</assignee>
                                    <reporter username="ihara">Shuichi Ihara</reporter>
                        <labels>
                            <label>mn1</label>
                            <label>patch</label>
                    </labels>
                <created>Sun, 28 Apr 2013 22:25:46 +0000</created>
                <updated>Fri, 20 Dec 2013 21:07:51 +0000</updated>
                            <resolved>Fri, 11 Oct 2013 22:25:26 +0000</resolved>
                                    <version>Lustre 2.2.0</version>
                    <version>Lustre 2.3.0</version>
                    <version>Lustre 2.1.5</version>
                                    <fixVersion>Lustre 2.5.0</fixVersion>
                    <fixVersion>Lustre 2.4.2</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>24</watches>
                                                                            <comments>
                            <comment id="57213" author="ihara" created="Sun, 28 Apr 2013 23:24:53 +0000"  >&lt;p&gt;After analysis, we found the commit that made the problem stop occurring on current master. &lt;br/&gt;
commit 75ae281dac43534f65df0113a4bf5ccaf5aedca9 is related to this issue.&lt;/p&gt;

&lt;p&gt;Especially, following codes changes in lustre/llite/name.c affects to this problem. If MDS_INODELOCK_PERM is removed from the codes, we saw same problem.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;@@ -284,7 +284,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 
 		if (inode-&amp;gt;i_sb-&amp;gt;s_root &amp;amp;&amp;amp;
 		    inode != inode-&amp;gt;i_sb-&amp;gt;s_root-&amp;gt;d_inode &amp;amp;&amp;amp;
-		    (bits &amp;amp; MDS_INODELOCK_LOOKUP))
+		    (bits &amp;amp; (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)))
 			ll_invalidate_aliases(inode);
                 iput(inode);
                 break;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Here is test results.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;current master
# ./test.sh 
+ TESTDIR=/lustre/linkcount_test
+ &apos;[&apos; -d /lustre/linkcount_test &apos;]&apos;
+ rm -rf /lustre/linkcount_test
+ mkdir /lustre/linkcount_test
+ cd /lustre/linkcount_test
+ ls
+ mkdir a b
+ ls
a  b
+ ls -adl /lustre/linkcount_test
drwxr-xr-x 4 root root 4096 Apr 30 07:37 /lustre/linkcount_test
          ^^^
          the link count is updated correctly.
+ rmdir b
+ ls -ald
drwxr-xr-x 3 root root 4096 Apr 30 07:37 .
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;remove &quot;MDS_INODELOCK_PERM&quot; above codes and revert previous code on the client.
# ./test.sh
+ TESTDIR=/lustre/linkcount_test
+ &apos;[&apos; -d /lustre/linkcount_test &apos;]&apos;
+ rm -rf /lustre/linkcount_test
+ mkdir /lustre/linkcount_test
+ cd /lustre/linkcount_test
+ ls
+ mkdir a b
+ ls
a  b
+ ls -adl /lustre/linkcount_test
drwxr-xr-x 2 root root 4096 Apr 30 08:24 /lustre/linkcount_test
          ^^^
          not updated 
+ rmdir b
+ ls -ald
drwxr-xr-x 3 root root 4096 Apr 30 08:24 .
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;We still don&apos;t know, why this is related to not cause this problem and in any case, what is root cause of this problem...&lt;/p&gt;</comment>
                            <comment id="57236" author="pjones" created="Mon, 29 Apr 2013 14:22:45 +0000"  >&lt;p&gt;Nathaniel&lt;/p&gt;

&lt;p&gt;Could you please look into this issue?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="57274" author="adilger" created="Mon, 29 Apr 2013 22:14:44 +0000"  >&lt;p&gt;I did a quick test on my 2.1.3 system and could reproduce this.  Fortunately, the link count is actually correct on another node and on disk, it is just a cache consistency issue on the client.  Note that the link count on the client is &quot;read only&quot;, so this could not lead to inconsistency on the server, but might cause programs like &quot;find&quot; or &quot;ls -R&quot; to miss scanning newly-created subdirectories.&lt;/p&gt;</comment>
                            <comment id="57277" author="ihara" created="Mon, 29 Apr 2013 22:41:23 +0000"  >&lt;p&gt;Andreas,&lt;br/&gt;
yes, the link count is correct if we checked it on another client. And when the cache is clear (lctl set_param ldlm.namespaces.&lt;b&gt;mdc&lt;/b&gt;.lru_size=clear) or umount/mount the Lustre, the link count is updated correctly.&lt;/p&gt;

&lt;p&gt;And, the &quot;find&quot; command detects the incorrect link count; it showed the following warning messages. We saw this issue at the production system often. This was the original problem we found; we then started analysis and made a reproducer.&lt;/p&gt;

&lt;p&gt;find: WARNING: Hard link count is wrong for `./test&apos; (saw only &lt;br/&gt;
st_nlink=5 but we already saw 3 subdirectories): this may be a bug in &lt;br/&gt;
your file system driver.  Automatically tur&lt;br/&gt;
ning on find&apos;s -noleaf option.  Earlier results may have failed to &lt;br/&gt;
include directories that should have been searched.&lt;/p&gt;</comment>
                            <comment id="57530" author="ihara" created="Thu, 2 May 2013 15:54:07 +0000"  >&lt;p&gt;Hi Nathaniel, Andreas, &lt;br/&gt;
Any ideas what this problem is? This quite happens at the production system..&lt;/p&gt;</comment>
                            <comment id="57747" author="adilger" created="Mon, 6 May 2013 18:01:19 +0000"  >&lt;p&gt;I think the root of the problem here is that creating a subdirectory should be revoking the MDS_INODELOCK_UPDATE bit (though allowing the MDS_INODELOCK_LOOKUP bit to remain on the client in a separate lock), and this needs to be re-fetched when doing a &quot;stat&quot; on the inode.  This &lt;em&gt;appears&lt;/em&gt; to be the case:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;    vfs_getattr()
        ll_getattr_it()
            ll_inode_revalidate_it(ibits=(MDS_INODELOCK_UPDATE | MDS_INODELOCK_LOOKUP))
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;but this isn&apos;t getting the correct link count from the MDS.  Just updating the link count on the creating client is not sufficient, since the same problem could exist on other clients also creating directories concurrently.&lt;/p&gt;</comment>
                            <comment id="58001" author="manisha" created="Thu, 9 May 2013 09:12:06 +0000"  >&lt;p&gt;By checking MDS_INODELOCK_UPDATE bit in ll_md_blocking_ast() will solve the issue. &lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;                &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (inode-&amp;gt;i_sb-&amp;gt;s_root &amp;amp;&amp;amp;
                    inode != inode-&amp;gt;i_sb-&amp;gt;s_root-&amp;gt;d_inode &amp;amp;&amp;amp;
                    (bits &amp;amp; (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE)))
                        ll_invalidate_aliases(inode);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The ll_invalidate_aliases()  will make the dentry invalid. Now as the part of revalidation , it will get the inode from MDS and will have correct nlink count.&lt;br/&gt;
I tried with this fix on mounting two clients creating directories inside same parent directory simultaneously. And it shows correct nlink count from both the clients.&lt;/p&gt;</comment>
                            <comment id="58097" author="adilger" created="Thu, 9 May 2013 23:21:22 +0000"  >&lt;p&gt;In addition to the question of whether this change will cause the inode to be invalidated, there is the separate question of whether it will remain cached on the client when the inode is not being modified...  Can you please improve the test case to check whether repeated &quot;stat&quot; calls on either mountpoint do not result in extra lock enqueues on the MDS.&lt;/p&gt;</comment>
                            <comment id="58133" author="manisha" created="Fri, 10 May 2013 12:21:40 +0000"  >&lt;p&gt;Hi Andreas,&lt;br/&gt;
I have performed some tests as you suggested. From the test results my observation is:&lt;br/&gt;
With the suggested fix , inode gets invalidated on mkdir. Then stat on the directory will revalidate the inode by fetching it from MDS and will update the client&apos;s local cache. On the repetitive stat do  not result in extra lock enqueues and uses the local cache inode only.  &lt;/p&gt;

&lt;p&gt;Following test results will verify the above claims.&lt;br/&gt;
(Client1 = C1, Client2 = C2)&lt;br/&gt;
Initially ldlm_stats is clean:&lt;br/&gt;
C1:&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@pun-lsfs21-node07 lustre-release&amp;#93;&lt;/span&gt;# cat /proc/fs/lustre/mdt/lustre-MDT0000/exports/0&amp;#64;lo/ldlm_stats &lt;br/&gt;
snapshot_time             1368185120.170741 secs.usecs&lt;/p&gt;

&lt;p&gt;C2:&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@pun-lsfs21-node07 lustre-release&amp;#93;&lt;/span&gt;# cat /proc/fs/lustre/mdt/lustre-MDT0000/exports/192.168.100.183&amp;#64;tcp/ldlm_stats &lt;br/&gt;
snapshot_time             1368185109.744530 secs.usecs&lt;/p&gt;

&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;Run stat on C1 which will bring inode to its local cache.&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;C1: stat /mnt/lustre (One lock request from C1 gets en-queued on mds)&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@pun-lsfs21-node07 lustre-release&amp;#93;&lt;/span&gt;# cat /proc/fs/lustre/mdt/lustre-MDT0000/exports/0&amp;#64;lo/ldlm_stats &lt;br/&gt;
snapshot_time             1368185270.884595 secs.usecs&lt;br/&gt;
ldlm_enqueue              1 samples &lt;span class=&quot;error&quot;&gt;&amp;#91;reqs&amp;#93;&lt;/span&gt;&lt;/p&gt;

&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;mkdir on C2 which will make lock cancel on client C1 and will invalidate the inode on client C2.&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;C2: mkdir /mnt/lustre/a (One lock request from C2 gets en-queued on mds and Lock cancel request from C1)&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@pun-lsfs21-node07 lustre-release&amp;#93;&lt;/span&gt;# cat /proc/fs/lustre/mdt/lustre-MDT0000/exports/192.168.100.183&amp;#64;tcp/ldlm_stats &lt;br/&gt;
snapshot_time             1368185411.223637 secs.usecs&lt;br/&gt;
ldlm_enqueue              1 samples &lt;span class=&quot;error&quot;&gt;&amp;#91;reqs&amp;#93;&lt;/span&gt;&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@pun-lsfs21-node07 lustre-release&amp;#93;&lt;/span&gt;# cat /proc/fs/lustre/mdt/lustre-MDT0000/exports/0&amp;#64;lo/ldlm_stats &lt;br/&gt;
snapshot_time             1368185406.840111 secs.usecs&lt;br/&gt;
ldlm_enqueue              1 samples &lt;span class=&quot;error&quot;&gt;&amp;#91;reqs&amp;#93;&lt;/span&gt;&lt;br/&gt;
ldlm_cancel               1 samples &lt;span class=&quot;error&quot;&gt;&amp;#91;reqs&amp;#93;&lt;/span&gt;&lt;br/&gt;
ldlm_bl_callback          1 samples &lt;span class=&quot;error&quot;&gt;&amp;#91;reqs&amp;#93;&lt;/span&gt;&lt;/p&gt;

&lt;p&gt;Again stat on C2 will revalidate the inode and will get updated inode contents from MDS:&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@pun-lsfs21-node07 lustre-release&amp;#93;&lt;/span&gt;# cat /proc/fs/lustre/mdt/lustre-MDT0000/exports/192.168.100.183&amp;#64;tcp/ldlm_stats &lt;br/&gt;
snapshot_time             1368187928.914755 secs.usecs&lt;br/&gt;
ldlm_enqueue              2 samples &lt;span class=&quot;error&quot;&gt;&amp;#91;reqs&amp;#93;&lt;/span&gt;&lt;/p&gt;
</comment>
                            <comment id="58599" author="shadow" created="Wed, 15 May 2013 18:16:56 +0000"  >&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;                &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (inode-&amp;gt;i_sb-&amp;gt;s_root &amp;amp;&amp;amp;
                    inode != inode-&amp;gt;i_sb-&amp;gt;s_root-&amp;gt;d_inode &amp;amp;&amp;amp;
                    (bits &amp;amp; (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE)))
                        ll_invalidate_aliases(inode);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;using a update bit to invalidate aliases is wrong. because it&apos;s bit should be protect an attributes, not a name - but you tried to remove whole information from a dcache, in that case new lookup will triggered. it&apos;s solve that particular issue, but too bad at all as need to send an additional rpc in case we need just lookup name without attributes (or need minimal attributes subset to process open an security checks).&lt;/p&gt;

&lt;p&gt;that bug may reproduced easy&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;tmp/test&amp;gt; mkdir newdir; ls; find . -print
newdir
.
find: WARNING: Hard link count is wrong &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; `.&lt;span class=&quot;code-quote&quot;&gt;&apos; (saw only st_nlink=2 but we already saw 0 subdirectories): &lt;span class=&quot;code-keyword&quot;&gt;this&lt;/span&gt; may be a bug in your file system driver. Automatically turning on find&apos;&lt;/span&gt;s -noleaf option. Earlier results may have failed to include directories that should have been searched.
./newdir
tmp/test&amp;gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;


&lt;p&gt;Xyratex plan to start work on that issue tomorrow.&lt;/p&gt;</comment>
                            <comment id="58601" author="jhammond" created="Wed, 15 May 2013 18:24:13 +0000"  >&lt;p&gt;Working on another issue I noticed that ll_have_me_lock() cannot clear MDS_INODELOCK_PERM from *bits since MDS_INODELOCK_MAXSHIFT is 4, while MDS_INODELOCK_PERM is 0x10:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;int ll_have_md_lock(struct inode *inode, __u64 *bits,  ldlm_mode_t l_req_mode)
{
        struct lustre_handle lockh;
        ldlm_policy_data_t policy;
        ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
                                (LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
        struct lu_fid *fid;
        __u64 flags;
        int i;
        ENTRY;

        if (!inode)
               RETURN(0);

        fid = &amp;amp;ll_i2info(inode)-&amp;gt;lli_fid;
        CDEBUG(D_INFO, &quot;trying to match res &quot;DFID&quot; mode %s\n&quot;, PFID(fid),
               ldlm_lockname[mode]);

        flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
        for (i = 0; i &amp;lt; MDS_INODELOCK_MAXSHIFT &amp;amp;&amp;amp; *bits != 0; i++) {
                policy.l_inodebits.bits = *bits &amp;amp; (1 &amp;lt;&amp;lt; i);
                if (policy.l_inodebits.bits == 0)
                        continue;
...
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="58642" author="shadow" created="Thu, 16 May 2013 09:03:09 +0000"  >&lt;p&gt;Based in Panda (Andrew Perepechko &amp;lt;andrew_perepechko@xyratex.com&amp;gt;) initial investigation problem is &lt;/p&gt;
&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;client have both open and lookup|update lock&lt;/li&gt;
	&lt;li&gt;creating file drops a lookup|update lock&lt;/li&gt;
	&lt;li&gt;readdir open a file via ll_local_open()&lt;/li&gt;
	&lt;li&gt;readdir get a UPDATE lock to protect directory page cache, but forget to update a inode attributes as someone forget about update lock protects attributes except needs to open and security checks.&lt;/li&gt;
	&lt;li&gt;mdt lost a part of intent handler (exist in 1.8 code) so don&apos;t send a inode body over network with update lock.&lt;br/&gt;
-.....&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;BUG hit.&lt;/p&gt;

&lt;p&gt;so.. drop openlock will caused full revalidate inode and ll_intent_open call so attributes will updates correctly.&lt;br/&gt;
or (and) ll_readdir need correct invalidate function to update inode body after lock grant, and need invested mdt part. 1.8 have full intent with lock replace and sending inode body similar to getattr - but 2.x lost that part.&lt;/p&gt;


&lt;p&gt;ps. readdir forget update a inode attributes from a 1.4 (i don&apos;t have early version on quick access).&lt;/p&gt;</comment>
                            <comment id="58736" author="shadow" created="Fri, 17 May 2013 04:58:19 +0000"  >&lt;p&gt;in discussion with green, we found - readdir take a LCK_CR mode lock. it mode should be don&apos;t matched in revalidate (in theory), if we will able remove skipping revalidate we will have valid attributes in stat() syscall.&lt;/p&gt;</comment>
                            <comment id="58740" author="shadow" created="Fri, 17 May 2013 06:58:39 +0000"  >&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;bash-3.2$ git diff
diff --git a/lustre/llite/dir.c b/lustre/llite/dir.c
index 24ba16d..77ee138 100644
--- a/lustre/llite/dir.c
+++ b/lustre/llite/dir.c
@@ -368,7 +368,7 @@ struct page *ll_get_dir_page(struct file *filp, struct inode *dir, __u64 hash,
         struct ll_inode_info *lli = ll_i2info(dir);
         &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; hash64 = ll_i2sbi(dir)-&amp;gt;ll_flags &amp;amp; LL_SBI_64BIT_HASH;
 
-        mode = LCK_PR;
+        mode = LCK_CR;
         rc = md_lock_match(ll_i2sbi(dir)-&amp;gt;ll_md_exp, LDLM_FL_BLOCK_GRANTED,
                            ll_inode2fid(dir), LDLM_IBITS, &amp;amp;policy, mode, &amp;amp;lockh);
         &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (!rc) {
diff --git a/lustre/mdc/mdc_locks.c b/lustre/mdc/mdc_locks.c
index 84389c8..21dbecd 100644
--- a/lustre/mdc/mdc_locks.c
+++ b/lustre/mdc/mdc_locks.c
@@ -923,15 +923,19 @@ &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
         struct lustre_handle lockh;
         ldlm_policy_data_t policy;
         ldlm_mode_t mode;
+       uint64_t lock_mode = LCK_PR|LCK_PW;
         ENTRY;
 
         fid_build_reg_res_name(fid, &amp;amp;res_id);
         policy.l_inodebits.bits = (it-&amp;gt;it_op == IT_GETATTR) ?
                                   MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP;
 
+       /* don&apos;t match agains open lock, that is have no guaranted to atributes
+        * to be valid */
+       lock_mode |= it-&amp;gt;it_op == IT_GETATTR ? 0 : LCK_CW | LCK_CR;
         mode = ldlm_lock_match(exp-&amp;gt;exp_obd-&amp;gt;obd_namespace,
                                LDLM_FL_BLOCK_GRANTED, &amp;amp;res_id, LDLM_IBITS,
-                               &amp;amp;policy, LCK_CR|LCK_CW|LCK_PR|LCK_PW, &amp;amp;lockh, 0);
+                               &amp;amp;policy, lock_mode, &amp;amp;lockh, 0);
         &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (mode) {
                 it-&amp;gt;d.lustre.it_lock_handle = lockh.cookie;
                 it-&amp;gt;d.lustre.it_lock_mode = mode;


&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;that patch restore ~1.8.6 behavior (request LCK_CR instead of LCK_PR in readdir) and fix issue when attributes request matched against openlock.&lt;/p&gt;

&lt;p&gt;my results after it&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;[root@rhel6-64 tests]# 
[root@rhel6-64 tmp]# sh bug1.sh 
a  b
drwxr-xr-x 4 root root 4096 May 17 09:53 /mnt/lustre/linkcount_test
drwxr-xr-x 3 root root 4096 May 17 09:53 .
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Manisha,&lt;/p&gt;

&lt;p&gt;may you test that patch and verify it?&lt;/p&gt;</comment>
                            <comment id="58744" author="shadow" created="Fri, 17 May 2013 07:49:18 +0000"  >&lt;p&gt;first part of bug introduced in cmd3 branch.&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
Index: dir.c
===================================================================
RCS file: /lustre/lustre-core/llite/dir.c,v
retrieving revision 1.54.2.7.10.6.2.1.4.4.2.38
retrieving revision 1.54.2.7.10.6.2.1.4.4.2.39
diff -u -r1.54.2.7.10.6.2.1.4.4.2.38 -r1.54.2.7.10.6.2.1.4.4.2.39
--- dir.c       13 Oct 2006 12:32:11 -0000      1.54.2.7.10.6.2.1.4.4.2.38
+++ dir.c       20 Oct 2006 11:27:35 -0000      1.54.2.7.10.6.2.1.4.4.2.39
@@ -278,14 +278,20 @@
         ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_UPDATE} };
         struct address_space *mapping = dir-&amp;gt;i_mapping;
         struct lustre_handle lockh;
-        struct page *page;
         struct lu_dirpage *dp;
+        struct page *page;
+        ldlm_mode_t mode;
         &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; rc;
         __u32 start;
         __u32 end;
 
+#ifdef CONFIG_PDIROPS
+        mode = LCK_PR;
+#&lt;span class=&quot;code-keyword&quot;&gt;else&lt;/span&gt;
+        mode = LCK_CR;
+#endif
         rc = md_lock_match(ll_i2sbi(dir)-&amp;gt;ll_md_exp, LDLM_FL_BLOCK_GRANTED,
-                           ll_inode2fid(dir), LDLM_IBITS, &amp;amp;policy, LCK_CR, &amp;amp;lockh);
+                           ll_inode2fid(dir), LDLM_IBITS, &amp;amp;policy, mode, &amp;amp;lockh);
         &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (!rc) {
                 struct lookup_intent it = { .it_op = IT_READDIR };
                 struct ptlrpc_request *request;
@@ -296,7 +302,7 @@
                         &lt;span class=&quot;code-keyword&quot;&gt;return&lt;/span&gt; ERR_PTR(-ENOMEM);
 
                 rc = md_enqueue(ll_i2sbi(dir)-&amp;gt;ll_md_exp, LDLM_IBITS, &amp;amp;it,
-                                LCK_CR, op_data, &amp;amp;lockh, NULL, 0,
+                                mode, op_data, &amp;amp;lockh, NULL, 0,
                                 ldlm_completion_ast, ll_md_blocking_ast, dir,
                                 0);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;and put in HEAD with b_post_cmd3 merge.&lt;/p&gt;</comment>
                            <comment id="59146" author="manisha" created="Thu, 23 May 2013 06:05:24 +0000"  >&lt;p&gt;Verified the patch , works fine.&lt;/p&gt;</comment>
                            <comment id="59319" author="adilger" created="Fri, 24 May 2013 23:48:52 +0000"  >&lt;p&gt;The patch in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3385&quot; title=&quot;missing permission lock check in ll_have_md_lock.&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3385&quot;&gt;&lt;del&gt;LU-3385&lt;/del&gt;&lt;/a&gt; &lt;a href=&quot;http://review.whamcloud.com/6438&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6438&lt;/a&gt; fixes the problem with MDS_INODELOCK_MAXSHIFT in 2.4, though I don&apos;t think this is relevant for 2.1.  This bug was introduced in 2.2 via commit cc283673 (&lt;a href=&quot;http://review.whamcloud.com/1170&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/1170&lt;/a&gt;), but wasn&apos;t causing any problems until 2.4, because the higher MDS_INODELOCK bits weren&apos;t used on the clients.&lt;/p&gt;</comment>
                            <comment id="59320" author="adilger" created="Fri, 24 May 2013 23:53:33 +0000"  >&lt;p&gt;Shadow, can you please submit that patch to Gerrit?&lt;/p&gt;</comment>
                            <comment id="59366" author="aboyko" created="Mon, 27 May 2013 10:44:50 +0000"  >&lt;p&gt;I have submitted patch &lt;a href=&quot;http://review.whamcloud.com/6460&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6460&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="60091" author="paf" created="Thu, 6 Jun 2013 14:40:59 +0000"  >&lt;p&gt;We&apos;re monitoring this one from the Cray side...  I see the patch failed one of the sanityn tests, specifically test_13.&lt;/p&gt;

&lt;p&gt;That&apos;s &quot;test directory page revocation&quot;, and at least seems relevant to this patch.  Thoughts/updates?&lt;/p&gt;</comment>
                            <comment id="60341" author="shadow" created="Tue, 11 Jun 2013 12:52:12 +0000"  >&lt;p&gt;Correct, that is related to that patch - the unlink modification doesn&apos;t flush the page cache. That may be a solution for the lock ping-pong when unlinking a file in parallel to the create, but the test failed.&lt;br/&gt;
I will prepare a new patch that correctly updates the attributes after taking a lock from the MDT.&lt;/p&gt;</comment>
                            <comment id="65709" author="ihara" created="Wed, 4 Sep 2013 10:03:47 +0000"  >&lt;p&gt;Any updates on this, new patches?&lt;/p&gt;</comment>
                            <comment id="65710" author="shadow" created="Wed, 4 Sep 2013 10:06:06 +0000"  >&lt;p&gt;I pushed a refreshed version a few days ago.&lt;/p&gt;</comment>
                            <comment id="65827" author="patrick.valentin" created="Thu, 5 Sep 2013 12:13:31 +0000"  >&lt;p&gt;I tested set 4 of patch &lt;a href=&quot;http://review.whamcloud.com/6460&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6460&lt;/a&gt; on a lustre 2.4.0 client, and it also fixes the issue described in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3849&quot; title=&quot;Client cache directory information is not updated&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3849&quot;&gt;&lt;del&gt;LU-3849&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="68455" author="dmiter" created="Sat, 5 Oct 2013 19:56:38 +0000"  >&lt;p&gt;I tested Patch Set 5 of &lt;a href=&quot;http://review.whamcloud.com/6460&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6460&lt;/a&gt; with master; it partially solves the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4050&quot; title=&quot;NFS reexport issue&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4050&quot;&gt;&lt;del&gt;LU-4050&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="68739" author="spitzcor" created="Thu, 10 Oct 2013 13:25:38 +0000"  >&lt;p&gt;Should &apos;Fix Version/s&apos; == 2.5.0?&lt;/p&gt;</comment>
                            <comment id="68848" author="pjones" created="Fri, 11 Oct 2013 18:02:06 +0000"  >&lt;p&gt;Cory yes that would be more clear&lt;/p&gt;</comment>
                            <comment id="68867" author="jlevi" created="Fri, 11 Oct 2013 22:25:26 +0000"  >&lt;p&gt;Eliminating the extra rpc when revalidating remote objects is being handled in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4095&quot; title=&quot;Eliminate extra rpc when revalidating remote objects&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4095&quot;&gt;&lt;del&gt;LU-4095&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="68893" author="sebastien.buisson" created="Mon, 14 Oct 2013 07:38:35 +0000"  >&lt;p&gt;I am sorry, but in addition to patch &lt;a href=&quot;http://review.whamcloud.com/6460&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6460&lt;/a&gt; that was landed on master, I can also see patch &lt;a href=&quot;http://review.whamcloud.com/7910&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/7910&lt;/a&gt; merged at the same time.&lt;br/&gt;
Where this patch is coming from? Is this a mandatory part of the solution to this bug? Or is this just a bonus patch?&lt;/p&gt;

&lt;p&gt;TIA,&lt;br/&gt;
Sebastien.&lt;/p&gt;</comment>
                            <comment id="69127" author="paf" created="Wed, 16 Oct 2013 15:52:20 +0000"  >&lt;p&gt;Sebastien,&lt;/p&gt;

&lt;p&gt;As I understand it, that second patch is not required.  Oleg commented (I can&apos;t find where right now) that it was unnecessary to send the parent FID when doing that operation, as the child FID is sufficient.  I don&apos;t believe it fixes any specific problem, it&apos;s just cleanup/improvement.&lt;/p&gt;

&lt;p&gt;To summarize:&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/6460&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6460&lt;/a&gt; is, by itself, the full fix for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3240&quot; title=&quot;The link count is not updated after the mkdir&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3240&quot;&gt;&lt;del&gt;LU-3240&lt;/del&gt;&lt;/a&gt;.  7910 is as you said - a bonus patch that came out of the work for 6460.&lt;/p&gt;</comment>
                            <comment id="71783" author="bogl" created="Mon, 18 Nov 2013 14:27:59 +0000"  >&lt;p&gt;backport to b2_4&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/8317&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/8317&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="72110" author="yujian" created="Fri, 22 Nov 2013 08:17:31 +0000"  >&lt;p&gt;The patch was cherry-picked to Lustre b2_4 branch.&lt;/p&gt;</comment>
                            <comment id="73918" author="shadow" created="Fri, 20 Dec 2013 08:57:34 +0000"  >&lt;p&gt;please reopen the ticket, due to a regression with open introduced by Oleg&apos;s last changes&lt;br/&gt;
and the removal of one large and important part of the original patch.&lt;/p&gt;

&lt;p&gt;ps. it&apos;s strange - but I&apos;m not able to reopen the ticket.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="20662">LU-3849</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="19125">LU-3385</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="21237">LU-4050</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="22543">LU-4405</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="21381">LU-4095</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvp87:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>7956</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>