<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:43:03 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4474] deadlock of ldiskfs_quota_off()</title>
                <link>https://jira.whamcloud.com/browse/LU-4474</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;When I ran quotacheck directly on ldiskfs, the process stuck. Following is the dump stack. The ext4_quota_off() function added by ext4-quota-force-block-alloc-quotaoff.patch tried to grab read lock of sb-&amp;gt;s_umount. But it stuck there because it was holding write lock of sb-&amp;gt;s_umount.&lt;/p&gt;

&lt;p&gt;COMMAND: &quot;quotacheck&quot; &lt;br/&gt;
   TASK: ffff88010286aae0  &lt;span class=&quot;error&quot;&gt;&amp;#91;THREAD_INFO: ffff880102610000&amp;#93;&lt;/span&gt;&lt;br/&gt;
    CPU: 0&lt;br/&gt;
  STATE: TASK_UNINTERRUPTIBLE &lt;br/&gt;
crash&amp;gt; bt&lt;br/&gt;
PID: 21560  TASK: ffff88010286aae0  CPU: 0   COMMAND: &quot;quotacheck&quot; &lt;br/&gt;
 #0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ffff880102611b60&amp;#93;&lt;/span&gt; schedule at ffffffff8150c432&lt;br/&gt;
 #1 &lt;span class=&quot;error&quot;&gt;&amp;#91;ffff880102611c28&amp;#93;&lt;/span&gt; rwsem_down_failed_common at ffffffff8150e9e5&lt;br/&gt;
 #2 &lt;span class=&quot;error&quot;&gt;&amp;#91;ffff880102611c88&amp;#93;&lt;/span&gt; rwsem_down_read_failed at ffffffff8150eb76&lt;br/&gt;
 #3 &lt;span class=&quot;error&quot;&gt;&amp;#91;ffff880102611cc8&amp;#93;&lt;/span&gt; call_rwsem_down_read_failed at ffffffff81281b94&lt;br/&gt;
 #4 &lt;span class=&quot;error&quot;&gt;&amp;#91;ffff880102611d30&amp;#93;&lt;/span&gt; ldiskfs_quota_off at ffffffffa02fe115 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 #5 &lt;span class=&quot;error&quot;&gt;&amp;#91;ffff880102611d90&amp;#93;&lt;/span&gt; ldiskfs_remount at ffffffffa02fd41d &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 #6 &lt;span class=&quot;error&quot;&gt;&amp;#91;ffff880102611e60&amp;#93;&lt;/span&gt; do_remount_sb at ffffffff811825bc&lt;br/&gt;
 #7 &lt;span class=&quot;error&quot;&gt;&amp;#91;ffff880102611ea0&amp;#93;&lt;/span&gt; do_mount at ffffffff811a3915&lt;br/&gt;
 #8 &lt;span class=&quot;error&quot;&gt;&amp;#91;ffff880102611f20&amp;#93;&lt;/span&gt; sys_mount at ffffffff811a3c10&lt;br/&gt;
 #9 &lt;span class=&quot;error&quot;&gt;&amp;#91;ffff880102611f80&amp;#93;&lt;/span&gt; system_call_fastpath at ffffffff8100b072&lt;br/&gt;
    RIP: 00007f97e757ee0a  RSP: 00007fffe66e9388  RFLAGS: 00010297&lt;br/&gt;
    RAX: 00000000000000a5  RBX: ffffffff8100b072  RCX: 0000000000000001&lt;br/&gt;
    RDX: 00007f97e9f02940  RSI: 00007f97e9f01740  RDI: 0000000000000000&lt;br/&gt;
    RBP: 00007f97e822b4a0   R8: 0000000000000000   R9: 0000000000000000&lt;br/&gt;
    R10: 00000000c0ed0021  R11: 0000000000000206  R12: 00007f97e7ea7047&lt;br/&gt;
    R13: 00007f97e7ea5c00  R14: 0000000000000001  R15: 00007f97e822b4a0&lt;br/&gt;
    ORIG_RAX: 00000000000000a5  CS: 0033  SS: 002b&lt;br/&gt;
crash&amp;gt; &lt;/p&gt;

&lt;p&gt;Following is how to reproduce it. Please note that the version of quota-tools is 4.0.1. Somehow quota-tools of version 3.17 refuses to quotacheck a ldiskfs mount point.&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@vm1 quota-tools&amp;#93;&lt;/span&gt;# mount -t ldiskfs /dev/sdb3 -o usrquota /mnt/&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@vm1 quota-tools&amp;#93;&lt;/span&gt;# ./quotacheck /mnt/ -f&lt;br/&gt;
.&lt;/p&gt;</description>
                <environment></environment>
        <key id="22713">LU-4474</key>
            <summary>deadlock of ldiskfs_quota_off()</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="niu">Niu Yawei</assignee>
                                    <reporter username="lixi">Li Xi</reporter>
                        <labels>
                            <label>ldiskfs</label>
                    </labels>
                <created>Sat, 11 Jan 2014 15:14:09 +0000</created>
                <updated>Thu, 29 Sep 2016 17:52:18 +0000</updated>
                            <resolved>Thu, 29 Sep 2016 17:52:18 +0000</resolved>
                                                    <fixVersion>Lustre 2.9.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="74773" author="lixi" created="Sat, 11 Jan 2014 15:14:40 +0000"  >&lt;p&gt;I am not sure, but maybe this problem could be fixed by removing the down_read(&amp;amp;sb-&amp;gt;s_umount)? It seems that a write lock of sb-&amp;gt;s_umount is always being held when ext4_quota_off() is called. Please correct me if I am wrong. &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/smile.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/p&gt;</comment>
                            <comment id="74774" author="pjones" created="Sat, 11 Jan 2014 15:27:46 +0000"  >&lt;p&gt;Niu&lt;/p&gt;

&lt;p&gt;Could you please comment?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="74793" author="niu" created="Mon, 13 Jan 2014 01:05:29 +0000"  >&lt;p&gt;Hi, LiXi&lt;/p&gt;

&lt;p&gt;What kernel version did you use? I didn&apos;t see how ldiskfs_remount calls ldiskfs_quota_off in my kernel (2.6.32-279).&lt;/p&gt;

&lt;p&gt;BTW: Why do you need a quotacheck on ldiskfs? Isn&apos;t it obsoleted?&lt;/p&gt;</comment>
                            <comment id="74794" author="lixi" created="Mon, 13 Jan 2014 01:31:56 +0000"  >&lt;p&gt;Hi Yawei,&lt;/p&gt;

&lt;p&gt;I am using linux-2.6.32-358.18.1.el6.x86_64. vfs_dq_off() is called in ext4_remount().&lt;/p&gt;

&lt;p&gt;static int ext4_remount(struct super_block *sb, int *flags, char *data)&lt;br/&gt;
{&lt;br/&gt;
	...&lt;br/&gt;
        if ((*flags &amp;amp; MS_RDONLY) != (sb-&amp;gt;s_flags &amp;amp; MS_RDONLY) ||&lt;br/&gt;
                n_blocks_count &amp;gt; ext4_blocks_count(es)) {&lt;br/&gt;
                if (sbi-&amp;gt;s_mount_flags &amp;amp; EXT4_MF_FS_ABORTED) &lt;/p&gt;
{
                        err = -EROFS;
                        goto restore_opts;
                }

&lt;p&gt;                if (*flags &amp;amp; MS_RDONLY) {&lt;br/&gt;
                        err = vfs_dq_off(sb, 1);&lt;br/&gt;
                        if (err &amp;lt; 0 &amp;amp;&amp;amp; err != -ENOSYS) &lt;/p&gt;
{
                                err = -EBUSY;
                                goto restore_opts;
                        }

&lt;p&gt;                        /*&lt;/p&gt;
&lt;ul&gt;
	&lt;li&gt;First of all, the unconditional stuff we have to do&lt;/li&gt;
	&lt;li&gt;to disable replay of the journal when we next remount&lt;br/&gt;
                         */&lt;br/&gt;
                        sb-&amp;gt;s_flags |= MS_RDONLY;&lt;br/&gt;
...&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;Yeah, I understand this is not a common operation for Lustre. I thought we can use ldiskfs as a normal file system, so I ran quota utility directly on ldiskfs. If it is not proper to do so, do you have any suggestions about how to check the quota mechanism of ldiskfs? As you know, I am working on a quota stuff. Quota codes of ldiskfs have to be updated for support. Thanks!&lt;/p&gt;</comment>
                            <comment id="74805" author="niu" created="Mon, 13 Jan 2014 04:18:13 +0000"  >&lt;p&gt;quota accounting is always enabled on ldiskfs (see ext4-quota-first-class.patch), so quotacheck is no longer needed. (if the filesystem is upgraded from old ldiskfs which doesn&apos;t have EXT4_FEATURE_RO_COMPAT_QUOTA feature, the quota accounting will be updated automatically when you enable the feature by &apos;tune2fs -O quota&apos;)&lt;/p&gt;</comment>
                            <comment id="74807" author="lixi" created="Mon, 13 Jan 2014 04:27:11 +0000"  >&lt;p&gt;Ah, thank you! Do you think this problem could be triggered by any operation from Lustre or not? I think it would be better to fix this problem anyway if it is not too difficult.&lt;/p&gt;</comment>
                            <comment id="74811" author="niu" created="Mon, 13 Jan 2014 06:11:59 +0000"  >&lt;p&gt;I think it could probably be triggered in Lustre, but I didn&apos;t see why it was never reported before.&lt;/p&gt;

&lt;p&gt;Looks it can&apos;t be fixed simply by removing down_read(s_umount), because I didn&apos;t see s_umount is held in quotactl path (do_quotactl()). Could you try to search to see if it&apos;s already fixed in upstream?&lt;/p&gt;</comment>
                            <comment id="74884" author="niu" created="Tue, 14 Jan 2014 02:00:11 +0000"  >&lt;p&gt;Looks it has been fixed in upstream:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;It must be already acquired by caller. Fix lock_dep complain.

Signed-off-by: Dmitry Monakhov &amp;lt;dmon...@gmail.com&amp;gt;
Reviewed-by: Jan Kara &amp;lt;ja...@suse.cz&amp;gt;

diff --git a/fs/ext4/&lt;span class=&quot;code-keyword&quot;&gt;super&lt;/span&gt;.c b/fs/ext4/&lt;span class=&quot;code-keyword&quot;&gt;super&lt;/span&gt;.c
index e59eb37..062d1bc 100644
--- a/fs/ext4/&lt;span class=&quot;code-keyword&quot;&gt;super&lt;/span&gt;.c
+++ b/fs/ext4/&lt;span class=&quot;code-keyword&quot;&gt;super&lt;/span&gt;.c
@@ -4107,12 +4107,10 @@ &lt;span class=&quot;code-keyword&quot;&gt;static&lt;/span&gt; &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; ext4_quota_on(struct super_block *sb, &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt;
type, &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; format_id,

 &lt;span class=&quot;code-keyword&quot;&gt;static&lt;/span&gt; &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; ext4_quota_off(struct super_block *sb, &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; type)
 {
-	&lt;span class=&quot;code-comment&quot;&gt;/* Force all delayed allocation blocks to be allocated */&lt;/span&gt;
-	&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (test_opt(sb, DELALLOC)) {
-		down_read(&amp;amp;sb-&amp;gt;s_umount);
+	/* Force all delayed allocation blocks to be allocated.
+	 * Caller already holds s_umount sem */
+	&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (test_opt(sb, DELALLOC))
 		sync_filesystem(sb);
-		up_read(&amp;amp;sb-&amp;gt;s_umount);
-	}

 	&lt;span class=&quot;code-keyword&quot;&gt;return&lt;/span&gt; dquot_quota_off(sb, type);
 }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I&apos;ll backport it to Lustre, thank you, LiXi.&lt;/p&gt;</comment>
                            <comment id="74889" author="lixi" created="Tue, 14 Jan 2014 02:16:13 +0000"  >&lt;p&gt;Ah, Yawei, thank you so much for investigating this!&lt;/p&gt;</comment>
                            <comment id="74892" author="niu" created="Tue, 14 Jan 2014 03:45:58 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/8828&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/8828&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="75158" author="lixi" created="Fri, 17 Jan 2014 07:21:04 +0000"  >&lt;p&gt;Hi Yawei,&lt;/p&gt;

&lt;p&gt;I found following messages when running mkfs.lustre on a server which has applied your patch. It seems sync_filesystem() complained that: WARN_ON(!rwsem_is_locked(&amp;amp;sb-&amp;gt;s_umount));&lt;/p&gt;

&lt;p&gt;Do you think this related to the patch?&lt;/p&gt;

&lt;p&gt;Thanks!&lt;/p&gt;

&lt;p&gt;Jan 16 13:36:53 vm1 kernel: LDISKFS-fs (sdb3): mounted filesystem with ordered data mode. quota=on. Opts: &lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: -----------&lt;del&gt;[ cut here ]&lt;/del&gt;-----------&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: WARNING: at fs/sync.c:64 sync_filesystem+0x63/0x70() (Tainted: G        W  ---------------   )&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Hardware name: KVM&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Modules linked in: ldiskfs sha512_generic sha256_generic bridge stp llc nfs lockd fscache auth_rpcgss nfs_acl sunrpc ipv6 microcode virtio_balloon 8139too 8139cp mii i2c_piix4 i2c_core sg ext4 jbd2 mbcache sd_mod crc_t10dif pata_acpi ata_generic ata_piix virtio_pci virtio_ring virtio dm_mirror dm_region_hash dm_log dm_mod &lt;span class=&quot;error&quot;&gt;&amp;#91;last unloaded: libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Pid: 19717, comm: mkfs.lustre Tainted: G        W  ---------------    2.6.32 #4&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Call Trace:&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e3e7&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_common+0x87/0xc0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e43a&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_null+0x1a/0x20&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811b1893&amp;gt;&amp;#93;&lt;/span&gt; ? sync_filesystem+0x63/0x70&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa02e7048&amp;gt;&amp;#93;&lt;/span&gt; ? ldiskfs_quota_off+0x218/0x220 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81183496&amp;gt;&amp;#93;&lt;/span&gt; ? deactivate_super+0x46/0x80&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a17ef&amp;gt;&amp;#93;&lt;/span&gt; ? mntput_no_expire+0xbf/0x110&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a225b&amp;gt;&amp;#93;&lt;/span&gt; ? sys_umount+0x7b/0x3a0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100b072&amp;gt;&amp;#93;&lt;/span&gt; ? system_call_fastpath+0x16/0x1b&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: --&lt;del&gt;[ end trace 7cd192156324e646 ]&lt;/del&gt;--&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: -----------&lt;del&gt;[ cut here ]&lt;/del&gt;-----------&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: WARNING: at fs/fs-writeback.c:1051 writeback_inodes_sb_nr+0x94/0xa0() (Tainted: G        W  ---------------   )&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Hardware name: KVM&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Modules linked in: ldiskfs sha512_generic sha256_generic bridge stp llc nfs lockd fscache auth_rpcgss nfs_acl sunrpc ipv6 microcode virtio_balloon 8139too 8139cp mii i2c_piix4 i2c_core sg ext4 jbd2 mbcache sd_mod crc_t10dif pata_acpi ata_generic ata_piix virtio_pci virtio_ring virtio dm_mirror dm_region_hash dm_log dm_mod &lt;span class=&quot;error&quot;&gt;&amp;#91;last unloaded: libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Pid: 19717, comm: mkfs.lustre Tainted: G        W  ---------------    2.6.32 #4&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Call Trace:&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e3e7&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_common+0x87/0xc0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e43a&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_null+0x1a/0x20&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811ab704&amp;gt;&amp;#93;&lt;/span&gt; ? writeback_inodes_sb_nr+0x94/0xa0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811ab753&amp;gt;&amp;#93;&lt;/span&gt; ? writeback_inodes_sb+0x43/0x50&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811b162e&amp;gt;&amp;#93;&lt;/span&gt; ? __sync_filesystem+0x3e/0x90&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811b186a&amp;gt;&amp;#93;&lt;/span&gt; ? sync_filesystem+0x3a/0x70&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa02e7048&amp;gt;&amp;#93;&lt;/span&gt; ? ldiskfs_quota_off+0x218/0x220 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81183496&amp;gt;&amp;#93;&lt;/span&gt; ? deactivate_super+0x46/0x80&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a17ef&amp;gt;&amp;#93;&lt;/span&gt; ? mntput_no_expire+0xbf/0x110&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a225b&amp;gt;&amp;#93;&lt;/span&gt; ? sys_umount+0x7b/0x3a0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100b072&amp;gt;&amp;#93;&lt;/span&gt; ? system_call_fastpath+0x16/0x1b&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: --&lt;del&gt;[ end trace 7cd192156324e647 ]&lt;/del&gt;--&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: -----------&lt;del&gt;[ cut here ]&lt;/del&gt;-----------&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: WARNING: at fs/fs-writeback.c:548 wb_writeback+0x339/0x3f0() (Tainted: G        W  ---------------   )&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Hardware name: KVM&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Modules linked in: ldiskfs sha512_generic sha256_generic bridge stp llc nfs lockd fscache auth_rpcgss nfs_acl sunrpc ipv6 microcode virtio_balloon 8139too 8139cp mii i2c_piix4 i2c_core sg ext4 jbd2 mbcache sd_mod crc_t10dif pata_acpi ata_generic ata_piix virtio_pci virtio_ring virtio dm_mirror dm_region_hash dm_log dm_mod &lt;span class=&quot;error&quot;&gt;&amp;#91;last unloaded: libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Pid: 18643, comm: flush-8:16 Tainted: G        W  ---------------    2.6.32 #4&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Call Trace:&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e3e7&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_common+0x87/0xc0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e43a&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_null+0x1a/0x20&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811aca49&amp;gt;&amp;#93;&lt;/span&gt; ? wb_writeback+0x339/0x3f0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8150c480&amp;gt;&amp;#93;&lt;/span&gt; ? thread_return+0x4e/0x76e&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81081be2&amp;gt;&amp;#93;&lt;/span&gt; ? del_timer_sync+0x22/0x30&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811acbbb&amp;gt;&amp;#93;&lt;/span&gt; ? wb_do_writeback+0xbb/0x240&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811acda3&amp;gt;&amp;#93;&lt;/span&gt; ? bdi_writeback_task+0x63/0x1b0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81096c67&amp;gt;&amp;#93;&lt;/span&gt; ? bit_waitqueue+0x17/0xd0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8113c1d0&amp;gt;&amp;#93;&lt;/span&gt; ? bdi_start_fn+0x0/0x100&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8113c256&amp;gt;&amp;#93;&lt;/span&gt; ? bdi_start_fn+0x86/0x100&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8113c1d0&amp;gt;&amp;#93;&lt;/span&gt; ? bdi_start_fn+0x0/0x100&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81096a36&amp;gt;&amp;#93;&lt;/span&gt; ? kthread+0x96/0xa0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100c0ca&amp;gt;&amp;#93;&lt;/span&gt; ? child_rip+0xa/0x20&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff810969a0&amp;gt;&amp;#93;&lt;/span&gt; ? kthread+0x0/0xa0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100c0c0&amp;gt;&amp;#93;&lt;/span&gt; ? child_rip+0x0/0x20&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: --&lt;del&gt;[ end trace 7cd192156324e648 ]&lt;/del&gt;--&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: -----------&lt;del&gt;[ cut here ]&lt;/del&gt;-----------&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: WARNING: at fs/fs-writeback.c:1131 sync_inodes_sb+0x169/0x190() (Tainted: G        W  ---------------   )&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Hardware name: KVM&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Modules linked in: ldiskfs sha512_generic sha256_generic bridge stp llc nfs lockd fscache auth_rpcgss nfs_acl sunrpc ipv6 microcode virtio_balloon 8139too 8139cp mii i2c_piix4 i2c_core sg ext4 jbd2 mbcache sd_mod crc_t10dif pata_acpi ata_generic ata_piix virtio_pci virtio_ring virtio dm_mirror dm_region_hash dm_log dm_mod &lt;span class=&quot;error&quot;&gt;&amp;#91;last unloaded: libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Pid: 19717, comm: mkfs.lustre Tainted: G        W  ---------------    2.6.32 #4&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Call Trace:&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e3e7&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_common+0x87/0xc0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e43a&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_null+0x1a/0x20&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811ab8c9&amp;gt;&amp;#93;&lt;/span&gt; ? sync_inodes_sb+0x169/0x190&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811b1672&amp;gt;&amp;#93;&lt;/span&gt; ? __sync_filesystem+0x82/0x90&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811b187b&amp;gt;&amp;#93;&lt;/span&gt; ? sync_filesystem+0x4b/0x70&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa02e7048&amp;gt;&amp;#93;&lt;/span&gt; ? ldiskfs_quota_off+0x218/0x220 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81183496&amp;gt;&amp;#93;&lt;/span&gt; ? deactivate_super+0x46/0x80&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a17ef&amp;gt;&amp;#93;&lt;/span&gt; ? mntput_no_expire+0xbf/0x110&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a225b&amp;gt;&amp;#93;&lt;/span&gt; ? sys_umount+0x7b/0x3a0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100b072&amp;gt;&amp;#93;&lt;/span&gt; ? system_call_fastpath+0x16/0x1b&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: --&lt;del&gt;[ end trace 7cd192156324e649 ]&lt;/del&gt;--&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: -----------&lt;del&gt;[ cut here ]&lt;/del&gt;-----------&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: WARNING: at fs/fs-writeback.c:548 wb_writeback+0x339/0x3f0() (Tainted: G        W  ---------------   )&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Hardware name: KVM&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Modules linked in: ldiskfs sha512_generic sha256_generic bridge stp llc nfs lockd fscache auth_rpcgss nfs_acl sunrpc ipv6 microcode virtio_balloon 8139too 8139cp mii i2c_piix4 i2c_core sg ext4 jbd2 mbcache sd_mod crc_t10dif pata_acpi ata_generic ata_piix virtio_pci virtio_ring virtio dm_mirror dm_region_hash dm_log dm_mod &lt;span class=&quot;error&quot;&gt;&amp;#91;last unloaded: libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Pid: 18643, comm: flush-8:16 Tainted: G        W  ---------------    2.6.32 #4&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Call Trace:&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e3e7&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_common+0x87/0xc0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e43a&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_null+0x1a/0x20&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811aca49&amp;gt;&amp;#93;&lt;/span&gt; ? wb_writeback+0x339/0x3f0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8150c480&amp;gt;&amp;#93;&lt;/span&gt; ? thread_return+0x4e/0x76e&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81081be2&amp;gt;&amp;#93;&lt;/span&gt; ? del_timer_sync+0x22/0x30&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811acbbb&amp;gt;&amp;#93;&lt;/span&gt; ? wb_do_writeback+0xbb/0x240&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811acda3&amp;gt;&amp;#93;&lt;/span&gt; ? bdi_writeback_task+0x63/0x1b0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81096c67&amp;gt;&amp;#93;&lt;/span&gt; ? bit_waitqueue+0x17/0xd0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8113c1d0&amp;gt;&amp;#93;&lt;/span&gt; ? bdi_start_fn+0x0/0x100&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8113c256&amp;gt;&amp;#93;&lt;/span&gt; ? bdi_start_fn+0x86/0x100&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8113c1d0&amp;gt;&amp;#93;&lt;/span&gt; ? bdi_start_fn+0x0/0x100&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81096a36&amp;gt;&amp;#93;&lt;/span&gt; ? kthread+0x96/0xa0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100c0ca&amp;gt;&amp;#93;&lt;/span&gt; ? child_rip+0xa/0x20&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff810969a0&amp;gt;&amp;#93;&lt;/span&gt; ? kthread+0x0/0xa0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100c0c0&amp;gt;&amp;#93;&lt;/span&gt; ? child_rip+0x0/0x20&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: --&lt;del&gt;[ end trace 7cd192156324e64a ]&lt;/del&gt;--&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: -----------&lt;del&gt;[ cut here ]&lt;/del&gt;-----------&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: WARNING: at fs/fs-writeback.c:990 sync_inodes_sb+0x17f/0x190() (Tainted: G        W  ---------------   )&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Hardware name: KVM&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Modules linked in: ldiskfs sha512_generic sha256_generic bridge stp llc nfs lockd fscache auth_rpcgss nfs_acl sunrpc ipv6 microcode virtio_balloon 8139too 8139cp mii i2c_piix4 i2c_core sg ext4 jbd2 mbcache sd_mod crc_t10dif pata_acpi ata_generic ata_piix virtio_pci virtio_ring virtio dm_mirror dm_region_hash dm_log dm_mod &lt;span class=&quot;error&quot;&gt;&amp;#91;last unloaded: libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Pid: 19717, comm: mkfs.lustre Tainted: G        W  ---------------    2.6.32 #4&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: Call Trace:&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e3e7&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_common+0x87/0xc0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8106e43a&amp;gt;&amp;#93;&lt;/span&gt; ? warn_slowpath_null+0x1a/0x20&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811ab8df&amp;gt;&amp;#93;&lt;/span&gt; ? sync_inodes_sb+0x17f/0x190&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811b1672&amp;gt;&amp;#93;&lt;/span&gt; ? __sync_filesystem+0x82/0x90&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811b187b&amp;gt;&amp;#93;&lt;/span&gt; ? sync_filesystem+0x4b/0x70&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa02e7048&amp;gt;&amp;#93;&lt;/span&gt; ? ldiskfs_quota_off+0x218/0x220 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81183496&amp;gt;&amp;#93;&lt;/span&gt; ? deactivate_super+0x46/0x80&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a17ef&amp;gt;&amp;#93;&lt;/span&gt; ? mntput_no_expire+0xbf/0x110&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a225b&amp;gt;&amp;#93;&lt;/span&gt; ? sys_umount+0x7b/0x3a0&lt;br/&gt;
Jan 16 13:36:53 vm1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100b072&amp;gt;&amp;#93;&lt;/span&gt; ? system_call_fastpath+0x16/0x1b&lt;/p&gt;</comment>
                            <comment id="75162" author="niu" created="Fri, 17 Jan 2014 08:16:10 +0000"  >&lt;p&gt;Yes, indeed. Looks there were heavy changes about the s_umount lock in upstream kernel, and we can&apos;t simply backport the patch.&lt;/p&gt;</comment>
                            <comment id="75255" author="niu" created="Mon, 20 Jan 2014 03:14:32 +0000"  >&lt;p&gt;Hi, LiXi&lt;br/&gt;
Given that remount operation will never be performed on Lustre device in normal usage, I think this deadlock should never happen on Lustre, I&apos;m wondering if we can just leave it as Not Fixed. What do you think about? (Is there any operation in Lustre normal usage could trigger this deadlock?)&lt;/p&gt;</comment>
                            <comment id="84355" author="lixi" created="Mon, 19 May 2014 02:34:41 +0000"  >&lt;p&gt;Hi Yawei,&lt;/p&gt;

&lt;p&gt;Sorry for my late reply. I got this problem when changing some codes of ldiskfs. And I don&apos;t know there is any way to trigger this problem from Lustre operation. So, I agree to close this ticket as won&apos;t fix.&lt;/p&gt;

&lt;p&gt;Thanks!&lt;/p&gt;</comment>
                            <comment id="84356" author="niu" created="Mon, 19 May 2014 03:32:54 +0000"  >&lt;p&gt;Thank you, LiXi.&lt;/p&gt;</comment>
                            <comment id="157945" author="gerrit" created="Thu, 7 Jul 2016 11:29:03 +0000"  >&lt;p&gt;Artem Blagodarenko (artem.blagodarenko@seagate.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/21181&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21181&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4474&quot; title=&quot;deadlock of ldiskfs_quota_off()&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4474&quot;&gt;&lt;del&gt;LU-4474&lt;/del&gt;&lt;/a&gt; osd: Add nodelalloc to ldisk mount options&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 4130fa92a09b80abf57ba7d539214948ddbf2869&lt;/p&gt;</comment>
                            <comment id="157946" author="artem_blagodarenko" created="Thu, 7 Jul 2016 11:33:11 +0000"  >&lt;p&gt;This hang happened on my system because SELinux is enabled (permissive mode) on my VM. But it can happen in different situations when osd_mount cleanup happens.&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;&lt;span class=&quot;code-quote&quot;&gt;&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot;&lt;/span&gt; disables &lt;span class=&quot;code-keyword&quot;&gt;this&lt;/span&gt; message.
mount.lustre D 0000000000000000 0 10566 10564 0x00000080
ffff88004e689710 0000000000000082 0000000000000000 ffff88004e689718
0000000000000000 ffff88004e6896e8 0000000000000286 ffff88004e6896e8
ffff88004d5d7af8 ffff88004e689fd8 000000000000fbc8 ffff88004d5d7af8
Call Trace:
[&amp;lt;ffffffff8152a6d5&amp;gt;] rwsem_down_failed_common+0x95/0x1d0
[&amp;lt;ffffffff8152a866&amp;gt;] rwsem_down_read_failed+0x26/0x30
[&amp;lt;ffffffff8128f174&amp;gt;] call_rwsem_down_read_failed+0x14/0x30
[&amp;lt;ffffffff81529d64&amp;gt;] ? down_read+0x24/0x30
[&amp;lt;ffffffffa060217c&amp;gt;] ldiskfs_quota_off+0x1bc/0x1f0 [ldiskfs]
[&amp;lt;ffffffff812325aa&amp;gt;] ? selinux_sb_copy_data+0x14a/0x1e0
[&amp;lt;ffffffff8118b866&amp;gt;] deactivate_locked_super+0x46/0x90
[&amp;lt;ffffffff8118b9bd&amp;gt;] vfs_kern_mount+0x10d/0x1b0
[&amp;lt;ffffffffa0654cc4&amp;gt;] osd_mount+0x544/0xf90 [osd_ldiskfs]
[&amp;lt;ffffffffa0655c1f&amp;gt;] osd_device_alloc+0x50f/0x9d0 [osd_ldiskfs]
[&amp;lt;ffffffffa1010d7f&amp;gt;] obd_setup+0x1bf/0x290 [obdclass]
[&amp;lt;ffffffffa0ef9820&amp;gt;] ? cfs_hash_create+0x2e0/0x940 [libcfs]
[&amp;lt;ffffffffa1010fff&amp;gt;] class_setup+0x1af/0x7d0 [obdclass]
[&amp;lt;ffffffffa0ff550f&amp;gt;] ? class_name2dev+0x2f/0x40 [obdclass]
[&amp;lt;ffffffffa1018fac&amp;gt;] class_process_config+0xc6c/0x1ad0 [obdclass]
[&amp;lt;ffffffffa0eeb888&amp;gt;] ? libcfs_log_return+0x28/0x40 [libcfs]
[&amp;lt;ffffffffa101dfab&amp;gt;] ? lustre_cfg_new+0x40b/0x6f0 [obdclass]
[&amp;lt;ffffffffa101e3e8&amp;gt;] do_lcfg+0x158/0x450 [obdclass]
[&amp;lt;ffffffffa101e774&amp;gt;] lustre_start_simple+0x94/0x200 [obdclass]
[&amp;lt;ffffffffa1059411&amp;gt;] server_fill_super+0xf61/0x1abc [obdclass]
[&amp;lt;ffffffffa0eeb888&amp;gt;] ? libcfs_log_return+0x28/0x40 [libcfs]
[&amp;lt;ffffffffa1024388&amp;gt;] lustre_fill_super+0x1d8/0x550 [obdclass]
[&amp;lt;ffffffffa10241b0&amp;gt;] ? lustre_fill_super+0x0/0x550 [obdclass]
[&amp;lt;ffffffff8118c2cf&amp;gt;] get_sb_nodev+0x5f/0xa0
[&amp;lt;ffffffffa101bec5&amp;gt;] lustre_get_sb+0x25/0x30 [obdclass]
[&amp;lt;ffffffff8118b92b&amp;gt;] vfs_kern_mount+0x7b/0x1b0
[&amp;lt;ffffffff8118bad2&amp;gt;] do_kern_mount+0x52/0x130
[&amp;lt;ffffffff811acb0b&amp;gt;] do_mount+0x2fb/0x930
[&amp;lt;ffffffff811ad1d0&amp;gt;] sys_mount+0x90/0xe0
[&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;


&lt;p&gt;Just tested on my local environment.&lt;br/&gt;
Without the patch&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;root@devvm-sl6-2]# sestatus
SELinux status:                 enabled
SELinuxfs mount:                /selinux
Current mode:                   permissive
Mode from config file:          permissive
Policy version:                 24
Policy from config file:        mls

[root@devvm-sl6-2]# LOAD=yes lustre/tests/llmount.sh
Loading modules from /lustre/mnt/work/MRP-3473/lustre/tests/..
detected 8 online CPUs by sysfs
libcfs will create CPU partition based on online CPUs
debug=vfstrace rpctrace dlmtrace neterror ha config               ioctl &lt;span class=&quot;code-keyword&quot;&gt;super&lt;/span&gt; lfsck
subsystem_debug=all -lnet -lnd -pinger
gss/krb5 is not supported

[root@devvm-sl6-2]# mount -t lustre -o loop -o context=fake_context /tmp/lustre-mdt1 /mnt/mds1

SELinux:  duplicate or incompatible mount options
INFO: task mount.lustre:10567 blocked &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; more than 120 seconds.
      Not tainted 2.6.32master #0
&lt;span class=&quot;code-quote&quot;&gt;&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot;&lt;/span&gt; disables &lt;span class=&quot;code-keyword&quot;&gt;this&lt;/span&gt; message.
mount.lustre  D 0000000000000007     0 10567  10565 0x00000080
 ffff880064bd7710 0000000000000086 00000000ffffffea ffff880064bd7718
 0000000000000000 ffff880064bd76e8 0000000000000282 ffff880064bd76e8
 ffff880037335098 ffff880064bd7fd8 000000000000fbc8 ffff880037335098
Call Trace:
 [&amp;lt;ffffffff8152a6d5&amp;gt;] rwsem_down_failed_common+0x95/0x1d0
 [&amp;lt;ffffffff8152a866&amp;gt;] rwsem_down_read_failed+0x26/0x30
 [&amp;lt;ffffffff8128f174&amp;gt;] call_rwsem_down_read_failed+0x14/0x30
 [&amp;lt;ffffffff81529d64&amp;gt;] ? down_read+0x24/0x30
 [&amp;lt;ffffffffa0cd326c&amp;gt;] ldiskfs_quota_off+0x1bc/0x1f0 [ldiskfs]
 [&amp;lt;ffffffff812325aa&amp;gt;] ? selinux_sb_copy_data+0x14a/0x1e0
 [&amp;lt;ffffffff8118b866&amp;gt;] deactivate_locked_super+0x46/0x90
 [&amp;lt;ffffffff8118b9bd&amp;gt;] vfs_kern_mount+0x10d/0x1b0
 [&amp;lt;ffffffffa0d25cd4&amp;gt;] osd_mount+0x544/0xf90 [osd_ldiskfs]
 [&amp;lt;ffffffffa0d26c2f&amp;gt;] osd_device_alloc+0x50f/0x9d0 [osd_ldiskfs]
 [&amp;lt;ffffffffa061b3ff&amp;gt;] obd_setup+0x1bf/0x290 [obdclass]
 [&amp;lt;ffffffffa052f840&amp;gt;] ? cfs_hash_create+0x2e0/0x940 [libcfs]
 [&amp;lt;ffffffffa061b67f&amp;gt;] class_setup+0x1af/0x7d0 [obdclass]
 [&amp;lt;ffffffffa05ffb8f&amp;gt;] ? class_name2dev+0x2f/0x40 [obdclass]
 [&amp;lt;ffffffffa062362c&amp;gt;] class_process_config+0xc6c/0x1ad0 [obdclass]
 [&amp;lt;ffffffffa0628715&amp;gt;] ? lustre_cfg_new+0x4f5/0x6f0 [obdclass]
 [&amp;lt;ffffffffa0628a68&amp;gt;] do_lcfg+0x158/0x450 [obdclass]
 [&amp;lt;ffffffffa0628df4&amp;gt;] lustre_start_simple+0x94/0x200 [obdclass]
 [&amp;lt;ffffffffa0663c51&amp;gt;] server_fill_super+0x1021/0x1b76 [obdclass]
 [&amp;lt;ffffffffa062ea58&amp;gt;] lustre_fill_super+0x218/0x5f0 [obdclass]
 [&amp;lt;ffffffffa062e840&amp;gt;] ? lustre_fill_super+0x0/0x5f0 [obdclass]
 [&amp;lt;ffffffff8118c2cf&amp;gt;] get_sb_nodev+0x5f/0xa0
 [&amp;lt;ffffffffa0626545&amp;gt;] lustre_get_sb+0x25/0x30 [obdclass]
 [&amp;lt;ffffffff8118b92b&amp;gt;] vfs_kern_mount+0x7b/0x1b0
 [&amp;lt;ffffffff8118bad2&amp;gt;] do_kern_mount+0x52/0x130
 [&amp;lt;ffffffff811acb0b&amp;gt;] do_mount+0x2fb/0x930
 [&amp;lt;ffffffff811ad1d0&amp;gt;] sys_mount+0x90/0xe0
 [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
INFO: task mount.lustre:10567 blocked &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; more than 120 seconds.
      Not tainted 2.6.32master #0
&lt;span class=&quot;code-quote&quot;&gt;&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot;&lt;/span&gt; disables &lt;span class=&quot;code-keyword&quot;&gt;this&lt;/span&gt; message.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;With patch&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;SELinux status:                 enabled
SELinuxfs mount:                /selinux
Current mode:                   permissive
Mode from config file:          permissive
Policy version:                 24
Policy from config file:        mls
root@devvm-sl6-2]# mount -t lustre -o loop -o context=fake_context /tmp/lustre-mdt1 /mnt/mds1
mount.lustre: mount /dev/loop0 at /mnt/mds1 failed: Invalid argument
This may have multiple causes.
Are the mount options correct?
Check the syslog &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; more info.

[root@devvm-sl6-2]# dmesg | tail -n 50
LDISKFS-fs (loop0): mounted filesystem with ordered data mode. quota=on. Opts:
SELinux:  duplicate or incompatible mount options
LustreError: 7914:0:(osd_handler.c:5900:osd_mount()) lustre-MDT0000-osd: can&apos;t mount /dev/loop0: -22
LustreError: 7914:0:(obd_config.c:517:class_setup()) setup lustre-MDT0000-osd failed (-22)
LustreError: 7914:0:(obd_mount.c:207:lustre_start_simple()) lustre-MDT0000-osd setup error -22
LustreError: 7914:0:(obd_mount_server.c:1713:server_fill_super()) Unable to start osd on /dev/loop0: -22
LustreError: 7914:0:(obd_mount.c:1327:lustre_fill_super()) Unable to mount  (-22)
[root@devvm-sl6-2]#
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;After patch mount exit with error as expected.&lt;/p&gt;
</comment>
                            <comment id="165692" author="bfaccini" created="Mon, 12 Sep 2016 16:02:46 +0000"  >&lt;p&gt;Artem,&lt;br/&gt;
I wonder if your change #21181 will not introduce some performance decrease for some workloads??&lt;br/&gt;
Did you expose it to some filesystem benchmarks suites?&lt;/p&gt;</comment>
                            <comment id="166995" author="bfaccini" created="Thu, 22 Sep 2016 23:32:05 +0000"  >&lt;p&gt;Well, after having some internal discussions, it has been established that &quot;delayed allocation&quot; feature of ext4 is not being used in ldiskfs, so your patch (Gerrit-change #21181) finally looks as an easy way to fix this problem!&lt;/p&gt;</comment>
                            <comment id="167738" author="gerrit" created="Thu, 29 Sep 2016 14:59:30 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/21181/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21181/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4474&quot; title=&quot;deadlock of ldiskfs_quota_off()&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4474&quot;&gt;&lt;del&gt;LU-4474&lt;/del&gt;&lt;/a&gt; osd: Add nodelalloc to ldiskfs mount options&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 7597c672850e7e9232f7400170455fd12aa08b8a&lt;/p&gt;</comment>
                            <comment id="167779" author="pjones" created="Thu, 29 Sep 2016 17:52:18 +0000"  >&lt;p&gt;Landed for 2.9. Please speak up if any further work is still needed&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwco7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>12256</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>