<?xml version="1.0" encoding="UTF-8"?>
<!--
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:34:12 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-3472] MDS can&apos;t umount with blocked flock</title>
                <link>https://jira.whamcloud.com/browse/LU-3472</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Here is a testcase&lt;br/&gt;
flock -e /mnt/lustre/ff -c &apos;sleep 10&apos; &amp;amp;&lt;br/&gt;
sleep 1&lt;br/&gt;
flock -e /mnt/lustre/ff -c &apos;sleep 5&apos; &amp;amp;&lt;br/&gt;
sleep 1&lt;br/&gt;
echo &quot;umount -f /mnt/mds1&quot;&lt;br/&gt;
umount -f /mnt/mds1 || true&lt;br/&gt;
killall flock&lt;br/&gt;
ps ax&lt;br/&gt;
echo &quot;umount -f /mnt/lustre&quot;&lt;br/&gt;
umount -f /mnt/lustre || true&lt;br/&gt;
cleanupall -f || error &quot;cleanup failed&quot;&lt;/p&gt;</description>
                <environment></environment>
        <key id="19432">LU-3472</key>
            <summary>MDS can&apos;t umount with blocked flock</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="10100">Low Priority</resolution>
                                        <assignee username="bfaccini">Bruno Faccini</assignee>
                                    <reporter username="askulysh">Andriy Skulysh</reporter>
                        <labels>
                            <label>patch</label>
                    </labels>
                <created>Fri, 14 Jun 2013 10:08:32 +0000</created>
                <updated>Thu, 9 Jan 2020 07:06:59 +0000</updated>
                            <resolved>Thu, 9 Jan 2020 07:06:59 +0000</resolved>
                                                                        <due></due>
                            <votes>0</votes>
                                    <watches>3</watches>
                                                                            <comments>
                            <comment id="60639" author="askulysh" created="Fri, 14 Jun 2013 10:20:52 +0000"  >&lt;p&gt;patch: &lt;a href=&quot;http://review.whamcloud.com/6647&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6647&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="60640" author="askulysh" created="Fri, 14 Jun 2013 10:21:19 +0000"  >&lt;p&gt;Xyratex-bug-id: MRP-997&lt;/p&gt;</comment>
                            <comment id="60673" author="bfaccini" created="Fri, 14 Jun 2013 16:34:34 +0000"  >&lt;p&gt;I think this is only a timing issue due to the time needed to give-up/time-out the FL_UNLCK attempt during the kill or exit of flock cmds. If you just wait/sleep a few seconds before trying to umount from the Client side or simply retry, you will succeed.&lt;/p&gt;</comment>
                            <comment id="60765" author="adilger" created="Mon, 17 Jun 2013 15:54:16 +0000"  >&lt;p&gt;Bruno, I thought you were already working on a patch to clean up FLOCK locks at unmount time?&lt;/p&gt;</comment>
                            <comment id="60833" author="bfaccini" created="Tue, 18 Jun 2013 21:33:53 +0000"  >&lt;p&gt;Andreas, I am sure you refer to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2665&quot; title=&quot;LBUG while unmounting client&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2665&quot;&gt;&lt;del&gt;LU-2665&lt;/del&gt;&lt;/a&gt;, but scenario in this ticket is different since the problem there is that the FL_UNLCK request upon kill/exit of process with a granted FLOCK can be trashed when MDS communications occur leaving an orphan lock finally causing an LBUG during later umount. This is a Client-side problem requiring more robust error handling during FLock cleanup.&lt;/p&gt;

&lt;p&gt;About this ticket&apos;s problem, I just ran the reproducer and found that a 2nd/later umount is successful.&lt;br/&gt;
I will try to investigate further and fully understand what&apos;s going on.&lt;/p&gt;</comment>
                            <comment id="61634" author="bfaccini" created="Tue, 2 Jul 2013 12:27:40 +0000"  >&lt;p&gt;I restarted auto-test on patch-set #2 since it triggered &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3230&quot; title=&quot;conf-sanity fails to start run: umount of OST fails&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3230&quot;&gt;&lt;del&gt;LU-3230&lt;/del&gt;&lt;/a&gt; problem/hang during zfs-review/replay-single/test_90.&lt;/p&gt;</comment>
                            <comment id="62184" author="bfaccini" created="Fri, 12 Jul 2013 13:54:42 +0000"  >&lt;p&gt;Hello, Andriy,&lt;br/&gt;
Have you been able to verify the result of your patch ? Sorry to ask, but running with it, I still get the EBUSY on 1st &quot;umount -f /mnt/lustre&quot; ...&lt;br/&gt;
I am working on this now to understand what&apos;s going wrong.&lt;/p&gt;</comment>
                            <comment id="62499" author="bfaccini" created="Wed, 17 Jul 2013 18:00:05 +0000"  >&lt;p&gt;So definitely, even with patch (that I agree fixes some coding imbalance and add consistency in calling ldlm_flock_blocking_unlink() from ldlm_resource_unlink_lock()!), there is still some timing/asynchronous side effect on Client during FLock cleanup that cause this. Since &quot;killall&quot; works same way (unless man use its -w option), in your reproducer it can not serialize enough to allow a successful 1st Client umount. May be I am wrong and missed something (tell me!), but I don&apos;t think we can fix this, it looks just like to ask for I/Os to be complete when sync() just returns.&lt;/p&gt;</comment>
                            <comment id="260851" author="adilger" created="Thu, 9 Jan 2020 07:06:59 +0000"  >&lt;p&gt;Close old bug&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvtdj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>8700</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>