<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:15:47 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-8233] Spurious mounts remaining on client node(s)</title>
                <link>https://jira.whamcloud.com/browse/LU-8233</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Error occurred during soak testing of build &apos;20160512&apos; (see &lt;a href=&quot;https://wiki.hpdd.intel.com/pages/viewpage.action?title=Soak+Testing+on+Lola&amp;amp;spaceKey=Releases#SoakTestingonLola-20160512&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://wiki.hpdd.intel.com/pages/viewpage.action?title=Soak+Testing+on+Lola&amp;amp;spaceKey=Releases#SoakTestingonLola-20160512&lt;/a&gt; )&lt;br/&gt;
DNE is &lt;em&gt;disabled&lt;/em&gt;. MDTs had been formatted using ldiskfs, OSTs using zfs. MDS nodes are configured in active - active HA failover configuration. &lt;/p&gt;

&lt;p&gt;After triggering an umount command for the Lustre FS some of the clients don&apos;t complete the umount process successfully.&lt;br/&gt;
A spurious mount is still present and can be displayed via &lt;tt&gt;mount&lt;/tt&gt; command and &lt;tt&gt;/etc/mtab&lt;/tt&gt;. the FS itself isn&apos;t accessible anymore:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@lola-26 ~]# mount
/dev/sda1 on / type ext3 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
10.4.0.1:/export/scratch on /scratch type nfs (rw,addr=10.4.0.1)
10.4.0.1:/home on /home type nfs (rw,addr=10.4.0.1)
nfsd on /proc/fs/nfsd type nfsd (rw)
192.168.1.108@o2ib10:192.168.1.109@o2ib10:/soaked on /mnt/soaked type lustre (rw,user_xattr)
[root@lola-26 ~]# cat /etc/mtab
/dev/sda1 / ext3 rw 0 0
proc /proc proc rw 0 0
sysfs /sys sysfs rw 0 0
devpts /dev/pts devpts rw,gid=5,mode=620 0 0
tmpfs /dev/shm tmpfs rw 0 0
none /proc/sys/fs/binfmt_misc binfmt_misc rw 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw 0 0
10.4.0.1:/export/scratch /scratch nfs rw,addr=10.4.0.1 0 0
10.4.0.1:/home /home nfs rw,addr=10.4.0.1 0 0
nfsd /proc/fs/nfsd nfsd rw 0 0
192.168.1.108@o2ib10:192.168.1.109@o2ib10:/soaked /mnt/soaked lustre rw,user_xattr 0 0

[root@lola-26 ~]# ll /mnt/soaked/
total 0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Executing &lt;tt&gt;umount&lt;/tt&gt;  a second time &apos;clears&apos; the mount status:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@lola-26 ~]# umount /mnt/soaked
umount: /mnt/soaked: not mounted
[root@lola-26 ~]# mount
/dev/sda1 on / type ext3 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
10.4.0.1:/export/scratch on /scratch type nfs (rw,addr=10.4.0.1)
10.4.0.1:/home on /home type nfs (rw,addr=10.4.0.1)
nfsd on /proc/fs/nfsd type nfsd (rw)
[root@lola-26 ~]# cat /etc/mtab 
/dev/sda1 / ext3 rw 0 0
proc /proc proc rw 0 0
sysfs /sys sysfs rw 0 0
devpts /dev/pts devpts rw,gid=5,mode=620 0 0
tmpfs /dev/shm tmpfs rw 0 0
none /proc/sys/fs/binfmt_misc binfmt_misc rw 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw 0 0
10.4.0.1:/export/scratch /scratch nfs rw,addr=10.4.0.1 0 0
10.4.0.1:/home /home nfs rw,addr=10.4.0.1 0 0
nfsd /proc/fs/nfsd nfsd rw 0 0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Attached files console, messages and kernel debug log of affected client node (&lt;tt&gt;lola-26&lt;/tt&gt;)&lt;/p&gt;</description>
                <environment>lola&lt;br/&gt;
build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-master/3365/&quot;&gt;https://build.hpdd.intel.com/job/lustre-master/3365/&lt;/a&gt;  (el6.7, x86_64)</environment>
        <key id="37332">LU-8233</key>
            <summary>Spurious mounts remaining on client node(s)</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="heckes">Frank Heckes</reporter>
                        <labels>
                            <label>soak</label>
                    </labels>
                <created>Thu, 2 Jun 2016 12:21:22 +0000</created>
                <updated>Wed, 13 Oct 2021 02:56:22 +0000</updated>
                            <resolved>Wed, 13 Oct 2021 02:56:22 +0000</resolved>
                                    <version>Lustre 2.9.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                                                                            <comments>
                            <comment id="154475" author="green" created="Thu, 2 Jun 2016 17:30:13 +0000"  >&lt;p&gt;so when this condition arises - was there an error from unmount printed?&lt;/p&gt;

&lt;p&gt;It looks like unmount worked ok, just the /etc/mtab was not updated for some reason.&lt;/p&gt;</comment>
                            <comment id="154477" author="adilger" created="Thu, 2 Jun 2016 17:32:32 +0000"  >&lt;p&gt;This could be checked by seeing whether the filesystem is only in &lt;tt&gt;/etc/mtab&lt;/tt&gt; and not in &lt;tt&gt;/proc/mounts&lt;/tt&gt;.&lt;/p&gt;</comment>
                            <comment id="154479" author="adilger" created="Thu, 2 Jun 2016 17:36:22 +0000"  >&lt;p&gt;If there are problems to debug the source of this problem, it might be possible to replace calls to &quot;umount&quot; with &quot;strace -o /tmp/umount.$$ umount&quot; so that we get a log of why the /etc/mtab update didn&apos;t happen.&lt;/p&gt;</comment>
                            <comment id="154551" author="heckes" created="Fri, 3 Jun 2016 07:54:12 +0000"  >&lt;p&gt;Sorry, forgot to attach the log files mentioned above, yesterday.&lt;br/&gt;
The first &apos;umount&apos; command was started at &apos;Jun  2 04:26&apos; via pdsh for all client nodes. umounts were blocked on the client nodes (&apos;D&apos;).&lt;br/&gt;
I&apos;ll try to reproduce the error for the current build &apos;20160601&apos; (&lt;a href=&quot;https://wiki.hpdd.intel.com/pages/viewpage.action?title=Soak+Testing+on+Lola&amp;amp;spaceKey=Releases#SoakTestingonLola-20160601&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://wiki.hpdd.intel.com/pages/viewpage.action?title=Soak+Testing+on+Lola&amp;amp;spaceKey=Releases#SoakTestingonLola-20160601&lt;/a&gt;) with command tracing enabled.&lt;/p&gt;</comment>
                            <comment id="154552" author="heckes" created="Fri, 3 Jun 2016 07:56:45 +0000"  >&lt;p&gt;I also took debug logs on all server nodes. I didn&apos;t upload them, because of the big amount of data. Please let me know if they are needed.&lt;/p&gt;</comment>
                            <comment id="154556" author="heckes" created="Fri, 3 Jun 2016 09:11:09 +0000"  >&lt;p&gt;I couldn&apos;t reproduce the error with build &apos;20160601&apos;.&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="21727" name="console-lola-26.log.bz2" size="13626" author="heckes" created="Fri, 3 Jun 2016 07:45:06 +0000"/>
                            <attachment id="21728" name="lola-26-lustre-log-20160602-0504.bz2" size="7174663" author="heckes" created="Fri, 3 Jun 2016 07:45:06 +0000"/>
                            <attachment id="21729" name="messages-lola-26.log.bz2" size="201585" author="heckes" created="Fri, 3 Jun 2016 07:45:06 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzydg7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>