<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:43:03 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4475] mount command errors:  &quot;Communicating with 0@lo, operation mds_connect failed with -11&quot;  AND &quot;Transport endpoint is not connected&quot;</title>
                <link>https://jira.whamcloud.com/browse/LU-4475</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>
&lt;p&gt;I created the mgs/mdt:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;mkfs.lustre --fsname=lfs1 --mgs --mdt --index=0 /dev/vg_root/es0-00
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;and the ost, on another node:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;mkfs.lustre --fsname=lfs1 --mgsnode=172.18.54.21@tcp0 --ost --index=0 /dev/vg_root/es2-00
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;


&lt;p&gt;When mounting either I receive a comm error. &lt;/p&gt;

&lt;p&gt;When mounting the ost I see &quot;Transport endpoint is not connected&quot;:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;# mount -vvv -t lustre /dev/dm-3 /mnt/ost0
mount: fstab path: &lt;span class=&quot;code-quote&quot;&gt;&quot;/etc/fstab&quot;&lt;/span&gt;
mount: mtab path:  &lt;span class=&quot;code-quote&quot;&gt;&quot;/etc/mtab&quot;&lt;/span&gt;
mount: lock path:  &lt;span class=&quot;code-quote&quot;&gt;&quot;/etc/mtab~&quot;&lt;/span&gt;
mount: temp path:  &lt;span class=&quot;code-quote&quot;&gt;&quot;/etc/mtab.tmp&quot;&lt;/span&gt;
mount: UID:        0
mount: eUID:       0
mount: spec:  &lt;span class=&quot;code-quote&quot;&gt;&quot;/dev/mapper/vg_root-es2--00&quot;&lt;/span&gt;
mount: node:  &lt;span class=&quot;code-quote&quot;&gt;&quot;/mnt/ost0&quot;&lt;/span&gt;
mount: types: &lt;span class=&quot;code-quote&quot;&gt;&quot;lustre&quot;&lt;/span&gt;
mount: opts:  &lt;span class=&quot;code-quote&quot;&gt;&quot;(&lt;span class=&quot;code-keyword&quot;&gt;null&lt;/span&gt;)&quot;&lt;/span&gt;
&lt;span class=&quot;code-keyword&quot;&gt;final&lt;/span&gt; mount options: &lt;span class=&quot;code-quote&quot;&gt;&apos;(&lt;span class=&quot;code-keyword&quot;&gt;null&lt;/span&gt;)&apos;&lt;/span&gt;
mount: external mount: argv[0] = &lt;span class=&quot;code-quote&quot;&gt;&quot;/sbin/mount.lustre&quot;&lt;/span&gt;
mount: external mount: argv[1] = &lt;span class=&quot;code-quote&quot;&gt;&quot;/dev/mapper/vg_root-es2--00&quot;&lt;/span&gt;
mount: external mount: argv[2] = &lt;span class=&quot;code-quote&quot;&gt;&quot;/mnt/ost0&quot;&lt;/span&gt;
mount: external mount: argv[3] = &lt;span class=&quot;code-quote&quot;&gt;&quot;-v&quot;&lt;/span&gt;
mount: external mount: argv[4] = &lt;span class=&quot;code-quote&quot;&gt;&quot;-o&quot;&lt;/span&gt;
mount: external mount: argv[5] = &lt;span class=&quot;code-quote&quot;&gt;&quot;rw&quot;&lt;/span&gt;
arg[0] = /sbin/mount.lustre
arg[1] = -v
arg[2] = -o
arg[3] = rw
arg[4] = /dev/mapper/vg_root-es2--00
arg[5] = /mnt/ost0
source = /dev/mapper/vg_root-es2--00 (/dev/mapper/vg_root-es2--00), target = /mnt/ost0
options = rw
checking &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; existing Lustre data: found
Reading CONFIGS/mountdata
mounting device /dev/mapper/vg_root-es2--00 at /mnt/ost0, flags=0x1000000 options=osd=osd-ldiskfs,errors=remount-ro,mgsnode=172.18.54.21@tcp,virgin,param=mgsnode=172.18.54.21@tcp,svname=lfs1-OST0000,device=/dev/mapper/vg_root-es2--00
mount.lustre: mount /dev/mapper/vg_root-es2--00 at /mnt/ost0 failed: Transport endpoint is not connected retries left: 0
mount.lustre: mount /dev/mapper/vg_root-es2--00 at /mnt/ost0 failed: Transport endpoint is not connected

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;


&lt;p&gt;And when mounting the mgs/mdt I see &quot;Communicating with 0@lo, operation mds_connect failed with -11&quot;:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;Jan 11 11:14:30 es0 kernel: LDISKFS-fs (dm-3): mounted filesystem with ordered data mode. quota=on. Opts: 
Jan 11 11:14:30 es0 kernel: Lustre: lfs1-MDT0000: used disk, loading
Jan 11 11:14:30 es0 kernel: LustreError: 11-0: lfs1-MDT0000-lwp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The communication looks fine between nodes:&lt;/p&gt;

&lt;p&gt;From es0:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;[root@es0 log]# lctl
lctl &amp;gt; ping es2
12345-0@lo
12345-172.18.54.23@tcp
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;From es2:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;[root@es2 log]# lctl
lctl &amp;gt; ping es0
12345-0@lo
12345-172.18.54.21@tcp
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;



</description>
                <environment># uname -a&lt;br/&gt;
Linux es0 2.6.32-358.23.2.el6_lustre.x86_64 #1 SMP Thu Dec 19 19:57:45 PST 2013 x86_64 x86_64 x86_64 GNU/Linux&lt;br/&gt;
&lt;br/&gt;
# rpm -qa | egrep &amp;quot;lustre|e2fs&amp;quot; | sort&lt;br/&gt;
e2fsprogs-1.42.7.wc2-7.el6.x86_64&lt;br/&gt;
e2fsprogs-libs-1.42.7.wc2-7.el6.x86_64&lt;br/&gt;
kernel-2.6.32-358.23.2.el6_lustre.x86_64&lt;br/&gt;
kernel-firmware-2.6.32-358.23.2.el6_lustre.x86_64&lt;br/&gt;
lustre-2.4.2-2.6.32_358.23.2.el6_lustre.x86_64.x86_64&lt;br/&gt;
lustre-ldiskfs-4.1.0-2.6.32_358.23.2.el6_lustre.x86_64.x86_64&lt;br/&gt;
lustre-modules-2.4.2-2.6.32_358.23.2.el6_lustre.x86_64.x86_64&lt;br/&gt;
lustre-osd-ldiskfs-2.4.2-2.6.32_358.23.2.el6_lustre.x86_64.x86_64&lt;br/&gt;
&lt;br/&gt;
</environment>
        <key id="22714">LU-4475</key>
            <summary>mount command errors:  &quot;Communicating with 0@lo, operation mds_connect failed with -11&quot;  AND &quot;Transport endpoint is not connected&quot;</summary>
                <type id="6" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11315&amp;avatarType=issuetype">Story</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="1" iconUrl="https://jira.whamcloud.com/images/icons/statuses/open.png" description="The issue is open and ready for the assignee to start work on it.">Open</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="mduff1">Mark Duffield</reporter>
                        <labels>
                            <label>llnl</label>
                    </labels>
                <created>Sat, 11 Jan 2014 16:20:43 +0000</created>
                <updated>Thu, 9 Jun 2016 19:48:31 +0000</updated>
                                            <version>Lustre 2.4.2</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="85859" author="behlendorf" created="Thu, 5 Jun 2014 16:53:53 +0000"  >&lt;p&gt;I&apos;m able consistently reproduce this with 2.4.2 and just the llmount.sh script.  I haven&apos;t had a chance yet to investigate further.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;FSTYPE=zfs /usr/lib64/lustre/tests/llmount.sh
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;dmesg output&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: Lustre: Build Version: 2.4.2-7behlendorf-7behlendorf-1-PRISTINE-2.6.32-431.17.1.el6.x86_64
LNet: Added LNI 192.168.2.117@tcp [8/256/0/180]
LNet: Accept secure, port 988
Lustre: Echo OBD driver; http://www.lustre.org/
Lustre: Setting parameter lustre-MDT0000-mdtlov.lov.stripesize in log lustre-MDT0000
Lustre: Setting parameter lustre-MDT0000-mdtlov.lov.stripecount in log lustre-MDT0000
Lustre: Skipped 1 previous similar message
Lustre: ctl-lustre-MDT0000: No data found on store. Initialize space
Lustre: lustre-MDT0000: Initializing new disk
LustreError: 11-0: lustre-MDT0000-lwp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11.
SELinux: (dev lustre, type lustre) has no xattr support
Lustre: Failing over lustre-MDT0000
Lustre: server umount lustre-MDT0000 complete
Lustre: Setting parameter lustre-MDT0000-mdtlov.lov.stripesize in log lustre-MDT0000
Lustre: Skipped 2 previous similar messages
Lustre: ctl-lustre-MDT0000: No data found on store. Initialize space
Lustre: Skipped 1 previous similar message
Lustre: srv-lustre-MDT0000: No data found on store. Initialize space
Lustre: lustre-MDT0000: Initializing new disk
LustreError: 11-0: lustre-MDT0000-lwp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11.
SELinux: (dev lustre, type lustre) has no xattr support
Lustre: Failing over lustre-MDT0000
Lustre: server umount lustre-MDT0000 complete
Lustre: DEBUG MARKER: running=$(grep -c /mnt/lustre&apos; &apos; /proc/mounts); if [ $running -ne 0 ] ; then echo Stopping client $(hostname) /mnt/lustre opts:; lsof /mnt/lustre || need_kill=no; if [ x != x -a x$need_kill != xno ]; then pids=$(lsof -t /mnt/lustre | sort -u); if [ -n &quot;$p
Lustre: DEBUG MARKER: running=$(grep -c /mnt/lustre2&apos; &apos; /proc/mounts); if [ $running -ne 0 ] ; then echo Stopping client $(hostname) /mnt/lustre2 opts:; lsof /mnt/lustre2 || need_kill=no; if [ x != x -a x$need_kill != xno ]; then pids=$(lsof -t /mnt/lustre2 | sort -u); if [ -n
Lustre: DEBUG MARKER: grep -c /mnt/mds1&apos; &apos; /proc/mounts
Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
Lustre: DEBUG MARKER: ! zpool list -H lustre-mdt1 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || grep -q ^lustre-mdt1/ /proc/mounts || zpool export lustre-mdt1
Lustre: DEBUG MARKER: grep -c /mnt/ost1&apos; &apos; /proc/mounts
Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
Lustre: DEBUG MARKER: ! zpool list -H lustre-ost1 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || grep -q ^lustre-ost1/ /proc/mounts || zpool export lustre-ost1
Lustre: DEBUG MARKER: grep -c /mnt/ost2&apos; &apos; /proc/mounts
Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
Lustre: DEBUG MARKER: ! zpool list -H lustre-ost2 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || grep -q ^lustre-ost2/ /proc/mounts || zpool export lustre-ost2
Lustre: DEBUG MARKER: grep -c /mnt/mds1&apos; &apos; /proc/mounts
Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
Lustre: DEBUG MARKER: ! zpool list -H lustre-mdt1 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || grep -q ^lustre-mdt1/ /proc/mounts || zpool export lustre-mdt1
Lustre: DEBUG MARKER: mkfs.lustre --mgs --fsname=lustre --mdt --index=0 --param=sys.timeout=20 --param=lov.stripesize=1048576 --param=lov.stripecount=0 --param=mdt.identity_upcall=/usr/sbin/l_getidentity --backfstype=zfs --device-size=200000 --reformat lustre-mdt1/mdt1 /tmp/lu
Lustre: DEBUG MARKER: zpool set cachefile=none lustre-mdt1
Lustre: DEBUG MARKER: ! zpool list -H lustre-mdt1 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || grep -q ^lustre-mdt1/ /proc/mounts || zpool export lustre-mdt1
Lustre: DEBUG MARKER: grep -c /mnt/ost1&apos; &apos; /proc/mounts
Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
Lustre: DEBUG MARKER: ! zpool list -H lustre-ost1 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || grep -q ^lustre-ost1/ /proc/mounts || zpool export lustre-ost1
Lustre: DEBUG MARKER: mkfs.lustre --mgsnode=ovirt-guest-241@tcp --fsname=lustre --ost --index=0 --param=sys.timeout=20 --backfstype=zfs --device-size=200000 --reformat lustre-ost1/ost1 /tmp/lustre-ost1
Lustre: DEBUG MARKER: zpool set cachefile=none lustre-ost1
Lustre: DEBUG MARKER: ! zpool list -H lustre-ost1 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || grep -q ^lustre-ost1/ /proc/mounts || zpool export lustre-ost1
Lustre: DEBUG MARKER: grep -c /mnt/ost2&apos; &apos; /proc/mounts
Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
Lustre: DEBUG MARKER: ! zpool list -H lustre-ost2 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || grep -q ^lustre-ost2/ /proc/mounts || zpool export lustre-ost2
Lustre: DEBUG MARKER: mkfs.lustre --mgsnode=ovirt-guest-241@tcp --fsname=lustre --ost --index=1 --param=sys.timeout=20 --backfstype=zfs --device-size=200000 --reformat lustre-ost2/ost2 /tmp/lustre-ost2
Lustre: DEBUG MARKER: zpool set cachefile=none lustre-ost2
Lustre: DEBUG MARKER: ! zpool list -H lustre-ost2 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || grep -q ^lustre-ost2/ /proc/mounts || zpool export lustre-ost2
Lustre: DEBUG MARKER: running=$(grep -c /mnt/ost1&apos; &apos; /proc/mounts); mpts=$(mount | grep -c /mnt/ost1&apos; &apos;); if [ $running -ne $mpts ]; then echo $(hostname) env are INSANE!; exit 1; fi
Lustre: DEBUG MARKER: running=$(grep -c /mnt/ost2&apos; &apos; /proc/mounts); mpts=$(mount | grep -c /mnt/ost2&apos; &apos;); if [ $running -ne $mpts ]; then echo $(hostname) env are INSANE!; exit 1; fi
Lustre: DEBUG MARKER: running=$(grep -c /mnt/mds1&apos; &apos; /proc/mounts); mpts=$(mount | grep -c /mnt/mds1&apos; &apos;); if [ $running -ne $mpts ]; then echo $(hostname) env are INSANE!; exit 1; fi
Lustre: DEBUG MARKER: running=$(grep -c /mnt/mds1&apos; &apos; /proc/mounts); mpts=$(mount | grep -c /mnt/mds1&apos; &apos;); if [ $running -ne $mpts ]; then echo $(hostname) env are INSANE!; exit 1; fi
Lustre: DEBUG MARKER: running=$(grep -c /mnt/lustre&apos; &apos; /proc/mounts); mpts=$(mount | grep -c /mnt/lustre&apos; &apos;); if [ $running -ne $mpts ]; then echo $(hostname) env are INSANE!; exit 1; fi
Lustre: DEBUG MARKER: running=$(grep -c /mnt/lustre2&apos; &apos; /proc/mounts); mpts=$(mount | grep -c /mnt/lustre2&apos; &apos;); if [ $running -ne $mpts ]; then echo $(hostname) env are INSANE!; exit 1; fi
Lustre: DEBUG MARKER: mkdir -p /mnt/mds1
Lustre: DEBUG MARKER: zpool list -H lustre-mdt1 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 || zpool import -f -o cachefile=none -d /tmp lustre-mdt1
Lustre: DEBUG MARKER: mkdir -p /mnt/mds1; mount -t lustre lustre-mdt1/mdt1 /mnt/mds1
Lustre: Setting parameter lustre-MDT0000-mdtlov.lov.stripesize in log lustre-MDT0000
Lustre: Skipped 4 previous similar messages
Lustre: ctl-lustre-MDT0000: No data found on store. Initialize space
Lustre: lustre-MDT0000: Initializing new disk
LustreError: 11-0: lustre-MDT0000-lwp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11.
SELinux: (dev lustre, type lustre) has no xattr support
Lustre: Failing over lustre-MDT0000
Lustre: server umount lustre-MDT0000 complete
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwcof:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>12257</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                </customfields>
    </item>
</channel>
</rss>