<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:24:06 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-9200] sanity-lfsck: failed to mount OST</title>
                <link>https://jira.whamcloud.com/browse/LU-9200</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah_lw &amp;lt;wei3.liu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;Please provide additional information about the failure here.&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/f11eec1a-f8f8-11e6-aac4-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/f11eec1a-f8f8-11e6-aac4-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;server and client: lustre-master tag-2.9.53, el7 zfs&lt;/p&gt;

&lt;p&gt;there is no error log. This failure means sanity-lfsck cannot be run, and it also affects the following 2 test suites.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Starting ost1:   lustre-ost1/ost1 /mnt/lustre-ost1
CMD: trevis-36vm8 mkdir -p /mnt/lustre-ost1; mount -t lustre   		                   lustre-ost1/ost1 /mnt/lustre-ost1
trevis-36vm8: mount.lustre: mount lustre-ost1/ost1 at /mnt/lustre-ost1 failed: Cannot send after transport endpoint shutdown
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="44638">LU-9200</key>
            <summary>sanity-lfsck: failed to mount OST</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="1" iconUrl="https://jira.whamcloud.com/images/icons/statuses/open.png" description="The issue is open and ready for the assignee to start work on it.">Open</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Thu, 9 Mar 2017 21:41:31 +0000</created>
                <updated>Wed, 3 Mar 2021 20:55:04 +0000</updated>
                                            <version>Lustre 2.10.0</version>
                    <version>Lustre 2.11.0</version>
                    <version>Lustre 2.10.2</version>
                    <version>Lustre 2.10.5</version>
                    <version>Lustre 2.10.7</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                                                                            <comments>
                            <comment id="188503" author="adilger" created="Wed, 15 Mar 2017 18:31:52 +0000"  >&lt;p&gt;There isn&apos;t much in the logs.  It looks like there may have been a service still running from the previous test that was not unmounted cleanly, and this was causing the new OST mount to fail.  Will see if this happens again.&lt;/p&gt;</comment>
                            <comment id="221141" author="jamesanunez" created="Thu, 15 Feb 2018 23:00:49 +0000"  >&lt;p&gt;We have seen this failure several times since this ticket was opened. A recent example of this failure is at&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/874a01ea-111f-11e8-a7cd-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/874a01ea-111f-11e8-a7cd-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;In the OSS console log, we see the mount fail&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[  274.852821] Lustre: DEBUG MARKER: mkdir -p /mnt/lustre-ost1
[  275.205895] Lustre: DEBUG MARKER: lsmod | grep zfs &amp;gt;&amp;amp;/dev/null || modprobe zfs;
[  275.205895] 			zpool list -H lustre-ost1 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 ||
[  275.205895] 			zpool import -f -o cachefile=none -o failmode=panic -d /dev/lvm-Role_OSS lustre-ost1
[  275.688869] Lustre: DEBUG MARKER: zfs get -H -o value 						lustre:svname lustre-ost1/ost1
[  276.020363] Lustre: DEBUG MARKER: mkdir -p /mnt/lustre-ost1; mount -t lustre   		                   lustre-ost1/ost1 /mnt/lustre-ost1
[  284.401047] LustreError: 15f-b: lustre-OST0000: cannot register this server with the MGS: rc = -108. Is the MGS running?
[  284.403459] LustreError: 9448:0:(obd_mount_server.c:1934:server_fill_super()) Unable to start targets: -108
[  284.405669] LustreError: 9448:0:(obd_mount_server.c:1586:server_put_super()) no obd lustre-OST0000
[  284.408071] LustreError: 9448:0:(obd_mount_server.c:132:server_deregister_mount()) lustre-OST0000 not registered
[  284.410875] Lustre: server umount lustre-OST0000 complete
[  284.412717] LustreError: 9448:0:(obd_mount.c:1583:lustre_fill_super()) Unable to mount  (-108)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;In this case, lustre-initialization took place right before sanity-lfsck failed mounting OST0. &lt;/p&gt;</comment>
                            <comment id="221327" author="jamesanunez" created="Tue, 20 Feb 2018 22:03:15 +0000"  >&lt;p&gt;There&apos;s, what looks like, another instance of this with Ubuntu clients, at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/49dd5b22-12b9-11e8-a6ad-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/49dd5b22-12b9-11e8-a6ad-52540065bddc&lt;/a&gt;. In this case, the console log for the OSS has an interesting message about the obd reference&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[  242.176504] Lustre: lustre-OST0000 is waiting for obd_unlinked_exports more than 8 seconds. The obd refcount = 2. Is it stuck?
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="222569" author="bzzz" created="Tue, 6 Mar 2018 16:53:29 +0000"  >&lt;p&gt;AFAICS, I&apos;m getting this very frequently. for example, &lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/bdf41958-f133-4ab6-a190-a7d9cdfd785f&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/bdf41958-f133-4ab6-a190-a7d9cdfd785f&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="231940" author="jamesanunez" created="Tue, 14 Aug 2018 20:14:54 +0000"  >&lt;p&gt;For Lustre 2.10.5 RC1, we see this issue with the OST mount causing sanity-lfsck to fail without running any tests, but we also see the next three test suites; sanityn, sanity-hsm, sanity-lsnapshot; fail due to this issue. &lt;/p&gt;</comment>

&lt;p&gt;An example of this is at &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/1e4b0840-3c0f-4c74-8d51-0005c0b498c8&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/1e4b0840-3c0f-4c74-8d51-0005c0b498c8&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzz6iv:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>