<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:11:05 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-7690] sanity-lfsck: couldn&apos;t mount ost</title>
                <link>https://jira.whamcloud.com/browse/LU-7690</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah_lw &amp;lt;wei3.liu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;Please provide additional information about the failure here.&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/c272f136-bbb9-11e5-a592-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/c272f136-bbb9-11e5-a592-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;no logs&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;CMD: shadow-20vm4 mkdir -p /mnt/mds1; mount -t lustre   		                   /dev/lvm-Role_MDS/P1 /mnt/mds1
CMD: shadow-20vm4 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/openmpi/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh set_default_debug \&quot;vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck\&quot; \&quot;all -lnet -lnd -pinger\&quot; 4 
shadow-20vm4: mpi/openmpi-x86_64(5):ERROR:150: Module &apos;mpi/openmpi-x86_64&apos; conflicts with the currently loaded module(s) &apos;mpi/compat-openmpi16-x86_64&apos;
shadow-20vm4: mpi/openmpi-x86_64(5):ERROR:102: Tcl command execution failed: conflict		mpi
shadow-20vm4: 
CMD: shadow-20vm4 e2label /dev/lvm-Role_MDS/P1 				2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: shadow-20vm4 e2label /dev/lvm-Role_MDS/P1 				2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: shadow-20vm4 e2label /dev/lvm-Role_MDS/P1 2&amp;gt;/dev/null
Started lustre-MDT0000
CMD: shadow-20vm3 mkdir -p /mnt/ost1
CMD: shadow-20vm3 test -b /dev/lvm-Role_OSS/P1
Starting ost1:   /dev/lvm-Role_OSS/P1 /mnt/ost1
CMD: shadow-20vm3 mkdir -p /mnt/ost1; mount -t lustre   		                   /dev/lvm-Role_OSS/P1 /mnt/ost1
shadow-20vm3: mount.lustre: mount /dev/mapper/lvm--Role_OSS-P1 at /mnt/ost1 failed: Cannot send after transport endpoint shutdown
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>server: lustre-master build#3305 RHEL7.1&lt;br/&gt;
client: lustre-master build#3305 RHEL6.7</environment>
        <key id="34191">LU-7690</key>
            <summary>sanity-lfsck: couldn&apos;t mount ost</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="ys">Yang Sheng</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Wed, 20 Jan 2016 07:44:44 +0000</created>
                <updated>Wed, 16 Jan 2019 06:27:47 +0000</updated>
                            <resolved>Wed, 16 Jan 2019 06:27:47 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                    <version>Lustre 2.9.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="139414" author="sarah" created="Wed, 20 Jan 2016 07:46:11 +0000"  >&lt;p&gt;this issue also hit on sanityn and sanity-hsm, it blocks these 3 tests&lt;/p&gt;</comment>
                            <comment id="139776" author="pjones" created="Fri, 22 Jan 2016 19:04:25 +0000"  >&lt;p&gt;Yang Sheng&lt;/p&gt;

&lt;p&gt;Could you please look into this issue?&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="141849" author="standan" created="Wed, 10 Feb 2016 21:49:49 +0000"  >&lt;p&gt;Another instance found for interop tag 2.7.66 - EL7 Server/2.7.1 Client, build# 3316&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/b0e34a2c-cc91-11e5-b80c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/b0e34a2c-cc91-11e5-b80c-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for interop tag 2.7.66 - EL6.7 Server/2.7.1 Client, build# 3316&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/55c68b16-cc98-11e5-b80c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/55c68b16-cc98-11e5-b80c-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/55b4ae1e-cc98-11e5-b80c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/55b4ae1e-cc98-11e5-b80c-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/559ab798-cc98-11e5-b80c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/559ab798-cc98-11e5-b80c-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for interop tag 2.7.66 - EL7 Server/2.5.5 Client, build# 3316&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/799447ec-cc46-11e5-901d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/799447ec-cc46-11e5-901d-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/7986c66c-cc46-11e5-901d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/7986c66c-cc46-11e5-901d-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/79768c7a-cc46-11e5-901d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/79768c7a-cc46-11e5-901d-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for Full tag 2.7.66 - EL6.7 Server/EL6.7 Client - DNE, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/736c38b2-ca83-11e5-9215-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/736c38b2-ca83-11e5-9215-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="142856" author="sarah" created="Thu, 18 Feb 2016 19:36:51 +0000"  >&lt;p&gt;So I resubmit the request on onyx to rerun the tests and didn&apos;t hit this issue:&lt;br/&gt;
testing ran on 2/13/2016&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/1eb9ee30-d380-11e5-bf08-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/1eb9ee30-d380-11e5-bf08-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Just talked with Saurabh, he still saw the issue recently:&lt;br/&gt;
testing also ran on 2/13/2016&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/93baffee-d2ae-11e5-8697-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/93baffee-d2ae-11e5-8697-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="142860" author="standan" created="Thu, 18 Feb 2016 19:54:58 +0000"  >&lt;p&gt;Another instance of the above failure:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/84c4f3c6-d530-11e5-bc47-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/84c4f3c6-d530-11e5-bc47-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Also, the result mentioned above by Sarah ran on 2/13/2016 for tag 2.7.90 ran on onyx as well and still failed.&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/93baffee-d2ae-11e5-8697-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/93baffee-d2ae-11e5-8697-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="143614" author="standan" created="Wed, 24 Feb 2016 17:33:12 +0000"  >&lt;p&gt;Another instance found for interop - EL7 Server/2.5.5 Client, tag 2.7.90. &lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/93baffee-d2ae-11e5-8697-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/93baffee-d2ae-11e5-8697-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="166040" author="jamesanunez" created="Wed, 14 Sep 2016 17:06:50 +0000"  >&lt;p&gt;I have a similar failure with logs at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/cb9dc566-79a4-11e6-8a8c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/cb9dc566-79a4-11e6-8a8c-5254006e85c2&lt;/a&gt;. &lt;/p&gt;

&lt;p&gt;The suite_stdout log has the same error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;04:22:09:Starting ost1:   /dev/lvm-Role_OSS/P1 /mnt/lustre-ost1
04:22:09:CMD: onyx-38vm8 mkdir -p /mnt/lustre-ost1; mount -t lustre   		                   /dev/lvm-Role_OSS/P1 /mnt/lustre-ost1
04:22:10:onyx-38vm8: mount.lustre: mount /dev/mapper/lvm--Role_OSS-P1 at /mnt/lustre-ost1 failed: Cannot send after transport endpoint shutdown
04:22:10:sanity-lfsck returned 0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;In the logs for the OST (vm8), there&apos;s an interesting error when trying to mount the OST&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;04:22:16:[  402.226223] Lustre: Evicted from MGS (at 10.2.4.171@tcp) after server handle changed from 0xbc1d6e975ee7dac5 to 0xbc1d6e975ee7e8d3
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="240075" author="ys" created="Wed, 16 Jan 2019 06:27:47 +0000"  >&lt;p&gt;Please reopen it if hit again.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="30375">LU-6650</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="48513">LU-10045</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxylz:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>