<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:19:46 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-1796] Test failure on test suite recovery-mds-scale, subtest test_failover_ost</title>
                <link>https://jira.whamcloud.com/browse/LU-1796</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/58611dea-ef02-11e1-9426-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/58611dea-ef02-11e1-9426-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_failover_ost failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;import is not in FULL state&lt;/p&gt;&lt;/blockquote&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: /usr/sbin/lctl mark == recovery-mds-scale test failover_ost: failover OST ================================================ 13:35:20 \(1345840520\)
Lustre: DEBUG MARKER: == recovery-mds-scale test failover_ost: failover OST ================================================ 13:35:20 (1345840520)
Lustre: DEBUG MARKER: /usr/sbin/lctl mark Started client load: dd on client-26vm1
Lustre: DEBUG MARKER: Started client load: dd on client-26vm1
Lustre: DEBUG MARKER: /usr/sbin/lctl mark Started client load: tar on client-26vm2
Lustre: DEBUG MARKER: Started client load: tar on client-26vm2
Lustre: DEBUG MARKER: /usr/sbin/lctl mark Started client load: dbench on client-26vm5
Lustre: DEBUG MARKER: Started client load: dbench on client-26vm5
Lustre: DEBUG MARKER: /usr/sbin/lctl mark ==== Checking the clients loads BEFORE failover -- failure NOT OK              ELAPSED=0 DURATION=86400 PERIOD=900
Lustre: DEBUG MARKER: ==== Checking the clients loads BEFORE failover -- failure NOT OK ELAPSED=0 DURATION=86400 PERIOD=900
Lustre: DEBUG MARKER: /usr/sbin/lctl mark Wait ost6 recovery complete before doing next failover...
Lustre: DEBUG MARKER: Wait ost6 recovery complete before doing next failover...
Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/u
Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/u
Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/u
Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/u
Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/u
Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/u
Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/u
Lustre: DEBUG MARKER: /usr/sbin/lctl mark Checking clients are in FULL state before doing next failover...
Lustre: DEBUG MARKER: Checking clients are in FULL state before doing next failover...
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0000-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0000-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0000-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0000-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0000-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0001-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0000-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0001-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0001-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0001-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0001-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0001-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0002-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0002-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0002-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0003-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0002-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0002-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0002-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0003-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0003-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0003-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0003-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0004-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0003-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0004-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0004-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0004-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0004-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: DEBUG MARKER: osc.lustre-OST0004-osc-[^M]*.ost_server_uuid in FULL state after 0 sec
Lustre: lustre-OST0005: Export ffff8800720f2400 already connecting from 10.10.4.150@tcp
Lustre: Skipped 26 previous similar messages
Lustre: lustre-OST0005: Client 33159da3-4758-1c50-a4ed-0926567964fa (at 10.10.4.152@tcp) reconnecting
Lustre: Skipped 355 previous similar messages
Lustre: lustre-OST0005: Client 33159da3-4758-1c50-a4ed-0926567964fa (at 10.10.4.152@tcp) refused reconnection, still busy with 6 active RPCs
Lustre: Skipped 355 previous similar messages
Lustre: DEBUG MARKER: /usr/sbin/lctl mark  rpc : @@@@@@ FAIL: can\&apos;t put import for osc.lustre-OST0005-osc-[^M]*.ost_server_uuid into FULL state after 662 sec, have DISCONN 
Lustre: DEBUG MARKER: rpc : @@@@@@ FAIL: can&apos;t put import for osc.lustre-OST0005-osc-[^M]*.ost_server_uuid into FULL state after 662 sec, have DISCONN
Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:./../utils:/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests
Lustre: DEBUG MARKER: /usr/sbin/lctl mark  rpc : @@@@@@ FAIL: can\&apos;t put import for osc.lustre-OST0005-osc-[^M]*.ost_server_uuid into FULL state after 662 sec, have DISCONN 
Lustre: DEBUG MARKER: /usr/sbin/lctl mark  rpc : @@@@@@ FAIL: can\&apos;t put import for osc.lustre-OST0005-osc-[^M]*.ost_server_uuid into FULL state after 662 sec, have DISCONN 
Lustre: DEBUG MARKER: rpc : @@@@@@ FAIL: can&apos;t put import for osc.lustre-OST0005-osc-[^M]*.ost_server_uuid into FULL state after 662 sec, have DISCONN
Lustre: DEBUG MARKER: /usr/sbin/lctl dk &amp;gt; /tmp/test_logs/1345840602/rpc..debug_log.$(hostname -s).1345841358.log;
         dmesg &amp;gt; /tmp/test_logs/1345840602/rpc..dmesg.$(hostname -s).1345841358.log
Lustre: DEBUG MARKER: rpc : @@@@@@ FAIL: can&apos;t put import for osc.lustre-OST0005-osc-[^M]*.ost_server_uuid into FULL state after 662 sec, have DISCONN
Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:./../utils:/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests
cannot allocate a tage (2)
cannot allocate a tage (2)
cannot allocate a tage (2)
cannot allocate a tage (2)
cannot allocate a tage (2)
Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:./../utils:/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests
cannot allocate a tage (2)
cannot allocate a tage (2)
cannot allocate a tage (2)
cannot allocate a tage (2)
cannot allocate a tage (2)
Lustre: lustre-OST0005: Export ffff8800720f2400 already connecting from 10.10.4.150@tcp
Lustre: Skipped 37 previous similar messages
Lustre: DEBUG MARKER: rsync -az /tmp/test_logs/1345840602/rpc..*.1345841358.log client-26vm5.lab.whamcloud.com:/tmp/test_logs/1345840602
Lustre: DEBUG MARKER: /usr/sbin/lctl dk &amp;gt; /tmp/test_logs/1345840602/rpc..debug_log.$(hostname -s).1345841372.log;
         dmesg &amp;gt; /tmp/test_logs/1345840602/rpc..dmesg.$(hostname -s).1345841372.log
Lustre: DEBUG MARKER: /usr/sbin/lctl dk &amp;gt; /tmp/test_logs/1345840604/rpc..debug_log.$(hostname -s).1345841379.log;
         dmesg &amp;gt; /tmp/test_logs/1345840604/rpc..dmesg.$(hostname -s).1345841379.log
Lustre: DEBUG MARKER: rsync -az /tmp/test_logs/1345840602/rpc..*.1345841372.log client-26vm1.lab.whamcloud.com:/tmp/test_logs/1345840602
Lustre: DEBUG MARKER: rsync -az /tmp/test_logs/1345840604/rpc..*.1345841379.log client-26vm2.lab.whamcloud.com:/tmp/test_logs/1345840604
Lustre: DEBUG MARKER: /usr/sbin/lctl mark  recovery-mds-scale test_failover_ost: @@@@@@ FAIL: import is not in FULL state 
Lustre: DEBUG MARKER: recovery-mds-scale test_failover_ost: @@@@@@ FAIL: import is not in FULL state
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="15615">LU-1796</key>
            <summary>Test failure on test suite recovery-mds-scale, subtest test_failover_ost</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Tue, 28 Aug 2012 13:15:02 +0000</created>
                <updated>Mon, 29 May 2017 05:09:22 +0000</updated>
                            <resolved>Mon, 29 May 2017 05:09:22 +0000</resolved>
                                                                        <due></due>
                            <votes>0</votes>
                                    <watches>1</watches>
                                                                            <comments>
                            <comment id="197413" author="adilger" created="Mon, 29 May 2017 05:09:22 +0000"  >&lt;p&gt;Close old ticket.&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                    <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvenb:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>6014</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>