<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:11:44 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
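For instance, assuming the standard JIRA XML issue view path (not shown elsewhere in this file), a request such as
https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-7767/LU-7767.xml?field=key&field=summary
would return only those two fields for this issue.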
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-7767] Failover: replay-dual test_3: test_3 returned 1</title>
                <link>https://jira.whamcloud.com/browse/LU-7767</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Saurabh Tandan &amp;lt;saurabh.tandan@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/7d01907a-cb55-11e5-b49e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/7d01907a-cb55-11e5-b49e-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_3 failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;test_3 returned 1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;test log:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== replay-dual test 3: |X| mkdir adir, mkdir adir/bdir == 07:34:39 (1454571279)
CMD: shadow-45vm7 sync; sync; sync
Filesystem           1K-blocks  Used Available Use% Mounted on
shadow-45vm7:shadow-45vm3:/lustre
                      14220416 16128  14189952   1% /mnt/lustre
CMD: shadow-45vm1.shadow.whamcloud.com,shadow-45vm5,shadow-45vm6 mcreate /mnt/lustre/fsa-\$(hostname); rm /mnt/lustre/fsa-\$(hostname)
CMD: shadow-45vm1.shadow.whamcloud.com,shadow-45vm5,shadow-45vm6 if [ -d /mnt/lustre2 ]; then mcreate /mnt/lustre2/fsa-\$(hostname); rm /mnt/lustre2/fsa-\$(hostname); fi
CMD: shadow-45vm7 /usr/sbin/lctl --device lustre-MDT0000 notransno
CMD: shadow-45vm7 /usr/sbin/lctl --device lustre-MDT0000 readonly
CMD: shadow-45vm7 /usr/sbin/lctl mark mds1 REPLAY BARRIER on lustre-MDT0000
CMD: shadow-45vm7 /usr/sbin/lctl dl
Failing mds1 on shadow-45vm7
+ pm -h powerman --off shadow-45vm7
Command completed successfully
reboot facets: mds1
+ pm -h powerman --on shadow-45vm7
Command completed successfully
Failover mds1 to shadow-45vm3
07:34:56 (1454571296) waiting for shadow-45vm3 network 900 secs ...
07:34:56 (1454571296) network interface is UP
CMD: shadow-45vm3 hostname
pdsh@shadow-45vm1: shadow-45vm3: mcmd: connect failed: Connection refused
CMD: shadow-45vm3 hostname
mount facets: mds1
CMD: shadow-45vm3 zpool list -H lustre-mdt1 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 ||
			zpool import -f -o cachefile=none -d /dev/lvm-Role_MDS lustre-mdt1
Starting mds1:   lustre-mdt1/mdt1 /mnt/mds1
CMD: shadow-45vm3 mkdir -p /mnt/mds1; mount -t lustre   		                   lustre-mdt1/mdt1 /mnt/mds1
CMD: shadow-45vm3 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/openmpi/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh set_default_debug \&quot;-1\&quot; \&quot;all -lnet -lnd -pinger\&quot; 4 
CMD: shadow-45vm3 zfs get -H -o value 				lustre:svname lustre-mdt1/mdt1 2&amp;gt;/dev/null | 				grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: shadow-45vm3 zfs get -H -o value 				lustre:svname lustre-mdt1/mdt1 2&amp;gt;/dev/null | 				grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
CMD: shadow-45vm3 zfs get -H -o value lustre:svname 		                           lustre-mdt1/mdt1 2&amp;gt;/dev/null
Started lustre-MDT0000
CMD: shadow-45vm1.shadow.whamcloud.com,shadow-45vm5,shadow-45vm6 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/openmpi/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh wait_import_state_mount FULL mdc.lustre-MDT0000-mdc-*.mds_server_uuid 
shadow-45vm1: CMD: shadow-45vm1.shadow.whamcloud.com lctl get_param -n at_max
shadow-45vm5: CMD: shadow-45vm5.shadow.whamcloud.com lctl get_param -n at_max
shadow-45vm6: CMD: shadow-45vm6.shadow.whamcloud.com lctl get_param -n at_max
shadow-45vm1: mdc.lustre-MDT0000-mdc-*.mds_server_uuid in FULL state after 17 sec
shadow-45vm5: mdc.lustre-MDT0000-mdc-*.mds_server_uuid in FULL state after 17 sec
shadow-45vm6: mdc.lustre-MDT0000-mdc-*.mds_server_uuid in FULL state after 17 sec
Resetting fail_loc on all nodes...CMD: shadow-45vm1.shadow.whamcloud.com,shadow-45vm3,shadow-45vm5,shadow-45vm6,shadow-45vm8 lctl set_param -n fail_loc=0 	    fail_val=0 2&amp;gt;/dev/null || true
pdsh@shadow-45vm1: shadow-45vm8: mcmd: xpoll (setting up stderr): Interrupted system call
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>EL6.7 Server/Client - ZFS&lt;br/&gt;
master, build# 3314</environment>
        <key id="34586">LU-7767</key>
            <summary>Failover: replay-dual test_3: test_3 returned 1</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Tue, 9 Feb 2016 18:56:36 +0000</created>
                <updated>Mon, 27 Feb 2017 23:36:50 +0000</updated>
                            <resolved>Mon, 27 Feb 2017 23:36:50 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>2</watches>
                                                                            <comments>
                            <comment id="141927" author="jamesanunez" created="Thu, 11 Feb 2016 00:08:37 +0000"  >&lt;p&gt;I&apos;m seeing similar &apos;mcmd: xpoll&apos; errors in sanity-scrub tests 1c and 11&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;pdsh@shadow-45vm1: shadow-45vm8: mcmd: xpoll (setting up stderr): Interrupted system call
 sanity-scrub test_1c: @@@@@@ FAIL: server shadow-45vm8 environments are insane! 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;and in sanity-lfsck test_19a&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;cat: /mnt/lustre/d19a.sanity-lfsck/a0: Input/output error
fail_loc=0
Resetting fail_loc on all nodes...CMD: shadow-45vm1.shadow.whamcloud.com,shadow-45vm2,shadow-45vm3,shadow-45vm7,shadow-45vm8 lctl set_param -n fail_loc=0 	    fail_val=0 2&amp;gt;/dev/null || true
pdsh@shadow-45vm1: shadow-45vm8: mcmd: xpoll (setting up stderr): Interrupted system call
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Logs are at &lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/ddd50a9c-d002-11e5-be99-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/ddd50a9c-d002-11e5-be99-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="186378" author="adilger" created="Mon, 27 Feb 2017 23:36:50 +0000"  >&lt;p&gt;This looks like it was an environment problem.&lt;/p&gt;</comment>
                </comments>
                <attachments>
                </attachments>
                <subtasks>
                </subtasks>
                <customfields>
                    <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzy0u7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>