<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:20:54 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-15741] replay-dual test_0b: mgc_request.c:253:do_config_log_add()) MGC10.240.41.18@tcp: failed processing log</title>
                <link>https://jira.whamcloud.com/browse/LU-15741</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Cliff White &amp;lt;cwhite@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.whamcloud.com/test_sets/a6e98d16-acd5-4901-b6a3-45addaa864f8&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/a6e98d16-acd5-4901-b6a3-45addaa864f8&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The failure is for replay-dual test 0b.&lt;/p&gt;

&lt;p&gt;Mount fails on client, may be issue with test systems&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 8922.500173] Lustre: Unmounted lustre-client
[ 8955.589567] Did not receive response to NOPIN on CID: 0, failing connection for I_T Nexus iqn.1994-05.com.redhat:de2392d78ab4,i,0x00023d000001,iqn.2018-06.com.trevis-59vm1:target,t,0x01
[ 8963.157715] iSCSI/iqn.1994-05.com.redhat:de2392d78ab4: Unsupported SCSI Opcode 0xa3, sending CHECK_CONDITION.
[ 8979.897072] Lustre: setting import lustre-MDT0000_UUID INACTIVE by administrator request
[ 8979.899008] Lustre: Skipped 7 previous similar messages
[ 8990.171882] Lustre: Unmounted lustre-client
[ 8990.181417] Lustre: DEBUG MARKER: mkdir -p /mnt/lustre
[ 8990.193085] Lustre: DEBUG MARKER: mount -t lustre -o user_xattr,flock trevis-59vm7:trevis-59vm8:/lustre /mnt/lustre
[ 8996.355848] LustreError: 124531:0:(mgc_request.c:253:do_config_log_add()) MGC10.240.41.18@tcp: failed processing log, type 1: rc = -5
[ 9007.619313] LustreError: 15c-8: MGC10.240.41.18@tcp: Confguration from log lustre-client failed from MGS -5. Communication error between node &amp;amp; MGS, a bad configuration, or other errors. See syslog for more info
[ 9007.623006] Lustre: Unmounted lustre-client
[ 9007.624191] LustreError: 124531:0:(super25.c:176:lustre_fill_super()) llite: Unable to mount &amp;lt;unknown&amp;gt;: rc = -5
[ 9008.020069] Lustre: DEBUG MARKER: /usr/sbin/lctl mark  replay-dual test_0b: @@@@@@ FAIL: mount1 fais 
[ 9008.486992] Lustre: DEBUG MARKER: replay-dual test_0b: @@@@@@ FAIL: mount1 fais
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt; </description>
                <environment></environment>
        <key id="69730">LU-15741</key>
            <summary>replay-dual test_0b: mgc_request.c:253:do_config_log_add()) MGC10.240.41.18@tcp: failed processing log</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="1" iconUrl="https://jira.whamcloud.com/images/icons/statuses/open.png" description="The issue is open and ready for the assignee to start work on it.">Open</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Thu, 14 Apr 2022 15:58:30 +0000</created>
                <updated>Mon, 12 Jun 2023 23:04:46 +0000</updated>
                                            <version>Lustre 2.15.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>2</watches>
                                                                            <comments>
                            <comment id="337065" author="sarah" created="Wed, 8 Jun 2022 17:17:33 +0000"  >&lt;p&gt;similar but seems caused by network issue&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/38dfa3a3-fc22-4e35-909d-d07ef37a5178&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/38dfa3a3-fc22-4e35-909d-d07ef37a5178&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Command completed successfully
waiting ping -c 1 -w 3 trevis-33vm8, 900 secs left ...
waiting ping -c 1 -w 3 trevis-33vm8, 895 secs left ...
waiting ping -c 1 -w 3 trevis-33vm8, 890 secs left ...
CMD: trevis-33vm8 hostname
trevis-33vm8: ssh: connect to host trevis-33vm8 port 22: Connection refused
pdsh@trevis-33vm1: trevis-33vm8: ssh exited with exit code 255
CMD: trevis-33vm8 hostname
Failover mds1 to trevis-33vm7
CMD: trevis-33vm7 hostname
mount facets: mds1
CMD: trevis-33vm7 lsmod | grep zfs &amp;gt;&amp;amp;/dev/null || modprobe zfs;
			zpool list -H lustre-mdt1 &amp;gt;/dev/null 2&amp;gt;&amp;amp;1 ||
			zpool import -f -o cachefile=none -o failmode=panic -d /dev/lvm-Role_MDS lustre-mdt1
CMD: trevis-33vm7 zfs get -H -o value 						lustre:svname lustre-mdt1/mdt1
Starting mds1: -o localrecov  lustre-mdt1/mdt1 /mnt/lustre-mds1
CMD: trevis-33vm7 mkdir -p /mnt/lustre-mds1; mount -t lustre -o localrecov  lustre-mdt1/mdt1 /mnt/lustre-mds1
CMD: trevis-33vm7 /usr/sbin/lctl get_param -n health_check
CMD: trevis-33vm7 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/openmpi/bin:/usr/share/Modules/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/usr/sbin:/sbin:/bin::/sbin:/bin:/usr/sbin: NAME=autotest_config bash rpc.sh set_default_debug \&quot;-1\&quot; \&quot;all\&quot; 4 
trevis-33vm7: CMD: trevis-33vm7 /usr/sbin/lctl get_param -n version 2&amp;gt;/dev/null
trevis-33vm7: CMD: trevis-33vm7 /usr/sbin/lctl get_param -n version 2&amp;gt;/dev/null
trevis-33vm7: CMD: trevis-33vm5 /usr/sbin/lctl get_param -n version 2&amp;gt;/dev/null
trevis-33vm7: CMD: trevis-33vm7.trevis.whamcloud.com /usr/sbin/lctl get_param -n version 2&amp;gt;/dev/null
trevis-33vm7: trevis-33vm7.trevis.whamcloud.com: executing set_default_debug -1 all 4
CMD: trevis-33vm7 zfs get -H -o value 				lustre:svname lustre-mdt1/mdt1 2&amp;gt;/dev/null | 				grep -E &apos;:[a-zA-Z]{3}[0-9]{4}&apos;
pdsh@trevis-33vm1: trevis-33vm7: ssh exited with exit code 1
CMD: trevis-33vm7 zfs get -H -o value lustre:svname 		                           lustre-mdt1/mdt1 2&amp;gt;/dev/null
Started lustre-MDT0000
Starting client: trevis-33vm1.trevis.whamcloud.com:  -o user_xattr,flock trevis-33vm7:trevis-33vm8:/lustre /mnt/lustre
CMD: trevis-33vm1.trevis.whamcloud.com mkdir -p /mnt/lustre
CMD: trevis-33vm1.trevis.whamcloud.com mount -t lustre -o user_xattr,flock trevis-33vm7:trevis-33vm8:/lustre /mnt/lustre
mount.lustre: mount trevis-33vm7:trevis-33vm8:/lustre at /mnt/lustre failed: Input/output error
Is the MGS running?
 replay-dual test_0b: @@@@@@ FAIL: mount1 fais 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="67645">LU-15375</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="76415">LU-16874</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i02n33:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>