<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:52:13 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
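
For instance (illustrative only: the issue-xml view path below is assumed from JIRA's
standard issue views rather than taken from this feed), a trimmed request for this
issue could look like:
https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-5524/LU-5524.xml?field=key&field=summary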
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5524] parallel-scale-nfsv3: FAIL: setup nfs failed!</title>
                <link>https://jira.whamcloud.com/browse/LU-5524</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;The parallel-scale-nfsv3 test failed as follows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;CMD: shadow-40vm7 service nfs restart
shadow-40vm7: Cannot register service: RPC: Unable to receive; errno = Connection refused
shadow-40vm7: rpc.rquotad: unable to register (RQUOTAPROG, RQUOTAVERS, udp).
shadow-40vm7: rpc.nfsd: writing fd to kernel failed: errno 5 (Input/output error)
shadow-40vm7: rpc.nfsd: writing fd to kernel failed: errno 5 (Input/output error)
shadow-40vm7: rpc.nfsd: unable to set any sockets for nfsd
Shutting down NFS daemon: [  OK  ]
Shutting down NFS mountd: [  OK  ]
Shutting down NFS quotas: [  OK  ]
Shutting down RPC idmapd: [  OK  ]
Starting NFS services:  [  OK  ]
Starting NFS quotas: [FAILED]
Starting NFS mountd: [FAILED]
Starting NFS daemon: [FAILED]
CMD: shadow-40vm1,shadow-40vm2.shadow.whamcloud.com chkconfig --list rpcidmapd 2&amp;gt;/dev/null |
			       grep -q rpcidmapd &amp;amp;&amp;amp; service rpcidmapd restart ||
			       true
CMD: shadow-40vm7 exportfs -o rw,async,no_root_squash *:/mnt/lustre         &amp;amp;&amp;amp; exportfs -v
/mnt/lustre   	&amp;lt;world&amp;gt;(rw,async,wdelay,no_root_squash,no_subtree_check)

Mounting NFS clients (version 3)...
CMD: shadow-40vm1,shadow-40vm2.shadow.whamcloud.com mkdir -p /mnt/lustre
CMD: shadow-40vm1,shadow-40vm2.shadow.whamcloud.com mount -t nfs -o nfsvers=3,async                 shadow-40vm7:/mnt/lustre /mnt/lustre
shadow-40vm1: mount.nfs: Connection timed out
shadow-40vm2: mount.nfs: Connection timed out
 parallel-scale-nfsv3 : @@@@@@ FAIL: setup nfs failed!
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;parallel-scale-nfsv4 hit the same failure.&lt;/p&gt;

&lt;p&gt;Maloo reports:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/46866536-28a2-11e4-901f-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/46866536-28a2-11e4-901f-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/46cc3106-28a2-11e4-901f-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/46cc3106-28a2-11e4-901f-5254006e85c2&lt;/a&gt;&lt;/p&gt;</description>
                <environment>Lustre Build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_5/82/&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_5/82/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.5/x86_64</environment>
        <key id="26092">LU-5524</key>
            <summary>parallel-scale-nfsv3: FAIL: setup nfs failed!</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="yujian">Jian Yu</reporter>
                        <labels>
                    </labels>
                <created>Wed, 20 Aug 2014 20:47:32 +0000</created>
                <updated>Thu, 28 Aug 2014 16:46:31 +0000</updated>
                            <resolved>Thu, 28 Aug 2014 16:46:19 +0000</resolved>
                                    <version>Lustre 2.7.0</version>
                    <version>Lustre 2.5.3</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="92078" author="yujian" created="Wed, 20 Aug 2014 21:01:28 +0000"  >&lt;p&gt;Lustre client build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_4/73/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_4/73/&lt;/a&gt; (2.4.3)&lt;br/&gt;
Lustre server build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_5/80/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_5/80/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.5/x86_64&lt;/p&gt;

&lt;p&gt;The same failure occurred:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/bd0e71cc-2658-11e4-8af9-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/bd0e71cc-2658-11e4-8af9-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/bd2553a6-2658-11e4-8af9-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/bd2553a6-2658-11e4-8af9-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Is this related to the change at &lt;a href=&quot;http://review.whamcloud.com/11246&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/11246&lt;/a&gt;?&lt;/p&gt;</comment>
                            <comment id="92107" author="yujian" created="Thu, 21 Aug 2014 05:25:44 +0000"  >&lt;p&gt;This is blocking parallel-scale-nfsv{3,4} testing on Lustre b2_5 branch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/3cd2c68e-28e2-11e4-85c7-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/3cd2c68e-28e2-11e4-85c7-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/3ce98bee-28e2-11e4-85c7-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/3ce98bee-28e2-11e4-85c7-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/bf690ca0-28d0-11e4-85c7-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/bf690ca0-28d0-11e4-85c7-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/bf8ec968-28d0-11e4-85c7-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/bf8ec968-28d0-11e4-85c7-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="92197" author="green" created="Fri, 22 Aug 2014 03:19:26 +0000"  >&lt;p&gt;I guess kernel update to rhel might have changed something without us noticing and broke nfs.&lt;br/&gt;
I wonder if master still works?&lt;/p&gt;</comment>
                            <comment id="92265" author="yujian" created="Sat, 23 Aug 2014 00:45:42 +0000"  >&lt;p&gt;Here are some instances occurred in the recent month on master branch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/13400e6e-2845-11e4-901f-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/13400e6e-2845-11e4-901f-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/fc293d3a-2818-11e4-8e75-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/fc293d3a-2818-11e4-8e75-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/0dc52508-22ca-11e4-b8ac-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/0dc52508-22ca-11e4-b8ac-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6239ab1c-15c9-11e4-818c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6239ab1c-15c9-11e4-818c-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/659e6e3e-11e4-11e4-8a56-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/659e6e3e-11e4-11e4-8a56-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;And here are all of the instances for parallel-scale-nfsv3 on the master branch:&lt;br/&gt;
&lt;a href=&quot;http://tinyurl.com/qaduwde&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://tinyurl.com/qaduwde&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The kernel update on RHEL6.5 is not the cause.&lt;/p&gt;</comment>
                            <comment id="92270" author="yujian" created="Sat, 23 Aug 2014 02:27:09 +0000"  >&lt;p&gt;By looking into the test sessions on Lustre b2_5 build #83, I found that only SLES11SP3 client + RHEL6.5 server test session hit this issue:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/52a73644-28e1-11e4-85c7-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/52a73644-28e1-11e4-85c7-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Other test sessions did not hit this issue. Maybe this is a sporadic test environment issue? Let&apos;s wait for the test results of Lustre b2_5 build #84.&lt;/p&gt;</comment>
                            <comment id="92285" author="yujian" created="Sun, 24 Aug 2014 22:03:26 +0000"  >&lt;p&gt;For Lustre b2_5 build $84, it&apos;s also &lt;em&gt;only&lt;/em&gt; the SLES11SP3 client + RHEL6.5 server test session hit this issue:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/8198b614-2b7d-11e4-8687-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/8198b614-2b7d-11e4-8687-5254006e85c2&lt;/a&gt;&lt;br/&gt;
Other test sessions did not hit this issue.&lt;/p&gt;</comment>
                            <comment id="92724" author="yujian" created="Thu, 28 Aug 2014 16:46:19 +0000"  >&lt;p&gt;The issue did not occur on Lustre b2_5 build #85. It seems it&apos;s a sporadic test environment issue. Let&apos;s close this ticket now. If it occurs again, please reopen this ticket.&lt;/p&gt;</comment>
        </comments>
        <attachments>
        </attachments>
        <subtasks>
        </subtasks>
        <customfields>
            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                <customfieldname>Development</customfieldname>
                <customfieldvalues>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                <customfieldname>Rank</customfieldname>
                <customfieldvalues>
                    <customfieldvalue>1|hzwu6v:</customfieldvalue>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                <customfieldname>Rank (Obsolete)</customfieldname>
                <customfieldvalues>
                    <customfieldvalue>15382</customfieldvalue>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                <customfieldname>Severity</customfieldname>
                <customfieldvalues>
                    <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>
                </customfieldvalues>
            </customfield>
        </customfields>
    </item>
</channel>
</rss>