<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:29:14 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2904] parallel-scale-nfsv3: FAIL: setup nfs failed!</title>
                <link>https://jira.whamcloud.com/browse/LU-2904</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;The parallel-scale-nfsv3 test failed as follows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Mounting NFS clients (version 3)...
CMD: client-12vm1,client-12vm2 mkdir -p /mnt/lustre
CMD: client-12vm1,client-12vm2 mount -t nfs -o nfsvers=3,async                 client-12vm3:/mnt/lustre /mnt/lustre
client-12vm2: mount.nfs: Connection timed out
client-12vm1: mount.nfs: Connection timed out
 parallel-scale-nfsv3 : @@@@@@ FAIL: setup nfs failed! 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Syslog on Lustre MDS/Lustre Client/NFS Server client-12vm3 showed that:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Mar  4 17:34:15 client-12vm3 mrshd[4254]: root@client-12vm1.lab.whamcloud.com as root: cmd=&apos;(PATH=$PATH:/usr/lib64/lustre/utils:/usr/lib64/lustre/tests:/sbin:/usr/sbin; cd /usr/lib64/lustre/tests; LUSTRE=&quot;/usr/lib64/lustre&quot;  sh -c &quot;exportfs -o rw,async,no_root_squash *:/mnt/lustre         &amp;amp;&amp;amp; exportfs -v&quot;);echo XXRETCODE:$?&apos;
Mar  4 17:34:15 client-12vm3 xinetd[1640]: EXIT: mshell status=0 pid=4253 duration=0(sec)
Mar  4 17:34:16 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:894 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:16 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:713 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:17 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:784 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:17 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:877 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:19 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:946 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:19 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:1013 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:23 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:797 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:23 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:701 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:31 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:719 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:31 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:941 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:41 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:943 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:41 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:810 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:51 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:849 for /mnt/lustre (/mnt/lustre)
Mar  4 17:34:51 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:740 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:01 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:846 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:01 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:667 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:11 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:955 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:11 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:1006 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:21 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:828 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:21 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:739 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:31 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:1011 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:31 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:994 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:41 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:847 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:41 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:756 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:51 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:892 for /mnt/lustre (/mnt/lustre)
Mar  4 17:35:51 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:749 for /mnt/lustre (/mnt/lustre)
Mar  4 17:36:01 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:1017 for /mnt/lustre (/mnt/lustre)
Mar  4 17:36:01 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:873 for /mnt/lustre (/mnt/lustre)
Mar  4 17:36:11 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:874 for /mnt/lustre (/mnt/lustre)
Mar  4 17:36:11 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:749 for /mnt/lustre (/mnt/lustre)
Mar  4 17:36:21 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.207:916 for /mnt/lustre (/mnt/lustre)
Mar  4 17:36:21 client-12vm3 rpc.mountd[4165]: authenticated mount request from 10.10.4.206:841 for /mnt/lustre (/mnt/lustre)
Mar  4 17:36:21 client-12vm3 xinetd[1640]: START: mshell pid=4286 from=::ffff:10.10.4.206
Mar  4 17:36:21 client-12vm3 mrshd[4287]: root@client-12vm1.lab.whamcloud.com as root: cmd=&apos;/usr/sbin/lctl mark &quot;/usr/sbin/lctl mark  parallel-scale-nfsv3 : @@@@@@ FAIL: setup nfs failed! &quot;;echo XXRETCODE:$?&apos;
Mar  4 17:36:21 client-12vm3 kernel: Lustre: DEBUG MARKER: /usr/sbin/lctl mark  parallel-scale-nfsv3 : @@@@@@ FAIL: setup nfs failed!
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/5cbf6978-853e-11e2-bfd3-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/5cbf6978-853e-11e2-bfd3-52540035b04c&lt;/a&gt;&lt;/p&gt;</description>
                <environment>&lt;br/&gt;
Lustre b2_1 client build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/181&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/181&lt;/a&gt;&lt;br/&gt;
Lustre master server build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-master/1285&quot;&gt;http://build.whamcloud.com/job/lustre-master/1285&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.3/x86_64&lt;br/&gt;
</environment>
        <key id="17757">LU-2904</key>
            <summary>parallel-scale-nfsv3: FAIL: setup nfs failed!</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="yong.fan">nasf</assignee>
                                    <reporter username="yujian">Jian Yu</reporter>
                        <labels>
                    </labels>
                <created>Mon, 4 Mar 2013 22:05:09 +0000</created>
                <updated>Wed, 20 Nov 2013 09:26:05 +0000</updated>
                            <resolved>Sat, 31 Aug 2013 18:48:20 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                                    <fixVersion>Lustre 2.1.6</fixVersion>
                    <fixVersion>Lustre 2.4.1</fixVersion>
                    <fixVersion>Lustre 2.5.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>17</watches>
                                                                            <comments>
                            <comment id="53312" author="bobijam" created="Tue, 5 Mar 2013 02:20:19 +0000"  >&lt;p&gt;I think it&apos;s not 2.1.4&amp;lt;c--s&amp;gt;2.4.0 interop test, since parallel-scale-nfs test use MDS node to mount the lustre filesystem, then use another client node to NFS mount the filesystem hosted on the MDS node.  The lustre client and server are all on MDS node, which is 2.4.0 system, no 2.1.4 lustre client involved.&lt;/p&gt;</comment>
                            <comment id="53314" author="bobijam" created="Tue, 5 Mar 2013 02:35:40 +0000"  >&lt;p&gt;from dmesg on MDS&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Lustre: DEBUG MARKER: exportfs -o rw,async,no_root_squash *:/mnt/lustre         &amp;amp;&amp;amp; exportfs -v&lt;br/&gt;
Lustre: DEBUG MARKER: /usr/sbin/lctl mark  parallel-scale-nfsv3 : @@@@@@ FAIL: setup nfs failed! &lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;looks like exportfs fails, can you manually check it to see what possible reason that the exportfs command fails?&lt;/p&gt;</comment>
                            <comment id="53323" author="yujian" created="Tue, 5 Mar 2013 09:18:49 +0000"  >&lt;blockquote&gt;&lt;p&gt;I think it&apos;s not 2.1.4&amp;lt;c--s&amp;gt;2.4.0 interop test, since parallel-scale-nfs test use MDS node to mount the lustre filesystem, then use another client node to NFS mount the filesystem hosted on the MDS node. The lustre client and server are all on MDS node, which is 2.4.0 system, no 2.1.4 lustre client involved.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;You&apos;re right. I just searched out the latest parallel-scale-nfsv3 test reports on master branch, and found all of them failed to setup NFS:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/47bc2b0a-82d8-11e2-ba47-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/47bc2b0a-82d8-11e2-ba47-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/cc1be074-81fd-11e2-8564-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/cc1be074-81fd-11e2-8564-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/ee103132-7e59-11e2-8f4f-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/ee103132-7e59-11e2-8f4f-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/7f885d40-7ba5-11e2-a4de-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/7f885d40-7ba5-11e2-a4de-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/18a6689c-7ba5-11e2-8242-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/18a6689c-7ba5-11e2-8242-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/a205aef2-77ce-11e2-abae-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/a205aef2-77ce-11e2-abae-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;This is really a regression issue on master branch.&lt;/p&gt;</comment>
                            <comment id="53324" author="yujian" created="Tue, 5 Mar 2013 09:23:53 +0000"  >&lt;blockquote&gt;&lt;p&gt;looks like exportfs fails, can you manually check it to see what possible reason that the exportfs command fails?&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;The test output showed that:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;CMD: client-12vm3 exportfs -o rw,async,no_root_squash *:/mnt/lustre         &amp;amp;&amp;amp; exportfs -v
/mnt/lustre   	&amp;lt;world&amp;gt;(rw,async,wdelay,no_root_squash,no_subtree_check)

Mounting NFS clients (version 3)...
CMD: client-12vm1,client-12vm2 mkdir -p /mnt/lustre
CMD: client-12vm1,client-12vm2 mount -t nfs -o nfsvers=3,async                 client-12vm3:/mnt/lustre /mnt/lustre
client-12vm2: mount.nfs: Connection timed out
client-12vm1: mount.nfs: Connection timed out
 parallel-scale-nfsv3 : @@@@@@ FAIL: setup nfs failed!
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Running exportfs passed. The failure occurred while mounting the NFS clients.&lt;/p&gt;</comment>
                            <comment id="53775" author="bobijam" created="Tue, 12 Mar 2013 04:52:23 +0000"  >&lt;p&gt;git commit 4a88dc8 (&lt;a href=&quot;http://review.whamcloud.com/4904&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/4904&lt;/a&gt; &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1866&quot; title=&quot;LFSCK Phase 1.5 for FID-in-dirent and linkEA consistency&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1866&quot;&gt;&lt;del&gt;LU-1866&lt;/del&gt;&lt;/a&gt; osd: FID-in-LMA and OI files) caused this nfs mount-timeout issue. Before that commit, lustre filesystem can be nfs mounted by nfs client.&lt;/p&gt;</comment>
                            <comment id="53869" author="yujian" created="Wed, 13 Mar 2013 04:28:52 +0000"  >&lt;p&gt;Lustre b2_1 client build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/186&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/186&lt;/a&gt;&lt;br/&gt;
Lustre master server build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-master/1302&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-master/1302&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.3/x86_64&lt;/p&gt;

&lt;p&gt;The same issue occurred: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/726a4448-8b59-11e2-965f-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/726a4448-8b59-11e2-965f-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="53975" author="yong.fan" created="Thu, 14 Mar 2013 01:37:50 +0000"  >&lt;p&gt;The reason caused the failure:&lt;/p&gt;

&lt;p&gt;1) nfs defects, it does not work if the &quot;inode::i_ino&quot; is larger than 2^32, which is known issue.&lt;/p&gt;

&lt;p&gt;2) MDS returns new &quot;/ROOT&quot; FID to client, which was IGIF before, but now it is &lt;/p&gt;
{FID_SEQ_ROOT, 1, 0}
&lt;p&gt;. When client coverts the new FID into local inode::i_ino, it is larger than 2^32.&lt;/p&gt;

&lt;p&gt;Possible solutions:&lt;/p&gt;

&lt;p&gt;1) MDS still returns IGIF FID for /ROOT to client as it did before. But it only works for re-export Lustre &quot;/ROOT&quot;, if we want to re-export subdir under the &quot;/ROOT&quot;, it still does NOT work, such issue has been there all along in spite of the patch (&lt;a href=&quot;http://review.whamcloud.com/4904&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/4904&lt;/a&gt;) or not.&lt;/p&gt;

&lt;p&gt;2) Use &quot;-o 32bitapi&quot; when mount the Lustre client which will re-export via VFS. It works for both &quot;/ROOT&quot; and its subdir re-exporting. (We still need some patch on client side, because of missing handle the &quot;-o 32bitapi&quot; at some corners). I prefer this one.&lt;/p&gt;


&lt;p&gt;Andreas, how do you think?&lt;/p&gt;</comment>
                            <comment id="53976" author="bzzz" created="Thu, 14 Mar 2013 01:58:51 +0000"  >&lt;p&gt;there is no IGIFs with ZFS backend, so I wouldn&apos;t consider (1) as an option.&lt;/p&gt;</comment>
                            <comment id="53981" author="yong.fan" created="Thu, 14 Mar 2013 02:56:32 +0000"  >&lt;p&gt;Alex, I agree with you.&lt;/p&gt;

&lt;p&gt;The patch for option 2) is here:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/#change,5711&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,5711&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="54569" author="yong.fan" created="Thu, 21 Mar 2013 15:26:57 +0000"  >&lt;p&gt;Yu jian, I have successfully mount nfs client with above patch and -o 32bitapi on the Lustre client. So you can verify more nfs related tests with it.&lt;/p&gt;

&lt;p&gt;BTW, not only the ROOT, but also any subdir can be re-exported.&lt;/p&gt;</comment>
                            <comment id="54573" author="yujian" created="Thu, 21 Mar 2013 16:08:08 +0000"  >&lt;blockquote&gt;&lt;p&gt;Yu jian, I have successfully mount nfs client with above patch and -o 32bitapi on the Lustre client. So you can verify more nfs related tests with it.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Could you please add the following test parameters to verify that? Thanks.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Test-Parameters: envdefinitions=SLOW=yes \
clientjob=lustre-b2_1 clientbuildno=191 \
testlist=parallel-scale-nfsv3,parallel-scale-nfsv4
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="54748" author="yong.fan" created="Mon, 25 Mar 2013 10:49:20 +0000"  >&lt;p&gt;Another possible solution is that: only allow re-export Lustre against the &quot;ROOT&quot;, not its sub-directories, then we can handle &quot;ROOT&quot; FID on the client specially to map it to a 32 bit ino#, then no need &quot;32bitapi&quot;.&lt;/p&gt;

&lt;p&gt;Andreas/Alex, how do you think?&lt;/p&gt;

&lt;p&gt;This is the patch for this idea:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/#change,5840&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,5840&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="55140" author="yong.fan" created="Sun, 31 Mar 2013 13:15:13 +0000"  >&lt;p&gt;Summary for the possible solutions:&lt;/p&gt;

&lt;p&gt;1) Fix NFS issues via client kernel patch. (not works for patch-less client)&lt;/p&gt;

&lt;p&gt;2) Use 32bitapi for re-export Lustre via NFS. The advantage is that it works for re-exporting Lustre via NFS for any directory. The shortcoming is that the client needs to be mounted as &quot;-o 32bitapi&quot;, which increases the possibility of ino# mapping collision. This is the patch:&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/#change,5711&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,5711&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;3) Return a special IGIF FID &lt;/p&gt;
{FID_SEQ_IGIF, 1, 0}
&lt;p&gt; for the ROOT object to client, which is compatible with old Lustre-2.x/1.8 cases. It allows to re-export Lustre ROOT via NFS without client-side modification, needs NOT mount client with &quot;-o 32bitapi&quot;. The shortcoming is that it only works for ROOT, but we cannot re-export other non-ROOT via NFS. This is the patch:&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/#change,5840&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,5840&lt;/a&gt; set2&lt;/p&gt;

&lt;p&gt;4) Map current ROOT FID &lt;/p&gt;
{FID_SEQ_ROOT, 1, 0}
&lt;p&gt; to a special 32-bit local ino# on the client, then allows to re-export Lustre ROOT via NFS without mount client with &quot;-o 32bitapi&quot;. The shortcoming is that 3.1) it only works for ROOT, but we cannot re-export other non-ROOT via NFS. 3.2) only works for new client, cannot use old client for the re-exporting. This is the patch:&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/#change,5840&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,5840&lt;/a&gt; set1&lt;/p&gt;


&lt;p&gt;Before the NFS issues is fixed via upstream kernel patches, we need a temporary solution from 2)/3)/4). So any suggestions?&lt;/p&gt;</comment>
                            <comment id="55352" author="green" created="Wed, 3 Apr 2013 04:43:44 +0000"  >&lt;p&gt;I think nfsv4 allows 64 bit fid, does not it?&lt;br/&gt;
The solution number 2 seems pretty reasonable to me. in fact we even have it documented in the changelog in this way.&lt;/p&gt;</comment>
                            <comment id="55604" author="adilger" created="Fri, 5 Apr 2013 13:56:20 +0000"  >&lt;p&gt;Fan Yong, any progress on the #1 approach on a kernel patch to allow 64-bit inode numbers for root?  That is a bug in the kernel anyway that should be fixed regardless of whether we have some other workaround, and we can keep the patch in our server kernel until it is included upstream.  Then, users can either use the server kernel on NFS exporting clients, or whatever other workaround we have for patchless clients, but the problem will go away in the future. &lt;/p&gt;</comment>
                            <comment id="55883" author="pjones" created="Tue, 9 Apr 2013 15:45:49 +0000"  >&lt;p&gt;The #2 patch has landed so dropping this in priority. Any further work to push a fix upstream can be handled after 2.4.0 is GA&lt;/p&gt;</comment>
                            <comment id="56676" author="yong.fan" created="Mon, 22 Apr 2013 12:47:56 +0000"  >&lt;p&gt;On somehow, this issue can be resolved by specifying &quot;fsid=1&quot; (without &quot;32bitapi&quot; for Lustre mount option) when re-export Lustre via NFS (v3 or v4). For example: &quot;/mnt/lustre 10.211.55.*(rw,no_root_squash,fsid=1)&quot;. (Verified on 2.6.32-358.2.1.el6)&lt;/p&gt;</comment>
                            <comment id="57704" author="yong.fan" created="Mon, 6 May 2013 04:01:46 +0000"  >&lt;p&gt;Do NOT need more patch, since there is other solution which can bypass the 32-bit ino issue.&lt;/p&gt;</comment>
                            <comment id="58638" author="adilger" created="Thu, 16 May 2013 05:51:51 +0000"  >&lt;p&gt;Nasf, it seems there is still a defect in the upstream kernel, where it cannot handle a 64-bit inode number for the NFS root?  Could you please at minimum send a bug report to the linux-nfs@vger.kernel.org mailing list with details (CC me also), so that this can eventually be fixed.&lt;/p&gt;</comment>
                            <comment id="59038" author="yong.fan" created="Wed, 22 May 2013 08:49:09 +0000"  >&lt;p&gt;There are two ways to resolve the issue:&lt;/p&gt;

&lt;p&gt;1) Patch Lustre to support UUID. Means the statfs64() on Lustre will return valid UUID, nfsd will generate nfs handle with 64-bits ino plus the UUID. Then we do NOT need to patch kernel. The work to be done:&lt;br/&gt;
1.1) Patch user space nfs-utils to use 64-bits ino# instead of 32-bits ino#.&lt;br/&gt;
1.2) Patch Lustre to return valid UUID for statfs64(). The client needs to fetch the UUID from MDT0 via MDS_STATFS RPC. On MDT side, we can return the backend FS UUID for that. ldiskfs has supported that already. zfs backend has NOT implemented yet. So need small changes for zfs backend.&lt;/p&gt;

&lt;p&gt;2) Patch kernel to support 64-bits ino# for nfs handle. The work to be done.&lt;br/&gt;
2.1) Patch user space nfs-utils to use 64-bits ino# instead of 32-bits ino#.&lt;br/&gt;
2.2) Patch kernel to support 64-bits ino# for nfs handle.&lt;/p&gt;


&lt;p&gt;The work for 1.1) and 2.1) are similar. But 1.2) and 2.2) are quite different. I prefer to the first solution. How do you think?&lt;/p&gt;</comment>
                            <comment id="59041" author="yong.fan" created="Wed, 22 May 2013 09:17:15 +0000"  >&lt;p&gt;Patch for 1.1)&lt;/p&gt;</comment>
                            <comment id="59043" author="yong.fan" created="Wed, 22 May 2013 09:32:22 +0000"  >&lt;p&gt;Patch for 1.2)&lt;/p&gt;</comment>
                            <comment id="59071" author="adilger" created="Wed, 22 May 2013 15:55:16 +0000"  >&lt;p&gt;Actually, I thought we are using the Lustre MDT target name for the UUID already?  That is common across all clients already and will not be broken by backup and restore of the underlying MDT device. &lt;/p&gt;</comment>
                            <comment id="59073" author="yong.fan" created="Wed, 22 May 2013 16:31:24 +0000"  >&lt;p&gt;You mean the name &apos;lustre-MDT0000&apos; or similar? The uuid used for nfs handle is two int values, which is returned via statfs(). If we want to use it, we need to make some conversion.&lt;/p&gt;</comment>
                            <comment id="59198" author="adilger" created="Thu, 23 May 2013 21:25:22 +0000"  >&lt;p&gt;Yes, we already do this in the client:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;&lt;span class=&quot;code-keyword&quot;&gt;static&lt;/span&gt; &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; client_common_fill_super(...)
{
        :
        :
        /* We set sb-&amp;gt;s_dev equal on all lustre clients in order to support
         * NFS export clustering.  NFSD requires that the FSID be the same
         * on all clients. */
        /* s_dev is also used in lt_compare() to compare two fs, but that is
         * only a node-local comparison. */
        uuid = obd_get_uuid(sbi-&amp;gt;ll_md_exp);
        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (uuid != NULL)
                sb-&amp;gt;s_dev = get_uuid2int(uuid-&amp;gt;uuid, strlen(uuid-&amp;gt;uuid));
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This could be improved to provide the full fsid for NFS instead of just the 32-bit device number.&lt;/p&gt;

&lt;p&gt;Note that I submitted the nfs-utils mount patch upstream, so the need for &quot;32bitapi&quot; mount option for 64-bit clients will not be around long.&lt;/p&gt;</comment>
                            <comment id="59345" author="yujian" created="Mon, 27 May 2013 04:20:42 +0000"  >&lt;p&gt;Lustre b2_1 client build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/204&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/204&lt;/a&gt;&lt;br/&gt;
Lustre master server build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-master/1508&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-master/1508&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.4/x86_64&lt;/p&gt;

&lt;p&gt;The issue still occurred: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/b5a0c146-c624-11e2-9bf1-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/b5a0c146-c624-11e2-9bf1-52540035b04c&lt;/a&gt;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;CMD: client-26vm3 exportfs -o rw,async,no_root_squash *:/mnt/lustre         &amp;amp;&amp;amp; exportfs -v
/mnt/lustre   	&amp;lt;world&amp;gt;(rw,async,wdelay,no_root_squash,no_subtree_check)

Mounting NFS clients (version 3)...
CMD: client-26vm5,client-26vm6.lab.whamcloud.com mkdir -p /mnt/lustre
CMD: client-26vm5,client-26vm6.lab.whamcloud.com mount -t nfs -o nfsvers=3,async                 client-26vm3:/mnt/lustre /mnt/lustre
client-26vm6: mount.nfs: Connection timed out
client-26vm5: mount.nfs: Connection timed out
 parallel-scale-nfsv3 : @@@@@@ FAIL: setup nfs failed! 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;blockquote&gt;&lt;p&gt;On somehow, this issue can be resolved by specifying &quot;fsid=1&quot; (without &quot;32bitapi&quot; for Lustre mount option) when re-export Lustre via NFS (v3 or v4). For example: &quot;/mnt/lustre 10.211.55.*(rw,no_root_squash,fsid=1)&quot;. (Verified on 2.6.32-358.2.1.el6)&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;We need a patch on Lustre b2_1 branch to resolve the interop issue.&lt;/p&gt;</comment>
                            <comment id="59356" author="yujian" created="Mon, 27 May 2013 06:46:23 +0000"  >&lt;p&gt;Patch for Lustre b2_1 branch to add &quot;32bitapi&quot; Lustre client mount option while exporting the Lustre client as NFSv3 server: &lt;a href=&quot;http://review.whamcloud.com/6457&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6457&lt;/a&gt;&lt;br/&gt;
Patch for Lustre b1_8 branch: &lt;a href=&quot;http://review.whamcloud.com/6663&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6663&lt;/a&gt;&lt;br/&gt;
Patch for Lustre master branch: &lt;a href=&quot;http://review.whamcloud.com/6649&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6649&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="62005" author="shadow" created="Wed, 10 Jul 2013 07:20:37 +0000"  >&lt;p&gt;last patch &lt;br/&gt;
&lt;a href=&quot;http://git.whamcloud.com/?p=fs/lustre-release.git;a=commitdiff;h=8c4f4a47e051b097358818f4d3777d02124abbe7&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://git.whamcloud.com/?p=fs/lustre-release.git;a=commitdiff;h=8c4f4a47e051b097358818f4d3777d02124abbe7&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;looks invalid - lustre client had already such code&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;        /* We set sb-&amp;gt;s_dev equal on all lustre clients in order to support
         * NFS export clustering.  NFSD requires that the FSID be the same
         * on all clients. */
        /* s_dev is also used in lt_compare() to compare two fs, but that is
         * only a node-local comparison. */
        uuid = obd_get_uuid(sbi-&amp;gt;ll_md_exp);
        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (uuid != NULL)
                sb-&amp;gt;s_dev = get_uuid2int(uuid-&amp;gt;uuid, strlen(uuid-&amp;gt;uuid));
        sbi-&amp;gt;ll_mnt = mnt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;In that case exporting an s_dev via statfs will be enough.&lt;/p&gt;</comment>
                            <comment id="62010" author="yong.fan" created="Wed, 10 Jul 2013 09:50:15 +0000"  >&lt;p&gt;32-bits uuid maybe works for this case, but since the POSIX API is 64-bits, and the statfs() is not only for re-exporting via nfs, but also for others, so we prefer to generate and return 64-bits uuid as expected.&lt;/p&gt;</comment>
                            <comment id="62011" author="shadow" created="Wed, 10 Jul 2013 10:07:18 +0000"  >&lt;p&gt;fsid have a single requirement - that is should be same for a cluster and unique on node.&lt;br/&gt;
i think 32bits uid is enough to encode FS id in statfs.&lt;br/&gt;
but using a single FS have a benefits with interop - different nodes (with older and new nfsd) have same fs id in NFS handles so may used in failover pair.&lt;/p&gt;

&lt;p&gt;I have some question to second patch also - we have prepared own NFS handle structure with lu_fid inside and it&apos;s should be don&apos;t have a limitation over 32bits, if we have lost one code patch and nfs handle created with wrong format - we need invest it is.&lt;/p&gt;</comment>
                            <comment id="62014" author="yong.fan" created="Wed, 10 Jul 2013 12:53:21 +0000"  >&lt;p&gt;Honestly, I am not sure whether 32-bits is enough or not for kinds of statfs() users. It is true that in mixed environment new client will export 64-bits FSID and old client will export 32-bits FSID, such difference may cause issues if the users want to access Lustre via different clients with the same handle. But I do not know whether someone really will want to use Lustre as that. From a long view, we need to upgrade the FSID to 64-bits, otherwise, if 32-bits is always enough, the statfs() API should be shrink...&lt;/p&gt;

&lt;p&gt;As for NFS handle with lu_fid, it works for objects under export-point, but the root NFS handle does not contains the lu_fid (which does not goes down to Lustre). That is why we make this patch.&lt;/p&gt;</comment>
                            <comment id="62017" author="shadow" created="Wed, 10 Jul 2013 13:24:25 +0000"  >&lt;p&gt;did you really think 2^32 lustre mounts exist on single node? FSid just an unique id for a mount point.&lt;br/&gt;
root cause to have it&apos;s 64bit - 32bit for block device id and 32bit for slice number inside of block device, so just need identify a mount point correctly. In case lustre, we don&apos;t have a slices inside a device and don&apos;t need a fill it, but device id exactly identify an mount point. &lt;/p&gt;

&lt;p&gt;as about root node for an export, let me look - but as i remember a nfs-tools it&apos;s also as about NFS handle from an kernel. &lt;/p&gt;</comment>
                            <comment id="62022" author="shadow" created="Wed, 10 Jul 2013 13:58:14 +0000"  >&lt;p&gt;anyway most kernel FS uses&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;&amp;lt;------&amp;gt;u64 id = huge_encode_dev(sb-&amp;gt;s_bdev-&amp;gt;bd_dev);
&amp;lt;------&amp;gt;buf-&amp;gt;f_fsid.val[0] = (u32)id;
&amp;lt;------&amp;gt;buf-&amp;gt;f_fsid.val[1] = (u32)(id &amp;gt;&amp;gt; 32);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;i don&apos;t understand - why do not use same.&lt;/p&gt;</comment>
                            <comment id="62028" author="yong.fan" created="Wed, 10 Jul 2013 15:25:54 +0000"  >&lt;p&gt;The root issue is in user space nfs-utils.&lt;/p&gt;

&lt;p&gt;1) The FSID returned via statfs() to nfs-utils is 64-bits, in spite of use the new generated 64-bits FSID or reuse the old 32-bits FSID. If old 32-bits FSID is used, then __kernel_fsid_t::val&lt;span class=&quot;error&quot;&gt;&amp;#91;1&amp;#93;&lt;/span&gt; (or val&lt;span class=&quot;error&quot;&gt;&amp;#91;0&amp;#93;&lt;/span&gt;) is zero. Before this patch applied, Lustre did not return FSID via statfs().&lt;/p&gt;

&lt;p&gt;2) The root NFS handle contains root inode#. Lustre root inode# is 64-bits, but when nfs-utils parses the root handle, it is converted to 32-bits. So it cannot locate the right &quot;inode&quot;. I have made a patch for that, and sent it to related kernel maintainers, and hope the patch can be accepted/landed in the next nfs-utils release.&lt;/p&gt;

&lt;p&gt;diff --git a/utils/mountd/cache.c b/utils/mountd/cache.c&lt;br/&gt;
index 517aa62..a7212e7 100644&lt;br/&gt;
&amp;#8212; a/utils/mountd/cache.c&lt;br/&gt;
+++ b/utils/mountd/cache.c&lt;br/&gt;
@@ -388,7 +388,7 @@ struct parsed_fsid {&lt;br/&gt;
        int fsidtype;&lt;br/&gt;
        /* We could use a union for this, but it would be more&lt;/p&gt;
&lt;ul&gt;
	&lt;li&gt;complicated; why bother? */&lt;/li&gt;
&lt;/ul&gt;
&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;unsigned int inode;&lt;br/&gt;
+       uint64_t inode;&lt;br/&gt;
        unsigned int minor;&lt;br/&gt;
        unsigned int major;&lt;br/&gt;
        unsigned int fsidnum;&lt;br/&gt;
&amp;#8211;&lt;br/&gt;
1.7.1&lt;/li&gt;
&lt;/ul&gt;
</comment>
                            <comment id="62077" author="shadow" created="Thu, 11 Jul 2013 08:25:12 +0000"  >&lt;p&gt;1) I understand (and agree) about returning a fs id from statfs - but think we may use a s_dev for it and 32bit is enough, and we may use an high part of fs id with filling lustre magic (if need).&lt;/p&gt;

&lt;p&gt;2) let me time until Monday to look into mounted code carefully.&lt;/p&gt;</comment>
                            <comment id="62110" author="yong.fan" created="Thu, 11 Jul 2013 14:39:29 +0000"  >&lt;p&gt;1) In theory, we can fill the low 32-bits of FSID with s_dev, and fill the high 32-bits of FSID as anything, such as Lustre magic. But in spite of what is filled, we cannot control how the caller to use the returned FSID. And there is no explicit advantage of replacing current patch, since the 64-bits FSID only be generated when mount.&lt;/p&gt;</comment>
                            <comment id="62169" author="shadow" created="Fri, 12 Jul 2013 07:41:36 +0000"  >&lt;p&gt;well, you can&apos;t control how caller will use FSID anyway, but FSID purpose - just separate one NFS handle in hash from other. In case single FS exported via different path and NFS may do round robin access to same files or load balancing (in NFS v4). In that case any number (as you see in ticket set FSID=1 in exports file) is enough, but it&apos;s need to be unique at host and same on cluster. As you don&apos;t know which versions NFS servers in load balancing pairs used - you need present same FSID for each case - when it&apos;s generated from s_dev and from stat() call.&lt;br/&gt;
Also it&apos;s avoid using a private kernel types in lustre includes/structures.&lt;/p&gt;
</comment>
                            <comment id="62250" author="yong.fan" created="Sun, 14 Jul 2013 02:39:09 +0000"  >&lt;p&gt;Currently, the llite can export both 32-bits &quot;s_dev&quot; and the 64-bits &quot;FSID&quot;, which one will be used depends on the users (nfs-utils or other applications). Even if they are different, they can be used to indicate/locate the same device (FS). Using the &quot;s_dev&quot; will ignore &quot;FSID&quot;, the same for the reverse case. It is not required to be the same.&lt;/p&gt;

&lt;p&gt;I am not sure I caught the point you worry about, but you can give me a detailed example of the patch breaking something.&lt;/p&gt;</comment>
                            <comment id="62458" author="yujian" created="Wed, 17 Jul 2013 07:00:40 +0000"  >&lt;p&gt;Hi Oleg,&lt;/p&gt;

&lt;p&gt;Could you please cherry-pick &lt;a href=&quot;http://review.whamcloud.com/6493&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6493&lt;/a&gt; to Lustre b2_4 branch? Thanks.&lt;/p&gt;

&lt;p&gt;The parallel-scale-nfsv3 test also failed on the current Lustre b2_4 branch:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/9f30063c-ed8f-11e2-8e3a-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/9f30063c-ed8f-11e2-8e3a-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="62460" author="yujian" created="Wed, 17 Jul 2013 07:40:45 +0000"  >&lt;p&gt;The patch of &lt;a href=&quot;http://review.whamcloud.com/6493&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6493&lt;/a&gt; also needs to be backported to Lustre b1_8 and b2_1 branches to pass the testing on the following interop configurations:&lt;/p&gt;

&lt;p&gt;NFS clients + Lustre b1_8/b2_1 NFS server/Lustre client + Lustre b2_4/master Lustre servers&lt;/p&gt;</comment>
                            <comment id="62465" author="shadow" created="Wed, 17 Jul 2013 09:12:04 +0000"  >&lt;p&gt;you have break a clustered NFS or NFS failover configuration where &lt;br/&gt;
2 NFS servers in pair - first with older nfs-utils tools where fsid generated from s_dev, second with this patch.&lt;/p&gt;

&lt;p&gt;so you have broken interoperability with older versions. &lt;/p&gt;</comment>
                            <comment id="63959" author="yujian" created="Fri, 9 Aug 2013 12:55:46 +0000"  >&lt;p&gt;Hi Oleg,&lt;/p&gt;

&lt;p&gt;Could you please cherry-pick the patch to Lustre b2_4 branch? Thanks.&lt;/p&gt;

&lt;p&gt;The failure occurred regularly on Lustre b2_4 branch:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/0c61eedc-fdad-11e2-9fd5-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/0c61eedc-fdad-11e2-9fd5-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/f1c60464-fd16-11e2-9fdb-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/f1c60464-fd16-11e2-9fdb-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/13499228-fcda-11e2-b90c-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/13499228-fcda-11e2-b90c-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/3d64d0f8-fcc2-11e2-9fdb-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/3d64d0f8-fcc2-11e2-9fdb-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/512fc62e-fcb8-11e2-9222-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/512fc62e-fcb8-11e2-9222-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="64152" author="yujian" created="Tue, 13 Aug 2013 04:01:18 +0000"  >&lt;p&gt;The patch &lt;a href=&quot;http://review.whamcloud.com/6493&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6493&lt;/a&gt; was landed on Lustre b2_4 branch.&lt;/p&gt;</comment>
                            <comment id="64158" author="shadow" created="Tue, 13 Aug 2013 07:58:32 +0000"  >&lt;p&gt;I strongly disagree about that patch, please revert and commit correct version without clustered nfs interoperability broken.&lt;/p&gt;</comment>
                            <comment id="64243" author="paf" created="Wed, 14 Aug 2013 14:18:06 +0000"  >&lt;p&gt;I think I agree with Alexey - What&apos;s the purpose of requiring a patched version of NFS-utils?  Obviously we eventually want to fix the entire NFS-utils and kernel NFS/NFSD problems with 64 bit root inodes, but until complete fixes are available, shouldn&apos;t we not require a patch?  (Also, having an nfs-utils patch adds another package that Lustre users must build themselves or that must be provided with Lustre &lt;span class=&quot;error&quot;&gt;&amp;#91;like e2fsprogs&amp;#93;&lt;/span&gt;.)&lt;/p&gt;

&lt;p&gt;It seems like the better solution is to document and require the -o fsid= option while pushing for fixes upstream.  (This is Cray&apos;s plan going forward whether or not this specific patch remains in Lustre.)&lt;/p&gt;</comment>
                            <comment id="64935" author="yong.fan" created="Fri, 23 Aug 2013 04:02:48 +0000"  >&lt;p&gt;Generate the FSID from super_block::s_dev.&lt;/p&gt;

&lt;p&gt;The patch for master:&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/#/c/7434/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/7434/&lt;/a&gt;&lt;br/&gt;
The patch for b2_4:&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/#/c/7435/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/7435/&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="65499" author="pjones" created="Sat, 31 Aug 2013 18:48:20 +0000"  >&lt;p&gt;Landed for 2.4.1 and 2.5&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="18881">LU-3318</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="19660">LU-3550</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="21250">LU-4057</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="12896" name="0001-LU-2904-nfs-support-64-bits-inode-number-in-nfs-hand.patch" size="1341" author="yong.fan" created="Wed, 22 May 2013 09:17:15 +0000"/>
                            <attachment id="12897" name="0001-LU-2904-obdclass-return-valid-uuid-for-statfs.patch" size="2493" author="yong.fan" created="Wed, 22 May 2013 09:32:22 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvk5z:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>6993</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>