<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:31:29 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-10038] sanity test 133g fails with &#8220; &apos;$&apos;mds1 find /proc/fs/lustre/n/proc/sys/lnet/n/sys/fs/lustre/n/sys/kernel/debug/lnet/n/sys/kernel/debug/lustre/ failed&apos;&apos; </title>
                <link>https://jira.whamcloud.com/browse/LU-10038</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;sanity test_133g fails on the call to find on the MDS. From the test_log, we see the call to find&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;mds1_proc_dirs=&apos;/proc/fs/lustre/
/proc/sys/lnet/
/sys/fs/lustre/
/sys/kernel/debug/lnet/
/sys/kernel/debug/lustre/&apos;
 sanity test_133g: @@@@@@ FAIL: mds1 find /proc/fs/lustre/
/proc/sys/lnet/
/sys/fs/lustre/
/sys/kernel/debug/lnet/
/sys/kernel/debug/lustre/ failed 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Looking at the client2 dmesg, we see the output from 133g&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 3573.106070] Lustre: DEBUG MARKER: == sanity test 133g: Check for Oopses on bad io area writes/reads in /proc =========================== 21:44:29 (1504907069)
[ 3573.249527] Lustre: 18911:0:(lprocfs_status.c:2483:lprocfs_wr_root_squash()) lustre: failed to set root_squash due to bad address, rc = -14
[ 3573.254669] Lustre: 18911:0:(lprocfs_status.c:2479:lprocfs_wr_root_squash()) lustre: failed to set root_squash to &quot;&quot;, needs uid:gid format, rc = -22
[ 3573.263113] Lustre: 18916:0:(lprocfs_status.c:2551:lprocfs_wr_nosquash_nids()) lustre: failed to set nosquash_nids to &quot;&quot;, bad address rc = -14
[ 3573.268474] Lustre: 18916:0:(lprocfs_status.c:2555:lprocfs_wr_nosquash_nids()) lustre: failed to set nosquash_nids due to string too long rc = -22
[ 3573.349716] LustreError: 18934:0:(gss_cli_upcall.c:240:gss_do_ctx_init_rpc()) ioctl size 5, expect 80, please check lgss_keyring version
[ 3573.379338] LustreError: 18984:0:(ldlm_resource.c:355:lru_size_store()) lru_size: invalid value written
[ 3573.422121] Lustre: 19067:0:(libcfs_string.c:127:cfs_str2mask()) unknown mask &apos; &apos;.
mask usage: [+|-]&amp;lt;all|type&amp;gt; ...
[ 3574.553074] Lustre: DEBUG MARKER: /usr/sbin/lctl mark  sanity test_133g: @@@@@@ FAIL: mds1 find \/proc\/fs\/lustre\/
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Looking at the dmesg log on the MDS1, we see similar output, but a few more lines&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 3479.763450] Lustre: DEBUG MARKER: find /proc/fs/lustre/ /proc/sys/lnet/ /sys/fs/lustre/ /sys/kernel/debug/lnet/ /sys/kernel/debug/lustre/ -type f -not -name force_lbug -not -name changelog_mask -exec badarea_io {} \;
[ 3479.911266] Lustre: 17021:0:(mdt_coordinator.c:1944:mdt_hsm_policy_seq_write()) lustre-MDT0000: &apos; &apos; is unknown, supported policies are:
[ 3479.944620] LustreError: 17069:0:(mdt_coordinator.c:2097:mdt_hsm_cdt_control_seq_write()) lustre-MDT0000: Valid coordinator control commands are: enabled shutdown disabled purge help
[ 3479.950356] Lustre: 17071:0:(lprocfs_status.c:2483:lprocfs_wr_root_squash()) lustre-MDT0000: failed to set root_squash due to bad address, rc = -14
[ 3479.954690] Lustre: 17071:0:(lprocfs_status.c:2479:lprocfs_wr_root_squash()) lustre-MDT0000: failed to set root_squash to &quot;&quot;, needs uid:gid format, rc = -22
[ 3479.960431] LustreError: 17072:0:(genops.c:1540:obd_export_evict_by_uuid()) lustre-MDT0000: can&apos;t disconnect : no exports found
[ 3479.965980] LustreError: 17074:0:(mdt_lproc.c:366:lprocfs_identity_info_seq_write()) lustre-MDT0000: invalid data count = 5, size = 1048
[ 3479.970389] LustreError: 17074:0:(mdt_lproc.c:383:lprocfs_identity_info_seq_write()) lustre-MDT0000: MDS identity downcall bad params
[ 3479.975746] Lustre: 17075:0:(lprocfs_status.c:2551:lprocfs_wr_nosquash_nids()) lustre-MDT0000: failed to set nosquash_nids to &quot;&quot;, bad address rc = -14
[ 3479.980510] Lustre: 17075:0:(lprocfs_status.c:2555:lprocfs_wr_nosquash_nids()) lustre-MDT0000: failed to set nosquash_nids due to string too long rc = -22
[ 3479.988148] LustreError: 17079:0:(mdt_lproc.c:298:mdt_identity_upcall_seq_write()) lustre-MDT0000: identity upcall too long
[ 3480.066568] LustreError: 17187:0:(lproc_fid.c:177:lprocfs_server_fid_width_seq_write()) ctl-lustre-MDT0000: invalid FID sequence width: rc = -14
[ 3480.104133] LustreError: 17240:0:(ldlm_resource.c:104:seq_watermark_write()) Failed to set lock_limit_mb, rc = -14.
[ 3480.122289] LustreError: 17250:0:(nodemap_storage.c:420:nodemap_idx_nodemap_update()) cannot add nodemap config to non-existing MGS.
[ 3480.128801] LustreError: 17252:0:(nodemap_handler.c:1049:nodemap_create()) cannot add nodemap: &apos; &apos;: rc = -22
[ 3480.250476] LustreError: 17363:0:(ldlm_resource.c:355:lru_size_store()) lru_size: invalid value written
[ 3480.333333] Lustre: 17493:0:(libcfs_string.c:127:cfs_str2mask()) unknown mask &apos; &apos;.
mask usage: [+|-]&amp;lt;all|type&amp;gt; ...
[ 3480.468566] Lustre: DEBUG MARKER: /usr/sbin/lctl mark  sanity test_133g: @@@@@@ FAIL: mds1 find \/proc\/fs\/lustre\/
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This ticket was opened because we are seeing this in testing a separate MGS and MDS. Logs for these failures are at:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/308ca8f6-97d7-11e7-b944-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/308ca8f6-97d7-11e7-b944-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/774c9f26-97d7-11e7-b944-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/774c9f26-97d7-11e7-b944-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;We have also seen this test fail frequently during interop testing.&lt;/p&gt;
</description>
                <environment></environment>
        <key id="48498">LU-10038</key>
            <summary>sanity test 133g fails with &#8220; &apos;$&apos;mds1 find /proc/fs/lustre/n/proc/sys/lnet/n/sys/fs/lustre/n/sys/kernel/debug/lnet/n/sys/kernel/debug/lustre/ failed&apos;&apos; </summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                            <label>tests</label>
                    </labels>
                <created>Wed, 27 Sep 2017 18:00:19 +0000</created>
                <updated>Tue, 17 Jul 2018 18:29:00 +0000</updated>
                            <resolved>Wed, 28 Mar 2018 14:46:33 +0000</resolved>
                                    <version>Lustre 2.11.0</version>
                                    <fixVersion>Lustre 2.11.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="209757" author="green" created="Wed, 27 Sep 2017 18:18:31 +0000"  >&lt;p&gt;So /proc/sys/lustre /proc/sys/lnet no longer exist and the logic in the test cannot catch it it seems?&lt;/p&gt;

&lt;p&gt;this bit:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;        local proc_dirs=$(eval \ls -d $proc_regexp 2&amp;gt;/dev/&lt;span class=&quot;code-keyword&quot;&gt;null&lt;/span&gt;)

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;is supposed to reduce the list from &lt;tt&gt;proc_regexp=&quot;/{proc,sys}/{fs,sys,kernel/debug}/{lustre,lnet}/&quot;&lt;/tt&gt; to:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# ls -ld /{proc,sys}/{fs,sys,kernel/debug}/{lustre,lnet}/ 2&amp;gt;/dev/null
dr-xr-xr-x 26 root root 0 Sep 27 14:13 /proc/fs/lustre/
drwxr-xr-x 21 root root 0 Sep 27 14:02 /sys/fs/lustre/
drwxr-xr-x  2 root root 0 Sep 27 14:02 /sys/kernel/debug/lnet/
drwxr-xr-x  2 root root 0 Sep 27 14:02 /sys/kernel/debug/lustre/

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;but in the test output we see&#160;&lt;tt&gt;/proc/sys/lnet&lt;/tt&gt; somehow made it to the list too.&lt;/p&gt;

&lt;p&gt;Interop testing should not be affected because we recheck the list of dirs from every facet separately (since --&lt;del&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7092&quot; title=&quot;Interop 2.7.0&amp;lt;-&amp;gt;master sanity test_133g 133f failed: mds1 find  /proc/fs/lustre/ /proc/sys/lnet/ /proc/sys/lustre/ /sys/fs/lustre/ /sys/kernel/debug/lustre/ failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7092&quot;&gt;&lt;del&gt;LU-7092&lt;/del&gt;&lt;/a&gt;&lt;/del&gt;-- )&lt;/p&gt;</comment>
                            <comment id="209777" author="adilger" created="Wed, 27 Sep 2017 21:35:13 +0000"  >&lt;p&gt;It&apos;s possible that the pathnames are being expanded on the client instead of on the server?&#160; Looking at the debug logs, it looks like the &quot;ls -d ...&quot; is working properly, but it is possible.&#160;&lt;/p&gt;

&lt;p&gt;In any case, this seems like a bug in the test and not a functionality problem.&#160; I suspect that sufficient escaping of &lt;tt&gt;$proc_regexp&lt;/tt&gt; might help.&lt;/p&gt;</comment>
                            <comment id="209778" author="jamesanunez" created="Wed, 27 Sep 2017 21:37:18 +0000"  >&lt;p&gt;I show that /proc/sys/lnet does have files and /proc/sys/lustre does not make it into the list of dirs to run find in. On the MDS on a test system:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# ls -d /{proc,sys}/{fs,sys,kernel/debug}/{lustre,lnet}
ls: cannot access /proc/fs/lnet: No such file or directory
ls: cannot access /proc/sys/lustre: No such file or directory
ls: cannot access /proc/kernel/debug/lustre: No such file or directory
ls: cannot access /proc/kernel/debug/lnet: No such file or directory
ls: cannot access /sys/fs/lnet: No such file or directory
ls: cannot access /sys/sys/lustre: No such file or directory
ls: cannot access /sys/sys/lnet: No such file or directory
/proc/fs/lustre  /sys/fs/lustre          /sys/kernel/debug/lustre
/proc/sys/lnet   /sys/kernel/debug/lnet
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I&apos;m running on latest master 2.10.53 3.10.0-693.1.1.el7_lustre.x86_64. &lt;/p&gt;

&lt;p&gt;I&apos;m a little confused by the output from the find command in this test. Why or where are we getting those Lustre and LustreError messages:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;...
[ 3480.122289] LustreError: 17250:0:(nodemap_storage.c:420:nodemap_idx_nodemap_update()) cannot add nodemap config to non-existing MGS.
[ 3480.128801] LustreError: 17252:0:(nodemap_handler.c:1049:nodemap_create()) cannot add nodemap: &apos; &apos;: rc = -22
[ 3480.250476] LustreError: 17363:0:(ldlm_resource.c:355:lru_size_store()) lru_size: invalid value written
[ 3480.333333] Lustre: 17493:0:(libcfs_string.c:127:cfs_str2mask()) unknown mask &apos; &apos;.
mask usage: [+|-]&amp;lt;all|type&amp;gt; ...
[ 3480.468566] Lustre: DEBUG MARKER: /usr/sbin/lctl mark  sanity test_133g: @@@@@@ FAIL: mds1 find \/proc\/fs\/lustre\/
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I get the same output, those errors in the MDS dmesg log, in my test system but the test doesn&apos;t fail. &lt;/p&gt;</comment>
                            <comment id="209789" author="simmonsja" created="Thu, 28 Sep 2017 00:02:08 +0000"  >&lt;p&gt;One of the test is to scribble random data into the proc/sysfs/debugfs entries. This is to ensure we don&apos;t oops or touch user space memory incorrectly. &lt;/p&gt;</comment>
                            <comment id="209800" author="green" created="Thu, 28 Sep 2017 01:46:30 +0000"  >&lt;p&gt;Yes, like James Simmons said, the errors are normal - this is because we write garbage to random files.&lt;br/&gt;
The problem for the failure is that when you do find in a list of directories, some of which don&apos;t exist, you get an error. So we need to find which dir it is that does not exist and why it was not filtered out.&lt;/p&gt;</comment>
                            <comment id="212807" author="adilger" created="Sat, 4 Nov 2017 14:42:46 +0000"  >&lt;p&gt;Hit this again:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/5cdcc7f8-c08e-11e7-8cd9-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/5cdcc7f8-c08e-11e7-8cd9-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="213783" author="gerrit" created="Wed, 15 Nov 2017 16:12:05 +0000"  >&lt;p&gt;John L. Hammond (john.hammond@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/30105&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30105&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10038&quot; title=&quot;sanity test 133g fails with &#8220; &amp;#39;$&amp;#39;mds1 find /proc/fs/lustre/n/proc/sys/lnet/n/sys/fs/lustre/n/sys/kernel/debug/lnet/n/sys/kernel/debug/lustre/ failed&amp;#39;&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10038&quot;&gt;&lt;del&gt;LU-10038&lt;/del&gt;&lt;/a&gt; test: improve error reporting in sanity test_133g()&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 4bfa1ce0881d3dbe737a2a1e5b2d85679fb41993&lt;/p&gt;</comment>
                            <comment id="214482" author="gerrit" created="Wed, 22 Nov 2017 20:49:51 +0000"  >&lt;p&gt;Jinshan Xiong (jinshan.xiong@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/30219&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30219&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10038&quot; title=&quot;sanity test 133g fails with &#8220; &amp;#39;$&amp;#39;mds1 find /proc/fs/lustre/n/proc/sys/lnet/n/sys/fs/lustre/n/sys/kernel/debug/lnet/n/sys/kernel/debug/lustre/ failed&amp;#39;&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10038&quot;&gt;&lt;del&gt;LU-10038&lt;/del&gt;&lt;/a&gt; test: improve error reporting in sanity test_133g()&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: flr&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 4cef38d724a05ce0ae386cb9d2a618d187cbe8d1&lt;/p&gt;</comment>
                            <comment id="214495" author="gerrit" created="Thu, 23 Nov 2017 00:47:08 +0000"  >&lt;p&gt;Jinshan Xiong (jinshan.xiong@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/30219/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30219/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10038&quot; title=&quot;sanity test 133g fails with &#8220; &amp;#39;$&amp;#39;mds1 find /proc/fs/lustre/n/proc/sys/lnet/n/sys/fs/lustre/n/sys/kernel/debug/lnet/n/sys/kernel/debug/lustre/ failed&amp;#39;&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10038&quot;&gt;&lt;del&gt;LU-10038&lt;/del&gt;&lt;/a&gt; test: improve error reporting in sanity test_133g()&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: flr&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: f1420059ac7d33cba65ab1b14fd5eade3c889684&lt;/p&gt;</comment>
                            <comment id="214912" author="gerrit" created="Wed, 29 Nov 2017 05:59:40 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/30105/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30105/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10038&quot; title=&quot;sanity test 133g fails with &#8220; &amp;#39;$&amp;#39;mds1 find /proc/fs/lustre/n/proc/sys/lnet/n/sys/fs/lustre/n/sys/kernel/debug/lnet/n/sys/kernel/debug/lustre/ failed&amp;#39;&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10038&quot;&gt;&lt;del&gt;LU-10038&lt;/del&gt;&lt;/a&gt; test: improve error reporting in sanity test_133g()&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: e5eaaff6e378b8c95d0a809f4dd3b4817d9fd492&lt;/p&gt;</comment>
                            <comment id="215657" author="jhammond" created="Fri, 8 Dec 2017 14:31:40 +0000"  >&lt;p&gt;Here&apos;s a failure that occurred after the error reporting patch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/sub_tests/ed882ff6-d54a-11e7-a066-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/sub_tests/ed882ff6-d54a-11e7-a066-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;It looks like one of the export directories is going away while find is running on the mdt:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== sanity test 133g: Check for Oopses on bad io area writes/reads in /proc =========================== 20:28:46 (1511987326)
proc_dirs=&apos;/proc/fs/lustre/
/sys/fs/lustre/
/sys/kernel/debug/lnet/
/sys/kernel/debug/lustre/&apos;
CMD: onyx-38vm9 /usr/sbin/lctl get_param -n version 2&amp;gt;/dev/null ||
				/usr/sbin/lctl lustre_build_version 2&amp;gt;/dev/null ||
				/usr/sbin/lctl --version 2&amp;gt;/dev/null | cut -d&apos; &apos; -f2
mds1_proc_dirs=&apos;/proc/fs/lustre/
/sys/fs/lustre/
/sys/kernel/debug/lnet/
/sys/kernel/debug/lustre/&apos;
CMD: onyx-38vm9 find /proc/fs/lustre/ /sys/fs/lustre/ /sys/kernel/debug/lnet/ /sys/kernel/debug/lustre/ -type f -not -name force_lbug -not -name changelog_mask -exec badarea_io {} \;
onyx-38vm9: find: &#8216;/proc/fs/lustre/mdt/lustre-MDT0000/exports/10.2.8.104@tcp&#8217;: No such file or directory
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="215664" author="jhammond" created="Fri, 8 Dec 2017 15:50:32 +0000"  >&lt;p&gt;This must be because we are writing to the &lt;tt&gt;.../exports/clear&lt;/tt&gt; file between readdir and accessing the 10.2.8.104@tcp subdir. find has a &lt;tt&gt;&amp;#45;ignore_readdir_race&lt;/tt&gt; that should address this.&lt;/p&gt;</comment>
                            <comment id="215673" author="gerrit" created="Fri, 8 Dec 2017 16:34:24 +0000"  >&lt;p&gt;John L. Hammond (john.hammond@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/30451&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30451&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10038&quot; title=&quot;sanity test 133g fails with &#8220; &amp;#39;$&amp;#39;mds1 find /proc/fs/lustre/n/proc/sys/lnet/n/sys/fs/lustre/n/sys/kernel/debug/lnet/n/sys/kernel/debug/lustre/ failed&amp;#39;&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10038&quot;&gt;&lt;del&gt;LU-10038&lt;/del&gt;&lt;/a&gt; test: ignore readdir races in sanity 133g&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 7e1322643470688cb0c306dc866e3c2f84ad4c26&lt;/p&gt;</comment>
                            <comment id="215809" author="adilger" created="Fri, 8 Dec 2017 17:42:12 +0000"  >&lt;p&gt;Very interesting. I wouldn&#8217;t have thought of this as the cause. &lt;/p&gt;</comment>
                            <comment id="216523" author="gerrit" created="Sun, 17 Dec 2017 06:20:56 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/30451/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30451/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10038&quot; title=&quot;sanity test 133g fails with &#8220; &amp;#39;$&amp;#39;mds1 find /proc/fs/lustre/n/proc/sys/lnet/n/sys/fs/lustre/n/sys/kernel/debug/lnet/n/sys/kernel/debug/lustre/ failed&amp;#39;&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10038&quot;&gt;&lt;del&gt;LU-10038&lt;/del&gt;&lt;/a&gt; test: ignore readdir races in sanity 133g&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 45f99f6562349f77be6b47bf1bc5a94abf9fd11d&lt;/p&gt;</comment>
                            <comment id="224719" author="jgmitter" created="Wed, 28 Mar 2018 14:46:33 +0000"  >&lt;p&gt;Patches have landed to master for 2.11.0&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="46803">LU-9700</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="36381">LU-8066</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="52716">LU-11152</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzkx3:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>