<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:57:39 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-13017] sanity test 56ra fails with &apos; &apos;/usr/bin/lfs find -size 0 -type f /mnt/lustre/d56ra.sanity&apos; should send 12 glimpse RPCs to OST&apos;</title>
                <link>https://jira.whamcloud.com/browse/LU-13017</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;sanity test_56ra fails with &apos; &apos;/usr/bin/lfs find -size 0 -type f /mnt/lustre/d56ra.sanity&apos; should send 12 glimpse RPCs to OST&apos;. We&#8217;ve seen this error twice for 2.12.3.31; &lt;a href=&quot;https://testing.whamcloud.com/test_sets/93cddba8-0d76-11ea-98f1-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/93cddba8-0d76-11ea-98f1-52540065bddc&lt;/a&gt; and &lt;a href=&quot;https://testing.whamcloud.com/test_sets/3d7c7eea-0d93-11ea-8e77-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/3d7c7eea-0d93-11ea-8e77-52540065bddc&lt;/a&gt;. &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== sanity test 56ra: check lfs find -size -lazy works for data on OSTs =============================== 10:36:52 (1574332612)
Before: 6056 After: 6069 3
osc.lustre-OST0000-osc-ffff8c52a0a6c000.stats=
snapshot_time             1574332613.130677340 secs.nsecs
req_waittime              3629 samples [usec] 405 4908409 125708950 217095123014804
req_active                3630 samples [reqs] 1 16 10917 64837
ldlm_glimpse_enqueue      453 samples [reqs] 1 1 453 453
ldlm_extent_enqueue       518 samples [reqs] 1 1 518 518
read_bytes                10 samples [bytes] 0 1048576 4198400 4380984082432
write_bytes               729 samples [bytes] 4 1048576 465385120 468655951772936
ost_setattr               30 samples [usec] 493 2458360 5470309 7460871612619
ost_read                  10 samples [usec] 770 112506 334235 27947789183
ost_write                 729 samples [usec] 814 4301470 93802565 175458110296977
ost_connect               29 samples [usec] 655 12195 57083 265453679
ost_disconnect            17 samples [usec] 1029 3311 31912 67557322
ost_punch                 511 samples [usec] 503 954851 1266293 911962877985
ost_statfs                24 samples [usec] 405 1646 17727 14510469
ost_sync                  4 samples [usec] 471514 1920497 3598304 4653850793070
ost_quotactl              16 samples [usec] 414 838 8192 4340976
ldlm_cancel               952 samples [usec] 533 39037 11498758 185050546552
obd_ping                  336 samples [usec] 760 4908409 8456427 28320522807435
osc.lustre-OST0001-osc-ffff8c52a0a6c000.stats=
&#8230;
obd_ping                  364 samples [usec] 735 1157343 2386059 1579049113871
osc.lustre-OST0006-osc-ffff8c52a0a6c000.stats=
snapshot_time             1574332613.130931617 secs.nsecs
req_waittime              2363 samples [usec] 368 10241895 31650509 113679647081289
req_active                2364 samples [reqs] 1 15 6902 40110
ldlm_glimpse_enqueue      928 samples [reqs] 1 1 928 928
ldlm_extent_enqueue       14 samples [reqs] 1 1 14 14
read_bytes                2 samples [bytes] 0 0 0 0
write_bytes               19 samples [bytes] 1 1048576 12837273 13249571414835
ost_setattr               31 samples [usec] 459 10241895 14509301 109946640240861
ost_read                  2 samples [usec] 1823 2294 4117 8585765
ost_write                 19 samples [usec] 814 543145 1723935 429765970407
ost_connect               27 samples [usec] 450 12275 51577 247601705
ost_disconnect            14 samples [usec] 999 41957 63004 1800765226
ost_punch                 9 samples [usec] 622 73011 79728 5336508958
ost_statfs                24 samples [usec] 368 2161 14597 12673415
ost_quotactl              16 samples [usec] 388 462 6778 2878618
ldlm_cancel               927 samples [usec] 411 38371 11153645 173370727119
obd_ping                  352 samples [usec] 759 1625436 2970385 3116552824601
 sanity test_56ra: @@@@@@ FAIL: &apos;/usr/bin/lfs find -size 0 -type f /mnt/lustre/d56ra.sanity&apos; should send 12 glimpse RPCs to OST 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;We&#8217;ve seen an error message that may be related for Ubuntu clients &apos; &apos;/usr/bin/lfs find -size 5 -type f /mnt/lustre/d56ra.sanity&apos; should send 14 glimpse RPCs to OST&apos;; &lt;a href=&quot;https://testing.whamcloud.com/test_sets/138a655c-0d5c-11ea-9487-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/138a655c-0d5c-11ea-9487-52540065bddc&lt;/a&gt; for patch &lt;a href=&quot;https://review.whamcloud.com/36405/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36405/&lt;/a&gt;. From the suite_log&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== sanity test 56ra: check lfs find -size -lazy works for data on OSTs =============================== 17:50:11 (1574445011)
Before: 4861 After: 4873 3
osc.lustre-OST0000-osc-ffff9a29368bb000.stats=
snapshot_time             1574445012.373146631 secs.nsecs
req_waittime              2983 samples [usec] 355 2937458 64237903 59715964412813
req_active                2983 samples [reqs] 1 13 11657 81779
ldlm_glimpse_enqueue      277 samples [reqs] 1 1 277 277
ldlm_extent_enqueue       515 samples [reqs] 1 1 515 515
read_bytes                2 samples [bytes] 4096 4096 8192 33554432
write_bytes               569 samples [bytes] 3 4194304 498256074 1247211946247316
ost_setattr               33 samples [usec] 363 608888 739594 375188097832
ost_read                  2 samples [usec] 455 466 921 424181
ost_write                 569 samples [usec] 554 2937458 49108763 59029078413847
ost_connect               13 samples [usec] 464 6183 24328 96647710
ost_disconnect            9 samples [usec] 755 1623 11362 14999198
ost_punch                 511 samples [usec] 355 43985 346442 4026093074
ost_statfs                12 samples [usec] 357 1074 5859 3259503
ldlm_cancel               774 samples [usec] 390 63347 12515513 274346845595
obd_ping                  267 samples [usec] 502 117510 544749 18808100599
osc.lustre-OST0001-osc-ffff9a29368bb000.stats=
&#8230;
obd_ping                  265 samples [usec] 472 4485131 4896837 20121333347081
mdc.lustre-MDT0000-mdc-ffff9a29368bb000.stats=
snapshot_time             1574445012.413685221 secs.nsecs
req_waittime              938787 samples [usec] 305 1810374 538301391 13512208660843
req_active                938787 samples [reqs] 1 9 961715 1042401
ldlm_glimpse_enqueue      120 samples [reqs] 1 1 120 120
ldlm_ibits_enqueue        429407 samples [reqs] 1 1 429407 429407
mds_getattr               124 samples [usec] 337 1111 47863 19161519
mds_getattr_lock          5898 samples [usec] 470 7696 3084370 1885670342
mds_close                 116757 samples [usec] 305 892872 46303939 1088509049017
mds_readpage              352 samples [usec] 734 33119 1049972 8758729676
mds_statfs                14 samples [usec] 355 866 6123 2898905
mds_sync                  6 samples [usec] 1153 40361 46512 1636587052
mds_getxattr              28 samples [usec] 339 2024 26071 30757607
mds_hsm_state_set         8 samples [usec] 1150 1358 10021 12585899
ldlm_cancel               4370 samples [usec] 331 105790 2428541 42683139111
obd_ping                  204 samples [usec] 446 3724 188084 212073474
seq_query                 1 samples [usec] 472 472 472 222784
Before: 4873 After: 4885 3
Before: 4885 After: 4900 3
 sanity test_56ra: @@@@@@ FAIL: &apos;/usr/bin/lfs find -size 5 -type f /mnt/lustre/d56ra.sanity&apos; should send 14 glimpse RPCs to OST 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;We&#8217;ve also seen this test fail in interop testing.&lt;/p&gt;

&lt;p&gt;There is nothing interesting in the node console logs. &lt;/p&gt;</description>
                <environment></environment>
        <key id="57471">LU-13017</key>
            <summary>sanity test 56ra fails with &apos; &apos;/usr/bin/lfs find -size 0 -type f /mnt/lustre/d56ra.sanity&apos; should send 12 glimpse RPCs to OST&apos;</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="adilger">Andreas Dilger</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                    </labels>
                <created>Tue, 26 Nov 2019 16:08:35 +0000</created>
                <updated>Sat, 4 Mar 2023 21:58:47 +0000</updated>
                            <resolved>Thu, 16 Apr 2020 05:26:26 +0000</resolved>
                                    <version>Lustre 2.14.0</version>
                    <version>Lustre 2.12.4</version>
                                    <fixVersion>Lustre 2.14.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="258853" author="pjones" created="Tue, 26 Nov 2019 18:18:45 +0000"  >&lt;p&gt;Gu Zheng&lt;/p&gt;

&lt;p&gt;Could you please advise?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="258876" author="guzheng" created="Wed, 27 Nov 2019 01:52:37 +0000"  >&lt;p&gt;Sure, Peter&lt;/p&gt;</comment>
                            <comment id="258879" author="adilger" created="Wed, 27 Nov 2019 03:35:53 +0000"  >&lt;p&gt;I don&apos;t think the patch &lt;a href=&quot;https://review.whamcloud.com/36405/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36405/&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11575&quot; title=&quot;make debs fail on debian without systemd enabled&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11575&quot;&gt;&lt;del&gt;LU-11575&lt;/del&gt;&lt;/a&gt; build: install systemd stuff only for debian with systemd enabled&lt;/tt&gt;&quot; could possibly be causing the test failures itself.  It is more likely that this is being seen on this patch because it is one of the few/only patches that is running Ubuntu client testing.  That probably implies that we should have a regular Ubuntu client test session, at least as part of &quot;&lt;tt&gt;full&lt;/tt&gt;&quot;, or possibly an optional session as part of regular review testing if there are test nodes available.&lt;/p&gt;</comment>
                            <comment id="258941" author="guzheng" created="Thu, 28 Nov 2019 02:51:18 +0000"  >&lt;p&gt;Yeah, agreed to Andreas.&lt;/p&gt;

&lt;p&gt;Patch  &lt;a href=&quot;https://review.whamcloud.com/36405/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36405/&lt;/a&gt; (&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11575&quot; title=&quot;make debs fail on debian without systemd enabled&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11575&quot;&gt;&lt;del&gt;LU-11575&lt;/del&gt;&lt;/a&gt;) is only systemd unit stuff, there&apos;s no core change in it. I think these error occurred because the testing clients are Ubuntu, Ubuntu testing sessions are really limited on our normal review, especially 18.04.&lt;/p&gt;</comment>
                            <comment id="259996" author="adilger" created="Mon, 16 Dec 2019 18:22:35 +0000"  >&lt;p&gt;Gu, any update on this ticket?&lt;/p&gt;</comment>
                            <comment id="260307" author="yujian" created="Sat, 21 Dec 2019 18:54:55 +0000"  >&lt;p&gt;This failure is affecting patch review testing on Lustre b2_12 branch.&lt;/p&gt;</comment>
                            <comment id="265825" author="adilger" created="Sun, 22 Mar 2020 00:00:37 +0000"  >&lt;p&gt;What is interesting is that of the 31 failures of sanity.sh test_56ra in the past 4 weeks, 29 of them are on b2_12 (about 30% failure rate on that branch), and the one on master is an interop test with master and b2_12.&lt;/p&gt;

&lt;p&gt;On master the patch is skipping the test for MDS &amp;lt; 2.12.58, but on b2_12 it isn&apos;t doing that.  However, the presence of the test on b2_12 implies that the LSOM functionality was backported and &lt;em&gt;should&lt;/em&gt; be working.&lt;/p&gt;</comment>
                            <comment id="265826" author="gerrit" created="Sun, 22 Mar 2020 00:31:58 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/38021&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/38021&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13017&quot; title=&quot;sanity test 56ra fails with &amp;#39; &amp;#39;/usr/bin/lfs find -size 0 -type f /mnt/lustre/d56ra.sanity&amp;#39; should send 12 glimpse RPCs to OST&amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13017&quot;&gt;&lt;del&gt;LU-13017&lt;/del&gt;&lt;/a&gt; tests: clean up sanity 56ra add debugging&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 8193ecaa7e199d2dc237a4e318158a7b33d425f3&lt;/p&gt;</comment>
                            <comment id="265841" author="adilger" created="Mon, 23 Mar 2020 02:55:53 +0000"  >&lt;p&gt;It looks like the first failure of test_56ra on b2_12 with the &quot;&lt;tt&gt;&apos;/usr/bin/lfs find -size 5 -type f /mnt/lustre/d56ra.sanity&apos; should send 12 glimpse RPCs to OST&lt;/tt&gt;&quot; message was on 2019-11-21, which is the same day that patch &lt;a href=&quot;https://review.whamcloud.com/36553&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36553&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11367&quot; title=&quot;integrate LSOM with lfs find&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11367&quot;&gt;&lt;del&gt;LU-11367&lt;/del&gt;&lt;/a&gt; som: integrate LSOM with lfs find&lt;/tt&gt;&quot; landed on that branch.  This was confused by some earlier cases of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13148&quot; title=&quot;Interop: sanity test 56ra fails with &amp;#39;&amp;#39;/usr/bin/lfs find -size 0 -type f -lazy /mnt/lustre/d56ra.sanity&amp;#39; should not send glimpse RPCs to OST&amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13148&quot;&gt;&lt;del&gt;LU-13148&lt;/del&gt;&lt;/a&gt; causing test_56ra to also fail on b2_12, but these were caused by interop testing between 2.12.2 and 2.12.58.&lt;/p&gt;</comment>
                            <comment id="267200" author="adilger" created="Wed, 8 Apr 2020 21:46:44 +0000"  >&lt;p&gt;Patch 38021 has recently landed to b2_12, and most/all of the failures on master are due to interop tests.  If this problem is no longer seen on master/b2_12 in a week then this can be closed.&lt;/p&gt;</comment>
                            <comment id="276450" author="neilb" created="Fri, 31 Jul 2020 03:30:46 +0000"  >&lt;p&gt;I know this is &quot;resolved&quot; so maybe no-one will see this, but I&apos;ll report anyway.&lt;/p&gt;

&lt;p&gt;I got this failure reliably in my testing.&#160; The test is sensitive to timing and performance.&lt;/p&gt;

&lt;p&gt;I build my kernels with lots of debugging options enabled, and there is a significant performance penalty - but I like the benefits.&lt;/p&gt;

&lt;p&gt;If it takes more than 1 second from the start of one &quot;lfs find&quot; to the start of the next, failures are quite likely.&lt;/p&gt;

&lt;p&gt;While the &quot;lfs find&quot; is calling &apos;stat&apos; on each file, a background &quot;stat-ahead&quot; thread is doing a similar thing.&lt;/p&gt;

&lt;p&gt;If &quot;find&quot; gets in before &quot;stat-ahead&quot;, the stat-ahead will skip the RPC.&#160; If the &lt;b&gt;previous&lt;/b&gt; find did a &apos;stat&apos; within the last 1 second, &quot;stat-ahead&quot; will again skip the RPC.&#160; But if the previous stat was more than 1 second ago, and the current find hasn&apos;t got up to the file yet, the stat-ahead &lt;b&gt;will&lt;/b&gt; do an RPC.&#160; Then the find will do its own RPC, resulting in extra RPCs.&lt;/p&gt;

&lt;p&gt;It seems strange that a &quot;stat()&quot; call will do an RPC even if there was a very recent stat-ahead RPC.&#160; What is the point of stat-ahead in that case?&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="276452" author="adilger" created="Fri, 31 Jul 2020 05:03:02 +0000"  >&lt;p&gt;Seems like a bug in statahead?&lt;/p&gt;</comment>
                            <comment id="276625" author="neilb" created="Tue, 4 Aug 2020 01:16:45 +0000"  >&lt;p&gt;Maybe it&apos;s a bug...&lt;/p&gt;

&lt;p&gt;It appears to be a problem when the &apos;stat()&apos; system call happens &lt;b&gt;after&lt;/b&gt; the stat-ahead, and &lt;b&gt;before&lt;/b&gt; the statahead receives a reply.&#160; During that time the lock that was created and requested is not attached to the (inode&apos;s) resource, so it isn&apos;t found when the &apos;stat&apos; attempts to find a lock, so instead it creates a new lock.&#160; As both locks are PR they don&apos;t conflict and you get two locks on the one resource, both holding size/timestamp info.&lt;/p&gt;

&lt;p&gt;If the &apos;stat()&apos; happens &lt;b&gt;before&lt;/b&gt; the stat-ahead, the whole operation is under a semaphore so the stat-ahead aborts as it cannot get the lock.&#160; If the stat() happens &lt;b&gt;after&lt;/b&gt; the statahead gets a reply, the lock will be attached to the resource, and the stat() will find and re-use it.&lt;/p&gt;

&lt;p&gt;Maybe we could add a &quot;pending&quot; list to the ldlm_resource, and add locks there that have been requested from the server, but that haven&apos;t had a reply yet.&#160; That would allow us to check if a stat-ahead was pending.&lt;/p&gt;

&lt;p&gt;I tried adding a &apos;statahead-pending&apos; flag to the inode, but the lock grant doesn&apos;t get back to the osc layer - it just sits in the ldlm waiting to be requested again.&lt;/p&gt;</comment>
                            <comment id="277403" author="neilb" created="Thu, 13 Aug 2020 05:14:55 +0000"  >&lt;p&gt;It is actually the &apos;agl&apos; thread within the statahead module that is causing the problem.&lt;br/&gt;
And, of course,&#160; it is the &quot;mdc&quot; layer that the lock-grant would need to get to, not the &quot;osc&quot; layer.&lt;/p&gt;

&lt;p&gt;I think the best way forward is to disable agl while the rest is running.&lt;/p&gt;

&lt;p&gt;This will have the added advantage of testing the &quot;disable agl&quot; code, which is currently buggy&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="277432" author="gerrit" created="Thu, 13 Aug 2020 06:14:34 +0000"  >&lt;p&gt;Neil Brown (neilb@suse.de) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/39667&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/39667&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13017&quot; title=&quot;sanity test 56ra fails with &amp;#39; &amp;#39;/usr/bin/lfs find -size 0 -type f /mnt/lustre/d56ra.sanity&amp;#39; should send 12 glimpse RPCs to OST&amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13017&quot;&gt;&lt;del&gt;LU-13017&lt;/del&gt;&lt;/a&gt; tests: disable statahead_agl for test_56ra&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: d6febfd38d409af0b42291a94ee007acc7f06e23&lt;/p&gt;</comment>
                            <comment id="282007" author="gerrit" created="Mon, 12 Oct 2020 05:44:42 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/39667/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/39667/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13017&quot; title=&quot;sanity test 56ra fails with &amp;#39; &amp;#39;/usr/bin/lfs find -size 0 -type f /mnt/lustre/d56ra.sanity&amp;#39; should send 12 glimpse RPCs to OST&amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13017&quot;&gt;&lt;del&gt;LU-13017&lt;/del&gt;&lt;/a&gt; tests: disable statahead_agl for sanity test_56ra&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 3e04c4f0757c228fc9a1967617e12f3e73e8ffaf&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="58464">LU-13378</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="55833">LU-12378</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="57822">LU-13148</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00pzr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>