<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:39:06 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
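
A sketch of such a request (illustrative; the issue-xml view path shown here is the standard
JIRA pattern, substitute your own host and issue key):

    curl 'https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-10891/LU-10891.xml?field=key&field=summary'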
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-10891] sanityn test 77a, 77b, 77c, 77d, 77e, 77f, 77j and 77k all fail after 32a with &apos;dd at *MB on client failed (2)&apos;</title>
                <link>https://jira.whamcloud.com/browse/LU-10891</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Looking at the client test_log for the test session&lt;/p&gt;

&lt;p&gt;2018-03-17 04:24:28 UTC - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/24378326-29c9-11e8-b6a0-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/24378326-29c9-11e8-b6a0-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;we see several calls to dd fail&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;trevis-52vm2: 1048576 bytes (1.0 MB) copied, 0.00186497 s, 562 MB/s
trevis-52vm1: dd: error writing &#8216;/mnt/lustre/d77a.sanityn/nrs_w_trevis-52vm1.trevis.hpdd.intel.com&#8217;: Input/output error
trevis-52vm1: 1+0 records in
trevis-52vm1: 0+0 records out
trevis-52vm1: 0 bytes (0 B) copied, 99.1917 s, 0.0 kB/s
trevis-52vm1: 1+0 records in
trevis-52vm1: 1+0 records out
trevis-52vm1: 1048576 bytes (1.0 MB) copied, 0.0330184 s, 31.8 MB/s 
sanityn test_77a: @@@@@@ FAIL: dd at 5MB on client failed (2) 
trevis-52vm1: dd: cannot fstat &#8216;/mnt/lustre/d77a.sanityn/nrs_w_trevis-52vm1.trevis.hpdd.intel.com&#8217;: Cannot send after transport endpoint shutdown 
sanityn test_77a: @@@@@@ FAIL: dd at 7MB on client failed (2)

&#8230;

trevis-52vm1: 0 bytes (0 B) copied, 0.0273896 s, 0.0 kB/s 
sanityn test_77a: @@@@@@ FAIL: dd at 6MB on client failed (2) 
trevis-52vm1: dd: failed to truncate to 13631488 bytes in output file &#8216;/mnt/lustre/d77a.sanityn/nrs_w_trevis-52vm1.trevis.hpdd.intel.com&#8217;: Input/output error
trevis-52vm1: dd: failed to truncate to 4194304 bytes in output file &#8216;/mnt/lustre/d77a.sanityn/nrs_w_trevis-52vm1.trevis.hpdd.intel.com&#8217;: Cannot send after transport endpoint shutdown 
sanityn test_77a: @@@@@@ FAIL: dd at 13MB on client failed (2)
sanityn test_77a: @@@@@@ FAIL: dd at 4MB on client failed (2) 
trevis-52vm1: 1+0 records in
trevis-52vm1: 1+0 records out
trevis-52vm1: 1048576 bytes (1.0 MB) copied, 0.00703154 s, 149 MB/s
Trace dump:
= /usr/lib64/lustre/tests/test-framework.sh:5764:error()
= /usr/lib64/lustre/tests/sanityn.sh:3084:nrs_write_read()
= /usr/lib64/lustre/tests/sanityn.sh:3113:test_77a()
= /usr/lib64/lustre/tests/test-framework.sh:6040:run_one()
= /usr/lib64/lustre/tests/test-framework.sh:6079:run_one_logged()
= /usr/lib64/lustre/tests/test-framework.sh:5926:run_test()
= /usr/lib64/lustre/tests/sanityn.sh:3117:main()
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;In the dmesg log on the OSS, we see a stack trace and complaint that there are too many service threads and not enough resources&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[11362.460839] Lustre: DEBUG MARKER: == sanityn test 77a: check FIFO NRS policy =========================================================== 05:12:32 (1521263552)
[11362.701936] Lustre: DEBUG MARKER: lctl set_param ost.OSS.*.nrs_policies=fifo
[11407.968024] LNet: Service thread pid 8563 was inactive for 40.05s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[11407.973431] Pid: 8563, comm: ll_ost_io00_012
[11407.975284] Call Trace:
[11407.978482]  [&amp;lt;ffffffff816b40e9&amp;gt;] schedule+0x29/0x70
[11407.980373]  [&amp;lt;ffffffff816b1984&amp;gt;] schedule_timeout+0x174/0x2c0
[11407.982265]  [&amp;lt;ffffffff8109c080&amp;gt;] ? process_timeout+0x0/0x10
[11407.984109]  [&amp;lt;ffffffffc0ab65f0&amp;gt;] ? ldlm_expired_completion_wait+0x0/0x220 [ptlrpc]
[11407.986141]  [&amp;lt;ffffffffc0ab6dc1&amp;gt;] ldlm_completion_ast+0x5b1/0x920 [ptlrpc]
[11407.988081]  [&amp;lt;ffffffff810c7c70&amp;gt;] ? default_wake_function+0x0/0x20
[11407.989996]  [&amp;lt;ffffffffc0ab7c13&amp;gt;] ldlm_cli_enqueue_local+0x233/0x860 [ptlrpc]
[11407.991934]  [&amp;lt;ffffffffc0ab6810&amp;gt;] ? ldlm_completion_ast+0x0/0x920 [ptlrpc]
[11407.993914]  [&amp;lt;ffffffffc0abc070&amp;gt;] ? ldlm_blocking_ast+0x0/0x170 [ptlrpc]
[11407.995835]  [&amp;lt;ffffffffc0ab9360&amp;gt;] ? ldlm_glimpse_ast+0x0/0x10 [ptlrpc]
[11407.997834]  [&amp;lt;ffffffffc0b4b007&amp;gt;] tgt_extent_lock+0xe7/0x290 [ptlrpc]
[11407.999761]  [&amp;lt;ffffffffc0abc070&amp;gt;] ? ldlm_blocking_ast+0x0/0x170 [ptlrpc]
[11408.001742]  [&amp;lt;ffffffffc0ab6810&amp;gt;] ? ldlm_completion_ast+0x0/0x920 [ptlrpc]
[11408.003644]  [&amp;lt;ffffffffc0ab9360&amp;gt;] ? ldlm_glimpse_ast+0x0/0x10 [ptlrpc]
[11408.005520]  [&amp;lt;ffffffffc0e71c7f&amp;gt;] ofd_punch_hdl+0x30f/0x9c0 [ofd]
[11408.007296]  [&amp;lt;ffffffffc0b51eea&amp;gt;] tgt_request_handle+0x92a/0x13b0 [ptlrpc]
[11408.009176]  [&amp;lt;ffffffffc0af7823&amp;gt;] ptlrpc_server_handle_request+0x253/0xab0 [ptlrpc]
[11408.011048]  [&amp;lt;ffffffffc0afafd2&amp;gt;] ptlrpc_main+0xa92/0x1e40 [ptlrpc]
[11408.012880]  [&amp;lt;ffffffffc0afa540&amp;gt;] ? ptlrpc_main+0x0/0x1e40 [ptlrpc]
[11408.014611]  [&amp;lt;ffffffff810b4031&amp;gt;] kthread+0xd1/0xe0
[11408.016294]  [&amp;lt;ffffffff810b3f60&amp;gt;] ? kthread+0x0/0xe0
[11408.017934]  [&amp;lt;ffffffff816c0577&amp;gt;] ret_from_fork+0x77/0xb0
[11408.019664]  [&amp;lt;ffffffff810b3f60&amp;gt;] ? kthread+0x0/0xe0
[11408.022731] LustreError: dumping log to /tmp/lustre-log.1521263598.8563
[11467.000052] LustreError: 0:0:(ldlm_lockd.c:331:waiting_locks_callback()) ### lock callback timer expired after 99s: evicting client at 10.9.6.82@tcp  ns: filter-lustre-OST0004_UUID lock: ffff880060b46240/0x6aadc925cc163a6a lrc: 3/0,0 mode: PW/PW res: [0x54e0:0x0:0x0].0x0 rrc: 3 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;1048575) flags: 0x60000400000020 nid: 10.9.6.82@tcp remote: 0x9b5c5b055d72d1f4 expref: 6 pid: 13081 timeout: 11467 lvb_type: 0
[11467.059728] Lustre: lustre-OST0004: Connection restored to 822495a8-31b5-9951-2f9c-71e8bb3032cb (at 10.9.6.82@tcp)
[11467.145213] LNet: Service thread pid 8563 completed after 99.23s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[11467.488810] Lustre: DEBUG MARKER: /usr/sbin/lctl mark  sanityn test_77a: @@@@@@ FAIL: dd at 5MB on client failed \(2\)
[11467.509827] Lustre: DEBUG MARKER: /usr/sbin/lctl mark  sanityn test_77a: @@@@@@ FAIL: dd at 7MB on client failed \(2\)
[11467.702702] Lustre: DEBUG MARKER: sanityn test_77a: @@@@@@ FAIL: dd at 5MB on client failed (2)
[11467.723042] Lustre: DEBUG MARKER: sanityn test_77a: @@@@@@ FAIL: dd at 7MB on client failed (2)
[11467.967245] Lustre: DEBUG MARKER: /usr/sbin/lctl dk &amp;gt; /home/autotest2/autotest/logs/test_logs/2018-03-17/lustre-reviews-el7-x86_64--review-dne-part-1--1_11_1__55338___3714d0fe-68b1-4332-9b84-47eab7aaf171/sanityn.test_77a.debug_log.$(hostname -s).1521263658.log;         dmesg &amp;gt; /home/auto
[11468.064728] Lustre: DEBUG MARKER: /usr/sbin/lctl dk &amp;gt; /home/autotest2/autotest/logs/test_logs/2018-03-17/lustre-reviews-el7-x86_64--review-dne-part-1--1_11_1__55338___3714d0fe-68b1-4332-9b84-47eab7aaf171/sanityn.test_77a.debug_log.$(hostname -s).1521263658.log;         dmesg &amp;gt; /home/auto
[11472.082190] Lustre: DEBUG MARKER: lctl set_param -n fail_loc=0          fail_val=0 2&amp;gt;/dev/null
[11472.179737] Lustre: DEBUG MARKER: lctl set_param -n fail_loc=0          fail_val=0 2&amp;gt;/dev/null
[11567.000038] LustreError: 0:0:(ldlm_lockd.c:331:waiting_locks_callback()) ### lock callback timer expired after 100s: evicting client at 10.9.6.82@tcp  ns: filter-lustre-OST0005_UUID lock: ffff88004a400240/0x6aadc925cc163b74 lrc: 3/0,0 mode: PW/PW res: [0x54c0:0x0:0x0].0x0 rrc: 3 type: EXT [0-&amp;gt;18446744073709551615] (req 1048576-&amp;gt;2097151) flags: 0x60000400010020 nid: 10.9.6.82@tcp remote: 0x9b5c5b055d72d2e2 expref: 6 pid: 9531 timeout: 11567 lvb_type: 0
[11567.050198] LustreError: 9187:0:(ldlm_lockd.c:2201:ldlm_cancel_handler()) ldlm_cancel from 10.9.6.82@tcp arrived at 1521263757 with bad export cookie 7687021303166253843
[11567.055145] Lustre: lustre-OST0005: Connection restored to 822495a8-31b5-9951-2f9c-71e8bb3032cb (at 10.9.6.82@tcp)
[11467.441575] Lustre: DEBUG MARKER: /usr/sbin/lctl mark  sanityn test_77a: @@@@@@ FAIL: dd at 4MB on client failed \(2\)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;We&#8217;ve seen these tests fail before; see ATM-60/&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8668&quot; title=&quot;sanityn test 77a, 77b, 77c, 77d, 77e, 77f all fail with &amp;#39;dd on client failed&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8668&quot;&gt;&lt;del&gt;LU-8668&lt;/del&gt;&lt;/a&gt;. I&#8217;ve been told that pdsh still uses mrsh for non-SELinux test sessions.&lt;/p&gt;

&lt;p&gt;All of the following test sessions ran on trevis except for the one on 2017-10-13, which ran on onyx. Some of these failures may be caused by the patch under review, except for the failures on 2018-03-17, which were against a no-op patch.&lt;/p&gt;


&lt;p&gt;2017-07-27 06:48:11 UTC - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/ff1b47b8-72d4-11e7-a0a2-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/ff1b47b8-72d4-11e7-a0a2-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;[3 more failures between these dates]&lt;/p&gt;

&lt;p&gt;2017-09-27 04:24:43 UTC - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/e142bd58-a393-11e7-b786-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/e142bd58-a393-11e7-b786-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2017-10-01 18:36:53 UTC - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/12f0d24e-a732-11e7-b786-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/12f0d24e-a732-11e7-b786-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2017-10-13 02:50:27 UTC on onyx - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/e7457ab4-b019-11e7-8d8d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/e7457ab4-b019-11e7-8d8d-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2017-10-18 19:29:07 UTC - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/dd0fe636-b48f-11e7-9eee-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/dd0fe636-b48f-11e7-9eee-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;[16 more failures between these dates]&lt;/p&gt;

&lt;p&gt;2018-02-12 11:34:28 UTC - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/7e8bb5f0-1005-11e8-a7cd-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/7e8bb5f0-1005-11e8-a7cd-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2018-03-17 04:24:28 UTC - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/24378326-29c9-11e8-b6a0-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/24378326-29c9-11e8-b6a0-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2018-03-22 11:10:49 UTC - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/524dbdfe-2df0-11e8-b74b-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/524dbdfe-2df0-11e8-b74b-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2018-03-27 22:47:40 UTC - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d5f150e8-3242-11e8-b6a0-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d5f150e8-3242-11e8-b6a0-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2018-04-04 18:36:13 UTC - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6b2266f8-385f-11e8-b45c-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6b2266f8-385f-11e8-b45c-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2018-04-05 17:30:51 UTC - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/c91ef600-3921-11e8-960d-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/c91ef600-3921-11e8-960d-52540065bddc&lt;/a&gt;&lt;/p&gt;</description>
                <environment></environment>
                <key id="51717">LU-10891</key>
                <summary>sanityn test 77a, 77b, 77c, 77d, 77e, 77f, 77j and 77k all fail after 32a with &apos;dd at *MB on client failed (2)&apos;</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                <statusCategory id="3" key="done" colorName="success"/>
                <resolution id="3">Duplicate</resolution>
                <assignee username="pfarrell">Patrick Farrell</assignee>
                <reporter username="jamesanunez">James Nunez</reporter>
                <labels>
                    <label>DNE</label>
                </labels>
                <created>Mon, 9 Apr 2018 16:08:45 +0000</created>
                <updated>Fri, 2 Aug 2019 19:11:44 +0000</updated>
                <resolved>Sun, 20 Jan 2019 16:02:33 +0000</resolved>
                <version>Lustre 2.11.0</version>
                <version>Lustre 2.12.0</version>
                <due></due>
                <votes>0</votes>
                <watches>3</watches>
                <comments>
                            <comment id="227926" author="adilger" created="Tue, 15 May 2018 22:55:01 +0000"  >&lt;p&gt;+1 on master with patch &lt;a href=&quot;https://review.whamcloud.com/16682&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/16682&lt;/a&gt; &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7236&quot; title=&quot;connections on demand&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7236&quot;&gt;&lt;del&gt;LU-7236&lt;/del&gt;&lt;/a&gt; ptlrpc: idle connections can disconnect&quot;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/919420c8-5890-11e8-93e6-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/919420c8-5890-11e8-93e6-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="227928" author="adilger" created="Tue, 15 May 2018 23:03:02 +0000"  >&lt;p&gt;I checked out a bunch of these failures, and all of them happened after sanityn test_32a failed with &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10059&quot; title=&quot;sanityn test_32a: wrong file size&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10059&quot;&gt;&lt;del&gt;LU-10059&lt;/del&gt;&lt;/a&gt;.  It might be that this test is leaving behind cruft that is causing follow on failures?&lt;/p&gt;

&lt;p&gt;I didn&apos;t check into the details, but it seems like this is a failure in NRS, so it might have been introduced with a change to the NRS code (if that happened around the time it started failing).&lt;/p&gt;</comment>
                            <comment id="240410" author="pfarrell" created="Sun, 20 Jan 2019 15:28:24 +0000"  >&lt;p&gt;Putting this here as well as in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10059&quot; title=&quot;sanityn test_32a: wrong file size&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10059&quot;&gt;&lt;del&gt;LU-10059&lt;/del&gt;&lt;/a&gt;: 32n isn&apos;t cleaning up on failure - it&apos;s leaving lockless truncate enabled, which could cause this sort of problem.&lt;/p&gt;</comment>
                            <comment id="240414" author="pfarrell" created="Sun, 20 Jan 2019 16:02:33 +0000"  >&lt;p&gt;Appears to be fallout from &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10059&quot; title=&quot;sanityn test_32a: wrong file size&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10059&quot;&gt;&lt;del&gt;LU-10059&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="240418" author="adilger" created="Sun, 20 Jan 2019 19:11:11 +0000"  >&lt;p&gt;It wouldn&apos;t be terrible to change test 32n to use stack_trap so that it cleans up properly after a failure. While it doesn&apos;t fix the root problem, it does avoid lots of fallout that can confuse the issue, as can be seen here. &lt;/p&gt;</comment>
                            <comment id="240419" author="pfarrell" created="Sun, 20 Jan 2019 19:31:48 +0000"  >&lt;p&gt;See&#160;&lt;a href=&quot;https://review.whamcloud.com/#/c/34070/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/34070/&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="48556">LU-10059</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzvi7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
</item>
</channel>
</rss>