<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:08:20 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-7372] replay-dual test_26: test failed to respond and timed out</title>
                <link>https://jira.whamcloud.com/browse/LU-7372</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Saurabh Tandan &amp;lt;saurabh.tandan@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/1e79d2a6-7d21-11e5-a254-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/1e79d2a6-7d21-11e5-a254-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_26 failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;test failed to respond and timed out
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Client dmesg:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: test_26 fail mds1 1 times
LustreError: 980:0:(ldlm_request.c:130:ldlm_expired_completion_wait()) ### lock timed out (enqueued at 1445937610, 300s ago), entering recovery for MGS@10.2.4.140@tcp ns: MGC10.2.4.140@tcp lock: ffff88007bdd82c0/0x956ab2c8047544d6 lrc: 4/1,0 mode: --/CR res: [0x65727473756c:0x2:0x0].0x0 rrc: 1 type: PLN flags: 0x1000000000000 nid: local remote: 0x223a79061b204538 expref: -99 pid: 980 timeout: 0 lvb_type: 0
Lustre: 29433:0:(client.c:2039:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1445937910/real 1445937910]  req@ffff880028347980 x1516173751413108/t0(0) o250-&amp;gt;MGC10.2.4.140@tcp@10.2.4.140@tcp:26/25 lens 520/544 e 0 to 1 dl 1445937916 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1
Lustre: 29433:0:(client.c:2039:ptlrpc_expire_one_request()) Skipped 67 previous similar messages
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;MDS console:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;09:22:17:LustreError: 24638:0:(client.c:1138:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff88004d92c980 x1516158358024328/t0(0) o101-&amp;gt;lustre-MDT0000-lwp-MDT0000@0@lo:23/10 lens 456/496 e 0 to 0 dl 0 ref 2 fl Rpc:/0/ffffffff rc 0/-1
09:25:19:LustreError: 24638:0:(client.c:1138:ptlrpc_import_delay_req()) Skipped 6 previous similar messages
09:25:19:LustreError: 24638:0:(qsd_reint.c:55:qsd_reint_completion()) lustre-MDT0000: failed to enqueue global quota lock, glb fid:[0x200000006:0x10000:0x0], rc:-5
09:25:19:LustreError: 24638:0:(qsd_reint.c:55:qsd_reint_completion()) Skipped 1 previous similar message
09:25:19:INFO: task umount:24629 blocked for more than 120 seconds.
09:25:19:      Not tainted 2.6.32-573.7.1.el6_lustre.x86_64 #1
09:25:19:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
09:25:19:umount        D 0000000000000000     0 24629  24628 0x00000080
09:25:19: ffff880059e2bb48 0000000000000086 0000000000000000 00000000000708b7
09:25:20: 0000603500000000 000000ac00000000 00001c1fd9b9c014 ffff880059e2bb98
09:25:20: ffff880059e2bb58 0000000101d3458a ffff880076ee3ad8 ffff880059e2bfd8
09:25:20:Call Trace:
09:25:20: [&amp;lt;ffffffff8153a756&amp;gt;] __mutex_lock_slowpath+0x96/0x210
09:25:20: [&amp;lt;ffffffff8153a27b&amp;gt;] mutex_lock+0x2b/0x50
09:25:20: [&amp;lt;ffffffffa02cb30d&amp;gt;] mgc_process_config+0x1dd/0x1210 [mgc]
09:25:20: [&amp;lt;ffffffffa0476b61&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
09:25:20: [&amp;lt;ffffffffa07fe28d&amp;gt;] obd_process_config.clone.0+0x8d/0x2e0 [obdclass]
09:25:20: [&amp;lt;ffffffffa0476b61&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
09:25:20: [&amp;lt;ffffffffa08024c2&amp;gt;] lustre_end_log+0x262/0x6a0 [obdclass]
09:25:20: [&amp;lt;ffffffffa082efb1&amp;gt;] server_put_super+0x911/0xed0 [obdclass]
09:25:20: [&amp;lt;ffffffff811b0116&amp;gt;] ? invalidate_inodes+0xf6/0x190
09:25:20: [&amp;lt;ffffffff8119437b&amp;gt;] generic_shutdown_super+0x5b/0xe0
09:25:20: [&amp;lt;ffffffff81194466&amp;gt;] kill_anon_super+0x16/0x60
09:25:20: [&amp;lt;ffffffffa07fa096&amp;gt;] lustre_kill_super+0x36/0x60 [obdclass]
09:25:20: [&amp;lt;ffffffff81194c07&amp;gt;] deactivate_super+0x57/0x80
09:25:20: [&amp;lt;ffffffff811b4a7f&amp;gt;] mntput_no_expire+0xbf/0x110
09:25:20: [&amp;lt;ffffffff811b55cb&amp;gt;] sys_umount+0x7b/0x3a0
09:25:20: [&amp;lt;ffffffff8100b0d2&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Info required for matching: replay-dual test_26&lt;/p&gt;</description>
                <environment>Server/Client : master, build # 3225 RHEL 6.7&lt;br/&gt;
</environment>
        <key id="32965">LU-7372</key>
            <summary>replay-dual test_26: test failed to respond and timed out</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bobijam">Zhenyu Xu</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Mon, 2 Nov 2015 17:50:07 +0000</created>
                <updated>Thu, 23 Dec 2021 07:16:44 +0000</updated>
                            <resolved>Thu, 22 Jul 2021 04:16:33 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                    <version>Lustre 2.9.0</version>
                    <version>Lustre 2.10.0</version>
                    <version>Lustre 2.11.0</version>
                    <version>Lustre 2.12.0</version>
                    <version>Lustre 2.10.3</version>
                    <version>Lustre 2.10.4</version>
                    <version>Lustre 2.10.5</version>
                    <version>Lustre 2.12.4</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>18</watches>
                                                                            <comments>
                            <comment id="135312" author="pjones" created="Fri, 4 Dec 2015 23:35:33 +0000"  >&lt;p&gt;Bobijam&lt;/p&gt;

&lt;p&gt;Could you please advise on this one?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="136046" author="standan" created="Fri, 11 Dec 2015 17:10:13 +0000"  >&lt;p&gt;master, build# 3264, 2.7.64 tag&lt;br/&gt;
Regression:EL6.7 Server/EL6.7 Client&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/67ff2254-9f0a-11e5-ba94-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/67ff2254-9f0a-11e5-ba94-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="136057" author="standan" created="Fri, 11 Dec 2015 17:46:49 +0000"  >&lt;p&gt;master, build# 3264, 2.7.64 tag&lt;br/&gt;
Regression:EL7.1 Server/EL6.7 Client&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/7a981fb0-9f0a-11e5-8d81-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/7a981fb0-9f0a-11e5-8d81-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="136064" author="standan" created="Fri, 11 Dec 2015 18:07:41 +0000"  >&lt;p&gt;master, build# 3264, 2.7.64 tag&lt;br/&gt;
Regression:EL7.1 Server/EL7.1 Client&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/76a79476-9f37-11e5-ba94-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/76a79476-9f37-11e5-ba94-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="136081" author="standan" created="Fri, 11 Dec 2015 18:54:08 +0000"  >&lt;p&gt;master, build# 3264, 2.7.64 tag&lt;br/&gt;
Regression:EL7.1 Server/SLES11 SP3 Client&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/17e264d6-9f2b-11e5-bf9b-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/17e264d6-9f2b-11e5-bf9b-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="136268" author="sarah" created="Mon, 14 Dec 2015 22:39:16 +0000"  >&lt;p&gt;another instance seen on RHEL6.7 server/SLES11 SP3 client&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/52597c80-a052-11e5-a33d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/52597c80-a052-11e5-a33d-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="136524" author="bobijam" created="Wed, 16 Dec 2015 12:34:03 +0000"  >&lt;p&gt;mgs_ir_fini_fs() was waiting for the completion signal supposed to be issued by mgs_ir_notify(), while mgs_ir_notify() was waiting in the previous loop for the revoking of a CONFIG_T_RECOVERY lock indefinitely.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedHeader panelHeader&quot; style=&quot;border-bottom-width: 1px;&quot;&gt;&lt;b&gt;mgs_ir_fini_fs&lt;/b&gt;&lt;/div&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[11146.534027] umount          D ffff88007fc13680     0   416    415 0x00000080
[11146.534027]  ffff88007a183a00 0000000000000086 ffff88007a183fd8 0000000000013680
[11146.534027]  ffff88007a183fd8 0000000000013680 ffff880078725b00 ffff880061536728
[11146.534027]  ffff880061536730 7fffffffffffffff ffff880078725b00 ffff880061536600
[11146.534027] Call Trace:
[11146.534027]  [&amp;lt;ffffffff8160a709&amp;gt;] schedule+0x29/0x70
[11146.534027]  [&amp;lt;ffffffff81608649&amp;gt;] schedule_timeout+0x209/0x2d0
[11146.534027]  [&amp;lt;ffffffff81601c94&amp;gt;] ? __slab_free+0x10e/0x277
[11146.534027]  [&amp;lt;ffffffff8160ac16&amp;gt;] wait_for_completion+0x116/0x170
[11146.534027]  [&amp;lt;ffffffff810a9660&amp;gt;] ? wake_up_state+0x20/0x20
[11146.534027]  [&amp;lt;ffffffffa0cf0930&amp;gt;] mgs_ir_fini_fs+0x250/0x46c [mgs]
[11146.534027]  [&amp;lt;ffffffffa0cd8fac&amp;gt;] mgs_free_fsdb+0x4c/0xcd0 [mgs]
[11146.534027]  [&amp;lt;ffffffffa0ce5502&amp;gt;] mgs_cleanup_fsdb_list+0x52/0x70 [mgs]
[11146.534027]  [&amp;lt;ffffffffa0cc9f27&amp;gt;] mgs_device_fini+0x97/0x5b0 [mgs]
[11146.534027]  [&amp;lt;ffffffffa079b4e4&amp;gt;] class_cleanup+0x734/0xcc0 [obdclass]
[11146.534027]  [&amp;lt;ffffffffa079dd83&amp;gt;] class_process_config+0x1bf3/0x2cf0 [obdclass]
[11146.534027]  [&amp;lt;ffffffffa067b3c7&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[11146.534027]  [&amp;lt;ffffffffa079ef6f&amp;gt;] class_manual_cleanup+0xef/0xba0 [obdclass]
[11146.534027]  [&amp;lt;ffffffffa07d6880&amp;gt;] server_put_super+0xcc0/0xea0 [obdclass]
[11146.534027]  [&amp;lt;ffffffff811c9426&amp;gt;] generic_shutdown_super+0x56/0xe0
[11146.534027]  [&amp;lt;ffffffff811c9692&amp;gt;] kill_anon_super+0x12/0x20
[11146.534027]  [&amp;lt;ffffffffa07a2c42&amp;gt;] lustre_kill_super+0x32/0x50 [obdclass]
[11146.534027]  [&amp;lt;ffffffff811c9a3d&amp;gt;] deactivate_locked_super+0x3d/0x60
[11146.534027]  [&amp;lt;ffffffff811ca046&amp;gt;] deactivate_super+0x46/0x60
[11146.534027]  [&amp;lt;ffffffff811e6f35&amp;gt;] mntput_no_expire+0xc5/0x120
[11146.534027]  [&amp;lt;ffffffff811e806f&amp;gt;] SyS_umount+0x9f/0x3c0
[11146.534027]  [&amp;lt;ffffffff81615309&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedHeader panelHeader&quot; style=&quot;border-bottom-width: 1px;&quot;&gt;&lt;b&gt;mgs_ir_notify&lt;/b&gt;&lt;/div&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[11146.534027] mgs_lustre_noti S ffff88007fd13680     0 28657      2 0x00000080
[11146.534027]  ffff880060a43ba8 0000000000000046 ffff880060a43fd8 0000000000013680
[11146.534027]  ffff880060a43fd8 0000000000013680 ffff88005b4b16c0 ffff88005fddba00
[11146.534027]  ffff88005b4b16c0 0000000000000000 ffffffffa09d4210 ffff88005b4b16c0
[11146.534027] Call Trace:
[11146.534027]  [&amp;lt;ffffffffa09d4210&amp;gt;] ? ldlm_completion_ast_async+0x300/0x300 [ptlrpc]
[11146.534027]  [&amp;lt;ffffffff8160a709&amp;gt;] schedule+0x29/0x70
[11146.534027]  [&amp;lt;ffffffffa09d4a8d&amp;gt;] ldlm_completion_ast+0x62d/0x910 [ptlrpc]
[11146.534027]  [&amp;lt;ffffffff810a9660&amp;gt;] ? wake_up_state+0x20/0x20
[11146.534027]  [&amp;lt;ffffffffa0cc8721&amp;gt;] mgs_completion_ast_generic+0xb1/0x1d0 [mgs]
[11146.534027]  [&amp;lt;ffffffffa0cc8853&amp;gt;] mgs_completion_ast_ir+0x13/0x20 [mgs]
[11146.534027]  [&amp;lt;ffffffffa09d7150&amp;gt;] ldlm_cli_enqueue_local+0x230/0x940 [ptlrpc]
[11146.534027]  [&amp;lt;ffffffffa0cc8840&amp;gt;] ? mgs_completion_ast_generic+0x1d0/0x1d0 [mgs]
[11146.534027]  [&amp;lt;ffffffffa09d9f40&amp;gt;] ? ldlm_blocking_ast_nocheck+0x310/0x310 [ptlrpc]
[11146.534027]  [&amp;lt;ffffffffa0ccfdcc&amp;gt;] mgs_revoke_lock+0x1dc/0x360 [mgs]
[11146.534027]  [&amp;lt;ffffffffa09d9f40&amp;gt;] ? ldlm_blocking_ast_nocheck+0x310/0x310 [ptlrpc]
[11146.534027]  [&amp;lt;ffffffffa0cc8840&amp;gt;] ? mgs_completion_ast_generic+0x1d0/0x1d0 [mgs]
[11146.534027]  [&amp;lt;ffffffffa0cece4a&amp;gt;] mgs_ir_notify+0x14a/0x2b0 [mgs]
[11146.534027]  [&amp;lt;ffffffff810a9660&amp;gt;] ? wake_up_state+0x20/0x20
[11146.534027]  [&amp;lt;ffffffffa0cecd00&amp;gt;] ? lprocfs_ir_set_state+0x170/0x170 [mgs]
[11146.534027]  [&amp;lt;ffffffff810973af&amp;gt;] kthread+0xcf/0xe0
[11146.534027]  [&amp;lt;ffffffff810972e0&amp;gt;] ? kthread_create_on_node+0x140/0x140
[11146.534027]  [&amp;lt;ffffffff81615258&amp;gt;] ret_from_fork+0x58/0x90
[11146.534027]  [&amp;lt;ffffffff810972e0&amp;gt;] ? kthread_create_on_node+0x140/0x140
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="136679" author="jay" created="Thu, 17 Dec 2015 07:27:07 +0000"  >&lt;p&gt;Hi Bobijam,&lt;/p&gt;

&lt;p&gt;Do you know which lock is this process waiting for?&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
umount        D 0000000000000000     0 24629  24628 0x00000080
 ffff880059e2bb48 0000000000000086 0000000000000000 00000000000708b7
 0000603500000000 000000ac00000000 00001c1fd9b9c014 ffff880059e2bb98
 ffff880059e2bb58 0000000101d3458a ffff880076ee3ad8 ffff880059e2bfd8
Call Trace:
 [&amp;lt;ffffffff8153a756&amp;gt;] __mutex_lock_slowpath+0x96/0x210
 [&amp;lt;ffffffff8153a27b&amp;gt;] mutex_lock+0x2b/0x50
 [&amp;lt;ffffffffa02cb30d&amp;gt;] mgc_process_config+0x1dd/0x1210 [mgc]
 [&amp;lt;ffffffffa0476b61&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
 [&amp;lt;ffffffffa07fe28d&amp;gt;] obd_process_config.clone.0+0x8d/0x2e0 [obdclass]
 [&amp;lt;ffffffffa0476b61&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
 [&amp;lt;ffffffffa08024c2&amp;gt;] lustre_end_log+0x262/0x6a0 [obdclass]
 [&amp;lt;ffffffffa082efb1&amp;gt;] server_put_super+0x911/0xed0 [obdclass]
 [&amp;lt;ffffffff811b0116&amp;gt;] ? invalidate_inodes+0xf6/0x190
 [&amp;lt;ffffffff8119437b&amp;gt;] generic_shutdown_super+0x5b/0xe0
 [&amp;lt;ffffffff81194466&amp;gt;] kill_anon_super+0x16/0x60
 [&amp;lt;ffffffffa07fa096&amp;gt;] lustre_kill_super+0x36/0x60 [obdclass]
 [&amp;lt;ffffffff81194c07&amp;gt;] deactivate_super+0x57/0x80
 [&amp;lt;ffffffff811b4a7f&amp;gt;] mntput_no_expire+0xbf/0x110
 [&amp;lt;ffffffff811b55cb&amp;gt;] sys_umount+0x7b/0x3a0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;It looks like the lock cancellation was blocked by this lock somehow.&lt;/p&gt;</comment>
                            <comment id="136680" author="bobijam" created="Thu, 17 Dec 2015 07:46:03 +0000"  >&lt;p&gt;but in the MDS debug log, the same process later shows a different backtrace, and hung in that situation ever since. &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;09:25:21:Lustre: lock timed out (enqueued at 1445937602, 300s ago)
....
09:31:08:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
09:31:08:umount        D 0000000000000000     0 24629  24628 0x00000080
09:31:08: ffff880059e2b948 0000000000000086 0000000000000000 ffffffffa046a27b
09:31:08: 00000000562f42f5 ffff8800378fd480 00001c639f546de4 ffff880052840c20
09:31:08: ffff8800378fd480 0000000101d7b690 ffff880076ee3ad8 ffff880059e2bfd8
09:31:08:Call Trace:
09:31:08: [&amp;lt;ffffffffa046a27b&amp;gt;] ? cfs_set_ptldebug_header+0x2b/0xc0 [libcfs]
09:31:08: [&amp;lt;ffffffff81539cd5&amp;gt;] schedule_timeout+0x215/0x2e0
09:31:08: [&amp;lt;ffffffff81539953&amp;gt;] wait_for_common+0x123/0x180
09:31:08: [&amp;lt;ffffffff810672b0&amp;gt;] ? default_wake_function+0x0/0x20
09:31:08: [&amp;lt;ffffffff81539a6d&amp;gt;] wait_for_completion+0x1d/0x20
09:31:08: [&amp;lt;ffffffffa0640fb1&amp;gt;] mgs_ir_fini_fs+0x1e1/0x340 [mgs]
09:31:08: [&amp;lt;ffffffffa0637348&amp;gt;] mgs_free_fsdb+0x48/0xf40 [mgs]
09:31:08: [&amp;lt;ffffffffa063828f&amp;gt;] mgs_cleanup_fsdb_list+0x4f/0x70 [mgs]
09:31:08: [&amp;lt;ffffffffa06209a0&amp;gt;] mgs_device_fini+0x120/0x5b0 [mgs]
09:31:08: [&amp;lt;ffffffffa07f3d42&amp;gt;] class_cleanup+0x572/0xd20 [obdclass]
09:31:08: [&amp;lt;ffffffffa07d4906&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
09:31:08: [&amp;lt;ffffffffa07f63c6&amp;gt;] class_process_config+0x1ed6/0x2830 [obdclass]
09:31:08: [&amp;lt;ffffffffa0476b61&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
09:31:08: [&amp;lt;ffffffffa07f71df&amp;gt;] class_manual_cleanup+0x4bf/0x8e0 [obdclass]
09:31:08: [&amp;lt;ffffffffa07d4906&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
09:31:08: [&amp;lt;ffffffffa082f374&amp;gt;] server_put_super+0xcd4/0xed0 [obdclass]
09:31:08: [&amp;lt;ffffffff811b0116&amp;gt;] ? invalidate_inodes+0xf6/0x190
09:31:08: [&amp;lt;ffffffff8119437b&amp;gt;] generic_shutdown_super+0x5b/0xe0
09:31:08: [&amp;lt;ffffffff81194466&amp;gt;] kill_anon_super+0x16/0x60
09:31:08: [&amp;lt;ffffffffa07fa096&amp;gt;] lustre_kill_super+0x36/0x60 [obdclass]
09:31:08: [&amp;lt;ffffffff81194c07&amp;gt;] deactivate_super+0x57/0x80
09:31:09: [&amp;lt;ffffffff811b4a7f&amp;gt;] mntput_no_expire+0xbf/0x110
09:31:09: [&amp;lt;ffffffff811b55cb&amp;gt;] sys_umount+0x7b/0x3a0
09:31:09: [&amp;lt;ffffffff8100b0d2&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="136741" author="jay" created="Thu, 17 Dec 2015 18:10:23 +0000"  >&lt;p&gt;That&apos;s a different issue. The MGS should cancel the lock after lock expiration.&lt;/p&gt;</comment>
                            <comment id="136832" author="bobijam" created="Fri, 18 Dec 2015 09:10:40 +0000"  >&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa02cb30d&amp;gt;&amp;#93;&lt;/span&gt; mgc_process_config+0x1dd/0x1210 &lt;span class=&quot;error&quot;&gt;&amp;#91;mgc&amp;#93;&lt;/span&gt;&lt;/p&gt;

&lt;p&gt;locates&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeHeader panelHeader&quot; style=&quot;border-bottom-width: 1px;&quot;&gt;&lt;b&gt;config_log_end()&lt;/b&gt;&lt;/div&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (cld_recover) {
                mutex_lock(&amp;amp;cld_recover-&amp;gt;cld_lock);          &lt;span class=&quot;code-comment&quot;&gt;// -----------&amp;gt; here
&lt;/span&gt;                cld_recover-&amp;gt;cld_stopping = 1;
                mutex_unlock(&amp;amp;cld_recover-&amp;gt;cld_lock);
                config_log_put(cld_recover);
        }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="136919" author="standan" created="Fri, 18 Dec 2015 22:39:11 +0000"  >&lt;p&gt;Another instance for EL6.7 Server/EL6.7 Client - ZFS&lt;br/&gt;
Master, build# 3270&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/925fea10-a275-11e5-bdef-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/925fea10-a275-11e5-bdef-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="136933" author="standan" created="Fri, 18 Dec 2015 23:24:51 +0000"  >&lt;p&gt;Another instance for EL7.1 Server/EL7.1 Client - ZFS&lt;br/&gt;
Master, build# 3264&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/42ea859e-a135-11e5-83b8-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/42ea859e-a135-11e5-83b8-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="136944" author="standan" created="Sat, 19 Dec 2015 00:02:29 +0000"  >&lt;p&gt;Another instance for EL7.1 Server/EL7.1 Client - DNE&lt;br/&gt;
Master , Build# 3270&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/69d69aba-a26d-11e5-bdef-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/69d69aba-a26d-11e5-bdef-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="136974" author="jay" created="Sat, 19 Dec 2015 19:29:28 +0000"  >&lt;p&gt;Hi Bobijam,&lt;/p&gt;

&lt;p&gt;Please investigate why lock log end could be blocked there; I guess there must be a deadlock/livelock case between the lock cancellation code and the log end code.&lt;/p&gt;

&lt;p&gt;For the hung issue, we should make a fix in mgs_completion_ast_generic() where if ldlm_completion_ast() returned failure due to timeout, it should cancel the lock by itself.&lt;/p&gt;

&lt;p&gt;Jinshan&lt;/p&gt;</comment>
                            <comment id="137385" author="bobijam" created="Thu, 24 Dec 2015 02:47:16 +0000"  >&lt;p&gt;Jinshan,&lt;/p&gt;

&lt;p&gt;I&apos;m wondering whether the server does not need the IR log, since only clients need it.&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;diff --git a/lustre/mgc/mgc_request.c b/lustre/mgc/mgc_request.c
index ba1a2e1..0f392dd 100644
--- a/lustre/mgc/mgc_request.c
+++ b/lustre/mgc/mgc_request.c
@@ -360,7 +360,8 @@ &lt;span class=&quot;code-keyword&quot;&gt;static&lt;/span&gt; &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; config_log_add(struct obd_device obd, &lt;span class=&quot;code-object&quot;&gt;char&lt;/span&gt; logname,
        cld-&amp;gt;cld_params = params_cld;
 
         LASSERT(lsi-&amp;gt;lsi_lmd);
-        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (!(lsi-&amp;gt;lsi_lmd-&amp;gt;lmd_flags &amp;amp; LMD_FLG_NOIR)) {
+        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (!(lsi-&amp;gt;lsi_lmd-&amp;gt;lmd_flags &amp;amp; LMD_FLG_NOIR) &amp;amp;&amp;amp;
+           !lmd_is_client(lsi-&amp;gt;lsi_lmd)) {
                struct config_llog_data *recover_cld;
                ptr = strrchr(seclogname, &lt;span class=&quot;code-quote&quot;&gt;&apos;-&apos;&lt;/span&gt;);
                &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (ptr != NULL) {
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="137542" author="jay" created="Tue, 29 Dec 2015 01:50:19 +0000"  >&lt;p&gt;Hi Bobijam,&lt;/p&gt;

&lt;p&gt;Servers need to be notified too - for example, MDTs need to reconnect to restarted OSTs.&lt;/p&gt;</comment>
                            <comment id="137548" author="bobijam" created="Tue, 29 Dec 2015 04:57:45 +0000"  >&lt;p&gt;I&apos;m wondering whether this has to do with the OBD_CONNECT_MNE_SWAB/OBD_CONNECT_MDS_MDS change, esp. for commit bee9c1897677473f12c0b807edd3e8fec452bc32, which &quot;Do not evict MDS-MDS connection&quot; while &quot;The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS * connection&quot;.  &lt;/p&gt;

&lt;p&gt;The umount is waiting for a config_llog_data::cld_lock mutex, which is held by the re-queue thread, and that thread is waiting for the mgc lock to get granted or cancelled (ldlm_completion_ast()), and that never happens. &lt;/p&gt;</comment>
                            <comment id="137908" author="jay" created="Tue, 5 Jan 2016 06:43:23 +0000"  >&lt;p&gt;It&apos;s not related - this can only be seen on heterogeneous nodes.&lt;/p&gt;

&lt;p&gt;In that case, we can make a fix to destroy the lock in mgs_completion_ast_generic() if it times out for waiting lock completion.&lt;/p&gt;</comment>
                            <comment id="138109" author="jay" created="Wed, 6 Jan 2016 19:09:54 +0000"  >&lt;p&gt;I took a further look at this issue. It turned out that when the ldlm lock completion times out in ldlm_completion_ast(), it doesn&apos;t actually time out the locks; instead it just prints an error message and waits forever.&lt;/p&gt;

&lt;p&gt;Once the completion AST times out for the MGS, it only destroys in flight RPC but leaves lock alone, this caused the stuck in this ticket.&lt;/p&gt;

&lt;p&gt;I will create a patch to fix the problems I have seen so far.&lt;/p&gt;</comment>
                            <comment id="138138" author="gerrit" created="Wed, 6 Jan 2016 21:43:53 +0000"  >&lt;p&gt;Jinshan Xiong (jinshan.xiong@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/17853&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17853&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; mgs: reprocess all locks at device fini&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 967963e6ef7ecb9eef140effbcec795612633e18&lt;/p&gt;</comment>
                            <comment id="138420" author="jaylan" created="Sat, 9 Jan 2016 00:39:16 +0000"  >&lt;p&gt;The above patch caused compilation error in b2_5_fe.&lt;/p&gt;

&lt;p&gt;The patch made a call to ldlm_namespace_free_prior() from lustre/mgs/mgs_handler.c, but&lt;br/&gt;
that symbol is not known to the scope of mgs_handler.c. That symbol was not declared in&lt;br/&gt;
ldlm/ldlm_internal.h, which is not included in mgs_handler.c.&lt;/p&gt;</comment>
                            <comment id="138643" author="jay" created="Tue, 12 Jan 2016 06:49:53 +0000"  >&lt;p&gt;Hi Jay,&lt;/p&gt;

&lt;p&gt;I pushed a new patch that should be able to be applied to b2_5_fe. Please give it a try.&lt;/p&gt;</comment>
                            <comment id="138730" author="jaylan" created="Tue, 12 Jan 2016 20:20:29 +0000"  >&lt;p&gt;Hi Jinshan,&lt;/p&gt;

&lt;p&gt;The new patch is even further away from b2_5_fe &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/sad.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;br/&gt;
My code does not even have ldlm_cancel_lock_for_export(). The version I have in b2_5_fe is&lt;br/&gt;
ldlm_cancel_locks_for_export(). &lt;/p&gt;</comment>
                            <comment id="138772" author="bobijam" created="Wed, 13 Jan 2016 05:26:08 +0000"  >&lt;p&gt;port for b2_5_fe is at &lt;a href=&quot;http://review.whamcloud.com/17976&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17976&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="139199" author="jay" created="Mon, 18 Jan 2016 19:22:27 +0000"  >&lt;p&gt;patch set 2 failed on maloo test but it could hit a different problem. The original problem is that the node got stuck at cleanup but this time it moved forward. I will take a further look.&lt;/p&gt;</comment>
                            <comment id="139278" author="sarah" created="Tue, 19 Jan 2016 18:51:04 +0000"  >&lt;p&gt;client and server: lustre-master # 3305 RHEL6.7 &lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/798b7bb0-bba4-11e5-8506-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/798b7bb0-bba4-11e5-8506-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="139310" author="jay" created="Tue, 19 Jan 2016 21:38:18 +0000"  >&lt;p&gt;I couldn&apos;t see any log from the maloo result. Is there any way to retain the log after test is complete? Or we can reproduce it on our test nodes manually.&lt;/p&gt;</comment>
                            <comment id="139937" author="jgmitter" created="Mon, 25 Jan 2016 17:42:23 +0000"  >&lt;p&gt;Sarah,&lt;br/&gt;
Are you able to help Jinshan test manually and obtain the needed logs?&lt;br/&gt;
Thanks.&lt;br/&gt;
Joe&lt;/p&gt;</comment>
                            <comment id="139942" author="sarah" created="Mon, 25 Jan 2016 18:07:04 +0000"  >&lt;p&gt;Hi Joe,&lt;/p&gt;

&lt;p&gt;Working on it right now.&lt;br/&gt;
for test only patch&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/#/c/18130/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/18130/&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="140001" author="sarah" created="Tue, 26 Jan 2016 07:36:20 +0000"  >&lt;p&gt;Xiong,&lt;br/&gt;
here is the logs for your patch set 3&lt;br/&gt;
please see them on shadow under this dir:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[w3liu@shadow-1 ~]$ ls /home/autotest/logdir/test_logs/2016-01-25/lustre-reviews-el6_7-x86_64--review-dne-part-1--1_6_1__37054__-70190851309880-224528 | grep 26
replay-dual.test_26.console.shadow-4vm1.log
replay-dual.test_26.console.shadow-4vm2.log
replay-dual.test_26.console.shadow-4vm3.log
replay-dual.test_26.console.shadow-4vm4.log
replay-dual.test_26.console.shadow-4vm8.log
replay-dual.test_26.test_log.shadow-4vm1.log
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="140074" author="sarah" created="Tue, 26 Jan 2016 18:03:46 +0000"  >&lt;p&gt;I put all logs into one file, please check&lt;/p&gt;</comment>
                            <comment id="140143" author="jay" created="Wed, 27 Jan 2016 01:01:07 +0000"  >&lt;p&gt;Finally I could reproduce the problem on my own test cluster at home. It turned out the client running dbench was evicted. Please check the log in attachment.&lt;/p&gt;

&lt;p&gt;Before it was evicted, it complained that &apos;version mismatch during replay&apos;; it looks like the object was changed. I don&apos;t know why this would happen.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: 2818:0:(client.c:2886:ptlrpc_replay_interpret()) @@@ Version mismatch during replay
  req@ffff8800383a3c80 x1524469815762040/t360777260454(360777260454) o36-&amp;gt;lustre-MDT0000-mdc-ffff88005a4a7800@172.16.0.128@tcp:12/10 lens 616/424 e 0 to 0 dl 1453855303 ref 2 fl Interpret:R/4/0 rc -75/-75
Lustre: 2818:0:(client.c:2886:ptlrpc_replay_interpret()) @@@ Version mismatch during replay
  req@ffff88003d87c9c0 x1524469815775168/t360777261048(360777261048) o101-&amp;gt;lustre-MDT0000-mdc-ffff88005a4a7800@172.16.0.128@tcp:12/10 lens 808/544 e 0 to 0 dl 1453855304 ref 2 fl Interpret:R/4/0 rc -75/-75
Lustre: 2818:0:(client.c:2886:ptlrpc_replay_interpret()) Skipped 1 previous similar message
Lustre: 2818:0:(client.c:2886:ptlrpc_replay_interpret()) @@@ Version mismatch during replay
  req@ffff880037ca60c0 x1524469815787324/t360777261724(360777261724) o36-&amp;gt;lustre-MDT0000-mdc-ffff88005a4a7800@172.16.0.128@tcp:12/10 lens 616/424 e 0 to 0 dl 1453855306 ref 2 fl Interpret:R/4/0 rc -75/-75
Lustre: DEBUG MARKER: mdc.lustre-MDT0000-mdc-*.mds_server_uuid in FULL state after 9 sec
Lustre: 2818:0:(import.c:1339:completed_replay_interpret()) lustre-MDT0000-mdc-ffff88005a4a7800: version recovery fails, reconnecting
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="140146" author="jay" created="Wed, 27 Jan 2016 01:05:39 +0000"  >&lt;p&gt;Actually the problem I saw was irrelevant to this ticket at all because I could see the same problem without the patch applied.&lt;/p&gt;</comment>
                            <comment id="140169" author="jay" created="Wed, 27 Jan 2016 06:51:49 +0000"  >&lt;p&gt;Hi Tappro, can you please take a look at the logs I posted above?&lt;/p&gt;</comment>
                            <comment id="140171" author="di.wang" created="Wed, 27 Jan 2016 07:04:29 +0000"  >&lt;p&gt;Hmm, this usually means recovery failed (most likely fid and name do not match, IMHO). So you mean this failure is related with the umount timeout or not? If not, do you mind if I create a new ticket here? I saw similar failures in other recovery tests as well. &lt;/p&gt;</comment>
                            <comment id="140173" author="di.wang" created="Wed, 27 Jan 2016 08:10:01 +0000"  >&lt;p&gt;Though this might not be the real cause of &quot;version mismatch&quot;,  but replay-dual.sh 26 can pass with the fix of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7716&quot; title=&quot;Do not do subdir check if source and target are in the same directory&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7716&quot;&gt;&lt;del&gt;LU-7716&lt;/del&gt;&lt;/a&gt; constantly in my local test.&lt;/p&gt;</comment>
                            <comment id="140246" author="jay" created="Wed, 27 Jan 2016 18:11:32 +0000"  >&lt;p&gt;Hi Di, the problem I mentioned above is irrelevant to the unmount issue we&apos;re addressing in this ticket. Please go ahead filing a new ticket. The problem is the testing can&apos;t be passed at all. It keeps hitting that issue.&lt;/p&gt;</comment>
                            <comment id="140247" author="jay" created="Wed, 27 Jan 2016 18:12:37 +0000"  >&lt;p&gt;btw, are you seeing the same issue as I described above? &lt;/p&gt;</comment>
                            <comment id="140252" author="di.wang" created="Wed, 27 Jan 2016 18:24:17 +0000"  >&lt;p&gt;ah, I do not know the umount issue, and cannot reproduce it locally either. &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/sad.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;  But I can see the &quot;Version mismatch&quot; issue as you described above, which seems related with rename recovery. &lt;/p&gt;

&lt;p&gt;I posted the patch on &lt;a href=&quot;http://review.whamcloud.com/18172&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/18172&lt;/a&gt;, and the &quot;version mismatch&quot; seems to disappear with this patch, at least in my local tests. But I probably need to dig further here. &lt;/p&gt;</comment>
                            <comment id="140259" author="jay" created="Wed, 27 Jan 2016 18:50:02 +0000"  >&lt;p&gt;indeed, your patch helped a lot on my new test. I only saw one failure after that and there was no eviction, but I didn&apos;t catch the log. I tried a lot of times to reproduce it but failed - maybe just a hiccup somewhere.&lt;/p&gt;

&lt;p&gt;I&apos;ll rebase my patch on Di&apos;s and see how it goes.&lt;/p&gt;</comment>
                            <comment id="140420" author="jaylan" created="Thu, 28 Jan 2016 19:26:58 +0000"  >&lt;p&gt;Zhenyu Xu ported an earlier version (patch set 3, IIRC) of #17853 for b2_5_fe at&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/17976&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17976&lt;/a&gt; (see comment at 12/Jan/16 9:26 PM)&lt;/p&gt;

&lt;p&gt;Could someone from Intel update 17976 to the latest patch set #4 for us? Thanks!&lt;/p&gt;</comment>
                            <comment id="140608" author="jay" created="Sat, 30 Jan 2016 01:35:37 +0000"  >&lt;p&gt;Hi Jay,&lt;/p&gt;

&lt;p&gt;The patch set 3 of 17853 is the same thing as patch 17976.&lt;/p&gt;</comment>
                            <comment id="141027" author="standan" created="Wed, 3 Feb 2016 18:21:48 +0000"  >&lt;p&gt;Another instance failing with the same error as above for tag 2.7.66 for FULL - EL6.7 Server/EL6.7 Client - DNE , master build# 3314.&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/86ca0268-ca83-11e5-9215-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/86ca0268-ca83-11e5-9215-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Encountered another instance for tag 2.7.66 for FULL - EL7.1 Server/EL7.1 Client , master , build# 3314.&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/b91000ec-ca88-11e5-84d3-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/b91000ec-ca88-11e5-84d3-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another failure for master : Tag 2.7.66 FULL - EL7.1 Server/SLES11 SP3 Client, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/a51072dc-ca7b-11e5-9609-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/a51072dc-ca7b-11e5-9609-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="141212" author="jay" created="Thu, 4 Feb 2016 18:10:48 +0000"  >&lt;p&gt;For the failure at this test results:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/10bb2d7e-cb44-11e5-b3e8-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/10bb2d7e-cb44-11e5-b3e8-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;It shows the error message: &quot;dbench 31895 missing&quot;, but actually the dbench process(dbench_26_pid) had PID as 7185. There is no error message in the console log either. I don&apos;t know where the error came from.&lt;/p&gt;</comment>
                            <comment id="141843" author="standan" created="Wed, 10 Feb 2016 21:47:00 +0000"  >&lt;p&gt;Another instance found for interop tag 2.7.66 - EL7 Server/2.7.1 Client, build# 3316&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d59d88b4-cc91-11e5-b80c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d59d88b4-cc91-11e5-b80c-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="141888" author="standan" created="Wed, 10 Feb 2016 22:52:54 +0000"  >&lt;p&gt;Another instance found for Full tag 2.7.66 - EL6.7 Server/EL6.7 Client, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/64aef89c-ca6e-11e5-9215-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/64aef89c-ca6e-11e5-9215-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for Full tag 2.7.66 - EL6.7 Server/EL6.7 Client - ZFS, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/999f5e28-cb47-11e5-a59a-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/999f5e28-cb47-11e5-a59a-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for Full tag 2.7.66 - EL6.7 Server/EL6.7 Client - DNE, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/86ca0268-ca83-11e5-9215-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/86ca0268-ca83-11e5-9215-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for Full tag 2.7.66 - EL7.1 Server/EL6.7 Client, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/84997ede-ca91-11e5-9609-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/84997ede-ca91-11e5-9609-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for Full tag 2.7.66 - EL7.1 Server/EL7.1 Client, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/b91000ec-ca88-11e5-84d3-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/b91000ec-ca88-11e5-84d3-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for Full tag 2.7.66 -EL7.1 Server/SLES11 SP3 Client, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/a51072dc-ca7b-11e5-9609-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/a51072dc-ca7b-11e5-9609-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for Full tag 2.7.66 -EL7.1 Server/EL7.1 Client - ZFS, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/de1df1ae-cb88-11e5-b49e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/de1df1ae-cb88-11e5-b49e-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for Full tag 2.7.66 -EL7.1 Server/EL7.1 Client - DNE, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/a7b8000a-cac5-11e5-9609-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/a7b8000a-cac5-11e5-9609-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="154547" author="adilger" created="Fri, 3 Jun 2016 07:30:12 +0000"  >&lt;p&gt;Not sure if &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7765&quot; title=&quot;replay-dual test 26 buggy redirection&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7765&quot;&gt;&lt;del&gt;LU-7765&lt;/del&gt;&lt;/a&gt; is the cause of this, but it can&apos;t be helping.&lt;/p&gt;</comment>
                            <comment id="154953" author="gerrit" created="Tue, 7 Jun 2016 18:42:30 +0000"  >&lt;p&gt;Andreas Dilger (andreas.dilger@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/20669&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/20669&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: disable replay-dual test_26&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: f521bf86fd77e3af72873281decdadc8fef6eb81&lt;/p&gt;</comment>
                            <comment id="155435" author="gerrit" created="Sat, 11 Jun 2016 09:46:39 +0000"  >&lt;p&gt;Andreas Dilger (andreas.dilger@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/20669/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/20669/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: disable replay-dual test_26&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 8b8b000eede1353a879b71d052a8a6f998431cdd&lt;/p&gt;</comment>
                            <comment id="158077" author="yujian" created="Fri, 8 Jul 2016 00:45:57 +0000"  >&lt;p&gt;On master branch, replay-vbr test 1b also hit the same failure:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/95d54048-41f6-11e6-a0ce-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/95d54048-41f6-11e6-a0ce-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d7876c2c-41bb-11e6-bbf5-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d7876c2c-41bb-11e6-bbf5-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/470a0382-541f-11e6-88a7-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/470a0382-541f-11e6-88a7-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="185772" author="lixi" created="Wed, 22 Feb 2017 13:49:25 +0000"  >&lt;p&gt;Hi, any conclusion on the patch &lt;a href=&quot;https://review.whamcloud.com/#/c/17853/?&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/17853/?&lt;/a&gt; We are hitting this problem really frequently. Would you please accelerate the review of that patch?&lt;/p&gt;</comment>
                            <comment id="193062" author="pjones" created="Fri, 21 Apr 2017 18:04:02 +0000"  >&lt;p&gt;Li Xi&lt;/p&gt;

&lt;p&gt;It would probably be best to open a DDN ticket for the issue you are hitting&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="196944" author="jamesanunez" created="Wed, 24 May 2017 17:40:47 +0000"  >&lt;p&gt;Using patch 17853, we are still seeing replay-dual test 26 hangs. The patch I used to test this is at &lt;a href=&quot;https://review.whamcloud.com/#/c/27256/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/27256/&lt;/a&gt; and the replay-dual failure is at  &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/3d171e98-408f-11e7-b3f6-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/3d171e98-408f-11e7-b3f6-5254006e85c2&lt;/a&gt;. &lt;/p&gt;

&lt;p&gt;Looking at the MDS1 console, we can see that the MDS hangs in umount&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;07:36:47:[ 4392.087661] Lustre: 5245:0:(client.c:2114:ptlrpc_expire_one_request()) Skipped 24 previous similar messages
07:36:47:[ 4425.382238] LustreError: 137-5: lustre-MDT0000_UUID: not available for connect from 10.2.4.147@tcp (no target). If you are running an HA pair check that the target is mounted on the other server.
07:36:47:[ 4425.387696] LustreError: Skipped 901 previous similar messages
07:36:47:[ 4440.091059] INFO: task umount:13451 blocked for more than 120 seconds.
07:36:47:[ 4440.093651] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
07:36:47:[ 4440.096310] umount          D ffff88005f304600     0 13451  13450 0x00000080
07:36:47:[ 4440.098974]  ffff880065df3a20 0000000000000082 ffff8800663cbec0 ffff880065df3fd8
07:36:47:[ 4440.101633]  ffff880065df3fd8 ffff880065df3fd8 ffff8800663cbec0 ffff88005f304740
07:36:47:[ 4440.104257]  ffff88005f304748 7fffffffffffffff ffff8800663cbec0 ffff88005f304600
07:36:47:[ 4440.106805] Call Trace:
07:36:47:[ 4440.108922]  [&amp;lt;ffffffff8168c3c9&amp;gt;] schedule+0x29/0x70
07:36:47:[ 4440.111173]  [&amp;lt;ffffffff81689e29&amp;gt;] schedule_timeout+0x239/0x2c0
07:36:47:[ 4440.113525]  [&amp;lt;ffffffff811dafb9&amp;gt;] ? discard_slab+0x39/0x50
07:36:47:[ 4440.115807]  [&amp;lt;ffffffff811dcaaa&amp;gt;] ? __slab_free+0x2ca/0x2f0
07:36:47:[ 4440.118100]  [&amp;lt;ffffffff8168c7a6&amp;gt;] wait_for_completion+0x116/0x170
07:36:47:[ 4440.120423]  [&amp;lt;ffffffff810c54c0&amp;gt;] ? wake_up_state+0x20/0x20
07:36:47:[ 4440.122724]  [&amp;lt;ffffffffa0d8205e&amp;gt;] mgs_ir_fini_fs+0x27e/0x2f0 [mgs]
07:36:47:[ 4440.125044]  [&amp;lt;ffffffffa0d753f3&amp;gt;] mgs_put_fsdb+0x53/0x910 [mgs]
07:36:47:[ 4440.127330]  [&amp;lt;ffffffffa0d75e3c&amp;gt;] mgs_cleanup_fsdb_list+0x6c/0x90 [mgs]
07:36:47:[ 4440.129664]  [&amp;lt;ffffffffa0d59a97&amp;gt;] mgs_device_fini+0x97/0x5b0 [mgs]
07:36:47:[ 4440.132081]  [&amp;lt;ffffffffa087d584&amp;gt;] class_cleanup+0x784/0xd90 [obdclass]
07:36:47:[ 4440.134416]  [&amp;lt;ffffffffa088051d&amp;gt;] class_process_config+0x225d/0x2b10 [obdclass]
07:36:47:[ 4440.136806]  [&amp;lt;ffffffffa0678d87&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
07:36:47:[ 4440.139136]  [&amp;lt;ffffffffa0880ebf&amp;gt;] class_manual_cleanup+0xef/0x810 [obdclass]
07:36:47:[ 4440.141491]  [&amp;lt;ffffffffa08b1580&amp;gt;] server_put_super+0xb20/0xcd0 [obdclass]
07:36:47:[ 4440.143755]  [&amp;lt;ffffffff81200d22&amp;gt;] generic_shutdown_super+0x72/0xf0
07:36:47:[ 4440.145932]  [&amp;lt;ffffffff812010f2&amp;gt;] kill_anon_super+0x12/0x20
07:36:47:[ 4440.148033]  [&amp;lt;ffffffffa0884a92&amp;gt;] lustre_kill_super+0x32/0x50 [obdclass]
07:36:47:[ 4440.150217]  [&amp;lt;ffffffff812014a9&amp;gt;] deactivate_locked_super+0x49/0x60
07:36:47:[ 4440.152331]  [&amp;lt;ffffffff81201aa6&amp;gt;] deactivate_super+0x46/0x60
07:36:47:[ 4440.154363]  [&amp;lt;ffffffff8121eef5&amp;gt;] mntput_no_expire+0xc5/0x120
07:36:47:[ 4440.156357]  [&amp;lt;ffffffff81220030&amp;gt;] SyS_umount+0xa0/0x3b0
07:36:47:[ 4440.158276]  [&amp;lt;ffffffff816973c9&amp;gt;] system_call_fastpath+0x16/0x1b
07:36:47:[ 4560.160066] INFO: task umount:13451 blocked for more than 120 seconds.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
</comment>
                            <comment id="203847" author="gerrit" created="Sat, 29 Jul 2017 00:02:20 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/17853/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/17853/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; mgs: reprocess all locks at device fini&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 2dc19f20ba9fcc1bcac6ae7ee5169ce10caab882&lt;/p&gt;</comment>
                            <comment id="203881" author="pjones" created="Sat, 29 Jul 2017 13:35:27 +0000"  >&lt;p&gt;Landed for 2.11&lt;/p&gt;</comment>
                            <comment id="204219" author="gerrit" created="Wed, 2 Aug 2017 16:18:08 +0000"  >&lt;p&gt;Minh Diep (minh.diep@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/28323&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28323&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; mgs: reprocess all locks at device fini&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 3eed63b655e18de53540b7aaa33aa69e17f78257&lt;/p&gt;</comment>
                            <comment id="205037" author="gerrit" created="Thu, 10 Aug 2017 16:26:05 +0000"  >&lt;p&gt;John L. Hammond (john.hammond@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/28323/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28323/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; mgs: reprocess all locks at device fini&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 6df5f062727fc216072bc8daa7dfbc545876a2b0&lt;/p&gt;</comment>
                            <comment id="216648" author="jamesanunez" created="Mon, 18 Dec 2017 20:56:56 +0000"  >&lt;p&gt;replay-dual test 26 is still on the ALWAYS_EXCEPT list and needs to be removed to fully test that the patches fix this issue. &lt;/p&gt;</comment>
                            <comment id="217281" author="gerrit" created="Sun, 31 Dec 2017 23:30:15 +0000"  >&lt;p&gt;James Casper (jamesx.casper@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/30677&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30677&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; test: Remove replay-dual test_26 from ALWAYS_EXCEPT&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 09d341f5fb7b09283cb4515b17f10905564fdf4c&lt;/p&gt;</comment>
                            <comment id="217762" author="gerrit" created="Tue, 9 Jan 2018 05:35:15 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/30677/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30677/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; test: Remove replay-dual test_26 from ALWAYS_EXCEPT&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: bda752429ad6e0d7be0eb76c8abe1bf8561ac32f&lt;/p&gt;</comment>
                            <comment id="217816" author="gerrit" created="Tue, 9 Jan 2018 16:17:19 +0000"  >&lt;p&gt;Minh Diep (minh.diep@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/30796&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30796&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; test: Remove replay-dual test_26 from ALWAYS_EXCEPT&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 3872c328fa09d5fbde801e5e88e783ae27bf99a6&lt;/p&gt;</comment>
                            <comment id="218787" author="yujian" created="Mon, 22 Jan 2018 07:42:49 +0000"  >&lt;p&gt;By searching on Maloo, I found replay-dual test 26 hung at least 17 times in the last week.&lt;/p&gt;

&lt;p&gt;Here is a failure instance on master branch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/358e9718-fda5-11e7-a7cd-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/358e9718-fda5-11e7-a7cd-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Console log on MDS:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[51864.393931] LustreError: 14617:0:(qsd_reint.c:56:qsd_reint_completion()) lustre-MDT0000: failed to enqueue global quota lock, glb fid:[0x200000006:0x1010000:0x0], rc:-5
[51864.395871] LustreError: 14617:0:(qsd_reint.c:56:qsd_reint_completion()) Skipped 1 previous similar message
[51960.236441] INFO: task umount:14605 blocked for more than 120 seconds.
[51960.237288] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[51960.238163] umount          D ffff88005c01cf10     0 14605  14604 0x00000080
[51960.238987] Call Trace:
[51960.239287]  [&amp;lt;ffffffff816ac5d9&amp;gt;] schedule_preempt_disabled+0x29/0x70
[51960.240077]  [&amp;lt;ffffffff816aa407&amp;gt;] __mutex_lock_slowpath+0xc7/0x1d0
[51960.241003]  [&amp;lt;ffffffff816a981f&amp;gt;] mutex_lock+0x1f/0x2f
[51960.241719]  [&amp;lt;ffffffffc069c3e7&amp;gt;] mgc_process_config+0x207/0x13f0 [mgc]
[51960.242608]  [&amp;lt;ffffffffc0950a26&amp;gt;] obd_process_config.constprop.14+0x76/0x230 [obdclass]
[51960.243674]  [&amp;lt;ffffffffc0638d47&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[51960.244490]  [&amp;lt;ffffffffc093d639&amp;gt;] ? lprocfs_counter_add+0xf9/0x160 [obdclass]
[51960.245406]  [&amp;lt;ffffffffc0951cbf&amp;gt;] lustre_end_log+0x1ff/0x550 [obdclass]
[51960.246218]  [&amp;lt;ffffffffc097d15e&amp;gt;] server_put_super+0x7de/0xcd0 [obdclass]
[51960.247052]  [&amp;lt;ffffffff812054d2&amp;gt;] generic_shutdown_super+0x72/0x100
[51960.247873]  [&amp;lt;ffffffff812058a2&amp;gt;] kill_anon_super+0x12/0x20
[51960.248579]  [&amp;lt;ffffffffc09500c2&amp;gt;] lustre_kill_super+0x32/0x50 [obdclass]
[51960.249417]  [&amp;lt;ffffffff81205c59&amp;gt;] deactivate_locked_super+0x49/0x60
[51960.250180]  [&amp;lt;ffffffff812063c6&amp;gt;] deactivate_super+0x46/0x60
[51960.250878]  [&amp;lt;ffffffff8122376f&amp;gt;] cleanup_mnt+0x3f/0x80
[51960.251555]  [&amp;lt;ffffffff81223802&amp;gt;] __cleanup_mnt+0x12/0x20
[51960.252269]  [&amp;lt;ffffffff810aee05&amp;gt;] task_work_run+0xc5/0xf0
[51960.252967]  [&amp;lt;ffffffff8102ab52&amp;gt;] do_notify_resume+0x92/0xb0
[51960.253708]  [&amp;lt;ffffffff816b8d37&amp;gt;] int_signal+0x12/0x17
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="227329" author="mdiep" created="Fri, 4 May 2018 15:39:22 +0000"  >&lt;p&gt;+1 on b2_10&#160;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/07ca0dd2-43d0-11e8-960d-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/07ca0dd2-43d0-11e8-960d-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="229976" author="jamesanunez" created="Thu, 5 Jul 2018 19:57:06 +0000"  >&lt;p&gt;We have another case of replay-dual test 26 failing in umount. The interesting thing here is that the test just fails and continues with the next test. The test does not time out/hang due to this failure.&lt;/p&gt;

&lt;p&gt;The console logs of the MDS (vm4) have the same call trace in umount as above. &lt;/p&gt;

&lt;p&gt;Logs are at: &lt;a href=&quot;https://testing.whamcloud.com/test_sets/f594d332-8023-11e8-97ff-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/f594d332-8023-11e8-97ff-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="231085" author="gerrit" created="Mon, 30 Jul 2018 17:54:37 +0000"  >&lt;p&gt;James Nunez (jnunez@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/32902&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/32902&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: stop running replay-dual test 26&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 668755f74905ae3e73f46b6eea82f54d5cdc4864&lt;/p&gt;</comment>
                            <comment id="231735" author="gerrit" created="Thu, 9 Aug 2018 18:20:07 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/32902/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/32902/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: stop running replay-dual test 26&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 77fc116fb230f330761767e240882c2141b30c1a&lt;/p&gt;</comment>
                            <comment id="231947" author="jamesanunez" created="Tue, 14 Aug 2018 21:07:05 +0000"  >&lt;p&gt;Looking at the MDS console logs, the following test sessions have essentially the same stack trace as what is described in this ticket. Looking at the kernel crash logs shows oom-killer.&lt;/p&gt;

&lt;p&gt;For &lt;a href=&quot;https://testing.whamcloud.com/test_sets/a498de80-9ade-11e8-8ee3-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/a498de80-9ade-11e8-8ee3-52540065bddc&lt;/a&gt;, the kernel crash log shows that tar invokes the oom-killer:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[60604.459486] tar invoked oom-killer: gfp_mask=0x200da, order=0, oom_score_adj=0
[60604.460400] tar cpuset=/ mems_allowed=0
[60604.460823] CPU: 0 PID: 16324 Comm: tar Kdump: loaded Tainted: G           OE  ------------   3.10.0-862.9.1.el7.x86_64 #1
[60604.461874] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[60604.462439] Call Trace:
[60604.462724]  [&amp;lt;ffffffff81b0e84e&amp;gt;] dump_stack+0x19/0x1b
[60604.463220]  [&amp;lt;ffffffff81b0a1d0&amp;gt;] dump_header+0x90/0x229
[60604.463737]  [&amp;lt;ffffffff81b1badf&amp;gt;] ? notifier_call_chain+0x4f/0x70
[60604.464338]  [&amp;lt;ffffffff814c17b8&amp;gt;] ? __blocking_notifier_call_chain+0x58/0x70
[60604.465018]  [&amp;lt;ffffffff8159826e&amp;gt;] check_panic_on_oom+0x2e/0x60
[60604.465589]  [&amp;lt;ffffffff8159868b&amp;gt;] out_of_memory+0x23b/0x4f0
[60604.466124]  [&amp;lt;ffffffff8159f224&amp;gt;] __alloc_pages_nodemask+0xaa4/0xbb0
[60604.466735]  [&amp;lt;ffffffff815ec525&amp;gt;] alloc_pages_vma+0xb5/0x200
[60604.467279]  [&amp;lt;ffffffff815dae45&amp;gt;] __read_swap_cache_async+0x115/0x190
[60604.467886]  [&amp;lt;ffffffff815daee6&amp;gt;] read_swap_cache_async+0x26/0x60
[60604.468472]  [&amp;lt;ffffffff815dafc8&amp;gt;] swapin_readahead+0xa8/0x110
[60604.469034]  [&amp;lt;ffffffff815c5f37&amp;gt;] handle_pte_fault+0x777/0xc30
[60604.469601]  [&amp;lt;ffffffff815c7c3d&amp;gt;] handle_mm_fault+0x39d/0x9b0
[60604.470163]  [&amp;lt;ffffffff81525092&amp;gt;] ? from_kgid_munged+0x12/0x20
[60604.470717]  [&amp;lt;ffffffff81b1b557&amp;gt;] __do_page_fault+0x197/0x4f0
[60604.471260]  [&amp;lt;ffffffff81b1b996&amp;gt;] trace_do_page_fault+0x56/0x150
[60604.471829]  [&amp;lt;ffffffff81b1af22&amp;gt;] do_async_page_fault+0x22/0xf0
[60604.472405]  [&amp;lt;ffffffff81b17788&amp;gt;] async_page_fault+0x28/0x30
[60604.472938] Mem-Info:
[60604.473179] active_anon:0 inactive_anon:6 isolated_anon:0
 active_file:274 inactive_file:308 isolated_file:0
 unevictable:0 dirty:38 writeback:0 unstable:0
 slab_reclaimable:3259 slab_unreclaimable:38342
 mapped:181 shmem:12 pagetables:1345 bounce:0
 free:12914 free_pcp:0 free_cma:0
[60604.476120] Node 0 DMA free:7020kB min:416kB low:520kB high:624kB active_anon:4kB inactive_anon:0kB active_file:4kB inactive_file:0kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:15992kB managed:15908kB mlocked:0kB dirty:12kB writeback:0kB mapped:4kB shmem:48kB slab_reclaimable:132kB slab_unreclaimable:724kB kernel_stack:16kB pagetables:124kB unstable:0kB bounce:0kB free_pcp:0kB local_pcp:0kB free_cma:0kB writeback_tmp:0kB pages_scanned:1494 all_unreclaimable? yes
[60604.480017] lowmem_reserve[]: 0 1660 1660 1660
[60604.480573] Node 0 DMA32 free:44636kB min:44636kB low:55792kB high:66952kB active_anon:0kB inactive_anon:24kB active_file:1092kB inactive_file:1232kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:2080744kB managed:1704004kB mlocked:0kB dirty:140kB writeback:0kB mapped:720kB shmem:0kB slab_reclaimable:12904kB slab_unreclaimable:152644kB kernel_stack:2848kB pagetables:5256kB unstable:0kB bounce:0kB free_pcp:0kB local_pcp:0kB free_cma:0kB writeback_tmp:0kB pages_scanned:3668 all_unreclaimable? yes
[60604.484717] lowmem_reserve[]: 0 0 0 0
[60604.485180] Node 0 DMA: 9*4kB (UM) 5*8kB (UM) 10*16kB (M) 9*32kB (UM) 8*64kB (UM) 5*128kB (UM) 5*256kB (UM) 4*512kB (UM) 0*1024kB 1*2048kB (M) 0*4096kB = 7052kB
[60604.486948] Node 0 DMA32: 2654*4kB (EM) 2385*8kB (EM) 941*16kB (UM) 0*32kB 0*64kB 0*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB = 44752kB
[60604.488485] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=1048576kB
[60604.489308] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB
[60604.490109] 303 total pagecache pages
[60604.490472] 6 pages in swap cache
[60604.490790] Swap cache stats: add 86869, delete 86863, find 18036/30348
[60604.491421] Free swap  = 3521276kB
[60604.491749] Total swap = 3671036kB
[60604.492082] 524184 pages RAM
[60604.492368] 0 pages HighMem/MovableOnly
[60604.492812] 94206 pages reserved
[60604.493137] [ pid ]   uid  tgid total_vm      rss nr_ptes swapents oom_score_adj name
[60604.493889] [  333]     0   333     9868        0      22      108             0 systemd-journal
[60604.494776] [  359]     0   359    29149        0      26       79             0 lvmetad
[60604.495542] [  366]     0   366    11101        1      23      147         -1000 systemd-udevd
[60604.496357] [  453]     0   453    13877        0      26      119         -1000 auditd
[60604.497124] [  480]     0   480     6627        1      19       95             0 systemd-logind
[60604.497929] [  481]   999   481   134608        0      60     2165             0 polkitd
[60604.498689] [  482]     0   482     5381        0      15       59             0 irqbalance
[60604.499486] [  483]    81   483    14590        1      34      213          -900 dbus-daemon
[60604.500283] [  484]    32   484    17305        0      38      160             0 rpcbind
[60604.501042] [  485]     0   485    48770        0      36      126             0 gssproxy
[60604.501815] [  486]     0   486   137505        0      87      654             0 NetworkManager
[60604.502639] [  501]   998   501    30087        0      29      123             0 chronyd
[60604.503403] [  533]     0   533    26849        1      53      499             0 dhclient
[60604.504171] [  895]     0   895   143453       42      98     2797             0 tuned
[60604.504929] [  896]     0   896    28203        1      55      257         -1000 sshd
[60604.505686] [  900]     0   900    74575        8      73      916             0 rsyslogd
[60604.506456] [  906]     0   906     6791        1      18       62             0 xinetd
[60604.507223] [  913]    29   913    10605        0      24      209             0 rpc.statd
[60604.508002] [  917]   997   917    56469        0      23      285             0 munged
[60604.508760] [  992]     0   992    31570        1      19      155             0 crond
[60604.509518] [  993]     0   993     6476        0      19       52             0 atd
[60604.510256] [  996]     0   996   167981        0      69      580             0 automount
[60604.511040] [ 1007]     0  1007    27522        1      12       33             0 agetty
[60604.511804] [ 1009]     0  1009    27522        1       9       32             0 agetty
[60604.512563] [ 1181]     0  1181    22408        0      43      265             0 master
[60604.513318] [ 1200]    89  1200    22451        0      46      253             0 qmgr
[60604.514075] [10963]     0 10963    39169        0      78      351             0 sshd
[60604.514809] [10965]     0 10965    28296        1      14       58             0 run_test.sh
[60604.515601] [11233]     0 11233    29536        1      16      790             0 bash
[60604.516346] [21585]     0 21585    29536        0      13      790             0 bash
[60604.517086] [21586]     0 21586    26988        0      10       27             0 tee
[60604.517808] [21767]     0 21767    29573        1      14      840             0 bash
[60604.518551] [31867]    89 31867    22434        0      45      251             0 pickup
[60604.519302] [11943]     0 11943    29607        1      14      863             0 bash
[60604.520033] [11944]     0 11944    26988        0      10       28             0 tee
[60604.520775] [12397]     0 12397    29607        0      14      863             0 bash
[60604.521528] [12398]     0 12398    29607        0      14      863             0 bash
[60604.522264] [12911]     0 12911    24022        0      22       84             0 pdsh
[60604.523013] [12912]     0 12912    29228        0      14       42             0 sed
[60604.523754] [15184]     0 15184    29474        1      13      732             0 rundbench
[60604.524547] [15195]     0 15195     1618       26       9       52             0 dbench
[60604.525302] [15196]     0 15196     1620       61       9       62             0 dbench
[60604.526052] [16324]     0 16324    30920       53      15       89             0 tar
[60604.526795] [16325]     0 16325    30852       57      17       81             0 tar
[60604.527548] Kernel panic - not syncing: Out of memory: system-wide panic_on_oom is enabled

[60604.528460] CPU: 0 PID: 16324 Comm: tar Kdump: loaded Tainted: G           OE  ------------   3.10.0-862.9.1.el7.x86_64 #1
[60604.529473] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[60604.530011] Call Trace:
[60604.530259]  [&amp;lt;ffffffff81b0e84e&amp;gt;] dump_stack+0x19/0x1b
[60604.530746]  [&amp;lt;ffffffff81b08b50&amp;gt;] panic+0xe8/0x21f
[60604.531196]  [&amp;lt;ffffffff81598295&amp;gt;] check_panic_on_oom+0x55/0x60
[60604.531747]  [&amp;lt;ffffffff8159868b&amp;gt;] out_of_memory+0x23b/0x4f0
[60604.532268]  [&amp;lt;ffffffff8159f224&amp;gt;] __alloc_pages_nodemask+0xaa4/0xbb0
[60604.532865]  [&amp;lt;ffffffff815ec525&amp;gt;] alloc_pages_vma+0xb5/0x200
[60604.533400]  [&amp;lt;ffffffff815dae45&amp;gt;] __read_swap_cache_async+0x115/0x190
[60604.533994]  [&amp;lt;ffffffff815daee6&amp;gt;] read_swap_cache_async+0x26/0x60
[60604.534565]  [&amp;lt;ffffffff815dafc8&amp;gt;] swapin_readahead+0xa8/0x110
[60604.535096]  [&amp;lt;ffffffff815c5f37&amp;gt;] handle_pte_fault+0x777/0xc30
[60604.535643]  [&amp;lt;ffffffff815c7c3d&amp;gt;] handle_mm_fault+0x39d/0x9b0
[60604.536180]  [&amp;lt;ffffffff81525092&amp;gt;] ? from_kgid_munged+0x12/0x20
[60604.536733]  [&amp;lt;ffffffff81b1b557&amp;gt;] __do_page_fault+0x197/0x4f0
[60604.537268]  [&amp;lt;ffffffff81b1b996&amp;gt;] trace_do_page_fault+0x56/0x150
[60604.537828]  [&amp;lt;ffffffff81b1af22&amp;gt;] do_async_page_fault+0x22/0xf0
[60604.538383]  [&amp;lt;ffffffff81b17788&amp;gt;] async_page_fault+0x28/0x30
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The kernel-crash for &lt;a href=&quot;https://testing.whamcloud.com/test_sets/0bb1397a-9bb9-11e8-8ee3-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/0bb1397a-9bb9-11e8-8ee3-52540065bddc&lt;/a&gt; has tar invoking the oom-killer, but we have a few different *_newfstat calls in the kernel crash:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[44806.676691] Lustre: DEBUG MARKER: test_26 fail mds1 1 times
[44998.330738] tar invoked oom-killer: gfp_mask=0x200da, order=0, oom_score_adj=0
[44998.332386] tar cpuset=/ mems_allowed=0
[44998.333090] CPU: 0 PID: 28253 Comm: tar Kdump: loaded Tainted: G        W  OE  ------------   3.10.0-862.9.1.el7.x86_64 #1
[44998.335039] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[44998.335669] Call Trace:
[44998.335982]  [&amp;lt;ffffffffabd0e84e&amp;gt;] dump_stack+0x19/0x1b
[44998.336497]  [&amp;lt;ffffffffabd0a1d0&amp;gt;] dump_header+0x90/0x229
[44998.337027]  [&amp;lt;ffffffffabd1badf&amp;gt;] ? notifier_call_chain+0x4f/0x70
[44998.337645]  [&amp;lt;ffffffffab6c17b8&amp;gt;] ? __blocking_notifier_call_chain+0x58/0x70
[44998.338336]  [&amp;lt;ffffffffab79826e&amp;gt;] check_panic_on_oom+0x2e/0x60
[44998.338902]  [&amp;lt;ffffffffab79868b&amp;gt;] out_of_memory+0x23b/0x4f0
[44998.339448]  [&amp;lt;ffffffffab79f224&amp;gt;] __alloc_pages_nodemask+0xaa4/0xbb0
[44998.340074]  [&amp;lt;ffffffffab7ec525&amp;gt;] alloc_pages_vma+0xb5/0x200
[44998.340639]  [&amp;lt;ffffffffab7dae45&amp;gt;] __read_swap_cache_async+0x115/0x190
[44998.341254]  [&amp;lt;ffffffffab7daee6&amp;gt;] read_swap_cache_async+0x26/0x60
[44998.341850]  [&amp;lt;ffffffffab7dafc8&amp;gt;] swapin_readahead+0xa8/0x110
[44998.342416]  [&amp;lt;ffffffffab7c5f37&amp;gt;] handle_pte_fault+0x777/0xc30
[44998.342995]  [&amp;lt;ffffffffab7c7c3d&amp;gt;] handle_mm_fault+0x39d/0x9b0
[44998.343566]  [&amp;lt;ffffffffabd1b557&amp;gt;] __do_page_fault+0x197/0x4f0
[44998.344126]  [&amp;lt;ffffffffabd1b996&amp;gt;] trace_do_page_fault+0x56/0x150
[44998.344722]  [&amp;lt;ffffffffabd1af22&amp;gt;] do_async_page_fault+0x22/0xf0
[44998.345287]  [&amp;lt;ffffffffabd17788&amp;gt;] async_page_fault+0x28/0x30
[44998.345888]  [&amp;lt;ffffffffab959730&amp;gt;] ? copy_user_generic_string+0x30/0x40
[44998.346533]  [&amp;lt;ffffffffab82142f&amp;gt;] ? cp_new_stat+0x14f/0x180
[44998.347077]  [&amp;lt;ffffffffab8215b4&amp;gt;] SYSC_newfstat+0x34/0x60
[44998.347607]  [&amp;lt;ffffffffab82179e&amp;gt;] SyS_newfstat+0xe/0x10
[44998.348120]  [&amp;lt;ffffffffabd20795&amp;gt;] system_call_fastpath+0x1c/0x21
[44998.348710]  [&amp;lt;ffffffffabd206e1&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[44998.349352] Mem-Info:
[44998.349595] active_anon:2 inactive_anon:18 isolated_anon:0
 active_file:15 inactive_file:1090 isolated_file:0
 unevictable:0 dirty:0 writeback:30 unstable:0
 slab_reclaimable:3679 slab_unreclaimable:38446
 mapped:55 shmem:0 pagetables:1385 bounce:0
 free:12871 free_pcp:108 free_cma:0
[44998.352588] Node 0 DMA free:6956kB min:416kB low:520kB high:624kB active_anon:0kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:15992kB managed:15908kB mlocked:0kB dirty:0kB writeback:0kB mapped:0kB shmem:0kB slab_reclaimable:72kB slab_unreclaimable:684kB kernel_stack:32kB pagetables:24kB unstable:0kB bounce:0kB free_pcp:0kB local_pcp:0kB free_cma:0kB writeback_tmp:0kB pages_scanned:0 all_unreclaimable? yes
[44998.356534] lowmem_reserve[]: 0 1660 1660 1660
[44998.357078] Node 0 DMA32 free:44528kB min:44636kB low:55792kB high:66952kB active_anon:8kB inactive_anon:72kB active_file:60kB inactive_file:4360kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:2080744kB managed:1704004kB mlocked:0kB dirty:0kB writeback:120kB mapped:220kB shmem:0kB slab_reclaimable:14644kB slab_unreclaimable:153100kB kernel_stack:2832kB pagetables:5516kB unstable:0kB bounce:0kB free_pcp:432kB local_pcp:4kB free_cma:0kB writeback_tmp:0kB pages_scanned:1441 all_unreclaimable? yes
[44998.361327] lowmem_reserve[]: 0 0 0 0
[44998.361788] Node 0 DMA: 17*4kB (U) 11*8kB (U) 5*16kB (U) 3*32kB (U) 0*64kB 2*128kB (U) 1*256kB (U) 0*512kB 0*1024kB 3*2048kB (M) 0*4096kB = 6988kB
[44998.363459] Node 0 DMA32: 1871*4kB (EM) 2399*8kB (UEM) 1117*16kB (M) 0*32kB 0*64kB 0*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB = 44548kB
[44998.365051] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=1048576kB
[44998.365894] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB
[44998.366710] 1163 total pagecache pages
[44998.367087] 0 pages in swap cache
[44998.367423] Swap cache stats: add 95573, delete 95573, find 17225/30172
[44998.368063] Free swap  = 3521020kB
[44998.368403] Total swap = 3671036kB
[44998.368744] 524184 pages RAM
[44998.369026] 0 pages HighMem/MovableOnly
[44998.369409] 94206 pages reserved
[44998.369738] [ pid ]   uid  tgid total_vm      rss nr_ptes swapents oom_score_adj name
[44998.370539] [  337]     0   337    10055        1      24       98             0 systemd-journal
[44998.371399] [  359]     0   359    29149        0      26       79             0 lvmetad
[44998.372180] [  364]     0   364    11100        1      24      146         -1000 systemd-udevd
[44998.373025] [  455]     0   455    13877        0      27      119         -1000 auditd
[44998.373804] [  481]     0   481     5381        0      15       59             0 irqbalance
[44998.374612] [  484]    32   484    17305        0      36      139             0 rpcbind
[44998.375402] [  486]    81   486    14554        1      33      176          -900 dbus-daemon
[44998.376211] [  489]     0   489    48770        0      37      126             0 gssproxy
[44998.377005] [  492]   998   492    30087        0      29      124             0 chronyd
[44998.377795] [  499]     0   499   137506        0      87     1128             0 NetworkManager
[44998.378649] [  500]   999   500   134608        0      61     1403             0 polkitd
[44998.379433] [  501]     0   501     6594        1      17       77             0 systemd-logind
[44998.380252] [  546]     0   546    26849        1      53      498             0 dhclient
[44998.381077] [  900]     0   900    28203        1      60      257         -1000 sshd
[44998.381846] [  902]     0   902   143453        0      96     3303             0 tuned
[44998.382623] [  909]     0   909    74575        0      75      908             0 rsyslogd
[44998.383418] [  913]    29   913    10605        0      25      209             0 rpc.statd
[44998.384215] [  914]     0   914     6791        1      17       63             0 xinetd
[44998.385003] [  918]   997   918    56469        0      21      274             0 munged
[44998.385798] [  983]     0   983     6476        0      19       52             0 atd
[44998.386544] [  985]     0   985    31570        0      21      155             0 crond
[44998.387319] [  993]     0   993   167982        0      69      547             0 automount
[44998.388108] [  997]     0   997    27522        1       9       32             0 agetty
[44998.388892] [ 1001]     0  1001    27522        1      12       32             0 agetty
[44998.389690] [ 1320]     0  1320    22408        0      42      259             0 master
[44998.390467] [ 1346]    89  1346    22451        0      45      254             0 qmgr
[44998.391223] [10968]     0 10968    39169        0      77      365             0 sshd
[44998.391985] [10970]     0 10970    28296        1      13       58             0 run_test.sh
[44998.392810] [11240]     0 11240    29470        1      16      733             0 bash
[44998.393561] [18492]    89 18492    22434        0      46      252             0 pickup
[44998.394334] [ 1201]     0  1201    29470        0      13      733             0 bash
[44998.395082] [ 1202]     0  1202    26988        0       9       27             0 tee
[44998.395840] [ 1396]     0  1396    29538        1      12      784             0 bash
[44998.396621] [23895]     0 23895    29538        1      12      806             0 bash
[44998.397382] [23896]     0 23896    26988        0      10       28             0 tee
[44998.398123] [24348]     0 24348    29538        0      12      806             0 bash
[44998.398885] [24349]     0 24349    29538        0      12      806             0 bash
[44998.399652] [24854]     0 24854    24023        0      21       87             0 pdsh
[44998.400403] [24855]     0 24855    29228        0      13       42             0 sed
[44998.401140] [27114]     0 27114    29438        1      14      675             0 rundbench
[44998.401941] [27125]     0 27125     1618        0       9       44             0 dbench
[44998.402713] [27126]     0 27126     1620        1       9       56             0 dbench
[44998.403486] [28252]     0 28252    30920        0      17       95             0 tar
[44998.404231] [28253]     0 28253    30852        0      16       77             0 tar
[44998.404986] [28255]     0 28255    40840        6      37      209             0 crond
[44998.405759] Kernel panic - not syncing: Out of memory: system-wide panic_on_oom is enabled

[44998.406685] CPU: 0 PID: 28253 Comm: tar Kdump: loaded Tainted: G        W  OE  ------------   3.10.0-862.9.1.el7.x86_64 #1
[44998.407727] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[44998.408277] Call Trace:
[44998.408522]  [&amp;lt;ffffffffabd0e84e&amp;gt;] dump_stack+0x19/0x1b
[44998.409021]  [&amp;lt;ffffffffabd08b50&amp;gt;] panic+0xe8/0x21f
[44998.409491]  [&amp;lt;ffffffffab798295&amp;gt;] check_panic_on_oom+0x55/0x60
[44998.410058]  [&amp;lt;ffffffffab79868b&amp;gt;] out_of_memory+0x23b/0x4f0
[44998.410588]  [&amp;lt;ffffffffab79f224&amp;gt;] __alloc_pages_nodemask+0xaa4/0xbb0
[44998.411198]  [&amp;lt;ffffffffab7ec525&amp;gt;] alloc_pages_vma+0xb5/0x200
[44998.411747]  [&amp;lt;ffffffffab7dae45&amp;gt;] __read_swap_cache_async+0x115/0x190
[44998.412363]  [&amp;lt;ffffffffab7daee6&amp;gt;] read_swap_cache_async+0x26/0x60
[44998.412951]  [&amp;lt;ffffffffab7dafc8&amp;gt;] swapin_readahead+0xa8/0x110
[44998.413498]  [&amp;lt;ffffffffab7c5f37&amp;gt;] handle_pte_fault+0x777/0xc30
[44998.414062]  [&amp;lt;ffffffffab7c7c3d&amp;gt;] handle_mm_fault+0x39d/0x9b0
[44998.414610]  [&amp;lt;ffffffffabd1b557&amp;gt;] __do_page_fault+0x197/0x4f0
[44998.415172]  [&amp;lt;ffffffffabd1b996&amp;gt;] trace_do_page_fault+0x56/0x150
[44998.415747]  [&amp;lt;ffffffffabd1af22&amp;gt;] do_async_page_fault+0x22/0xf0
[44998.416313]  [&amp;lt;ffffffffabd17788&amp;gt;] async_page_fault+0x28/0x30
[44998.416860]  [&amp;lt;ffffffffab959730&amp;gt;] ? copy_user_generic_string+0x30/0x40
[44998.417479]  [&amp;lt;ffffffffab82142f&amp;gt;] ? cp_new_stat+0x14f/0x180
[44998.418024]  [&amp;lt;ffffffffab8215b4&amp;gt;] SYSC_newfstat+0x34/0x60
[44998.418542]  [&amp;lt;ffffffffab82179e&amp;gt;] SyS_newfstat+0xe/0x10
[44998.419056]  [&amp;lt;ffffffffabd20795&amp;gt;] system_call_fastpath+0x1c/0x21
[44998.419651]  [&amp;lt;ffffffffabd206e1&amp;gt;] ? system_call_after_swapgs+0xae/0x146
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="232430" author="gerrit" created="Wed, 22 Aug 2018 15:16:42 +0000"  >&lt;p&gt;James Nunez (jnunez@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/33052&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/33052&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: stop running replay-dual test 26&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: debba6d3ae60a2448c5a59f46b751b605c2ee69c&lt;/p&gt;</comment>
                            <comment id="233351" author="gerrit" created="Tue, 11 Sep 2018 20:17:07 +0000"  >&lt;p&gt;John L. Hammond (jhammond@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/33052/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/33052/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: stop running replay-dual test 26&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: c5427cbab935259c54957b2ff50e7736f240cd08&lt;/p&gt;</comment>
                            <comment id="304203" author="gerrit" created="Fri, 11 Jun 2021 00:50:06 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/43977&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43977&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: skip replay-dual test_24/25&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 1398315916128263f37a5b53b0d1a9286c5b3574&lt;/p&gt;</comment>
                            <comment id="304204" author="gerrit" created="Fri, 11 Jun 2021 00:55:09 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/43978&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43978&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: re-enable replay-dual test_26&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 8934a96b91ce014a7fe73689fd2d293f436cd716&lt;/p&gt;</comment>
                            <comment id="304205" author="gerrit" created="Fri, 11 Jun 2021 00:55:10 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/43979&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43979&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: re-enable replay-dual test_25&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b188061abcc6e73ea52e99b18797bd74e01e6d75&lt;/p&gt;</comment>
                            <comment id="304206" author="gerrit" created="Fri, 11 Jun 2021 00:56:03 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/43980&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43980&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: re-enable replay-dual test_24&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 87211c8150c48a1a7876ac52cd2e30b34814eaa3&lt;/p&gt;</comment>
                            <comment id="304228" author="gerrit" created="Fri, 11 Jun 2021 07:23:23 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/43982&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43982&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: re-enable replay-dual test_26&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 0f509199a25db416759c3bbcce85c6b79d623585&lt;/p&gt;</comment>
                            <comment id="304297" author="gerrit" created="Fri, 11 Jun 2021 16:33:29 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/43977/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43977/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: skip replay-dual test_24/25&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 13e11cf70cc8102d006a681276094517c22e4a47&lt;/p&gt;</comment>
                            <comment id="305647" author="gerrit" created="Sun, 27 Jun 2021 10:58:16 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/43978/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43978/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: re-enable replay-dual test_26&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 8cd0ad8c3f7755a9ff41da297a5130a6857fae5c&lt;/p&gt;</comment>
                            <comment id="321388" author="gerrit" created="Thu, 23 Dec 2021 07:16:44 +0000"  >&lt;p&gt;&quot;Oleg Drokin &amp;lt;green@whamcloud.com&amp;gt;&quot; merged in patch &lt;a href=&quot;https://review.whamcloud.com/43982/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43982/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7372&quot; title=&quot;replay-dual test_26: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7372&quot;&gt;&lt;del&gt;LU-7372&lt;/del&gt;&lt;/a&gt; tests: re-enable replay-dual test_26&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 2da8f7adbe4a0c3eeecf8fda44fb6a4e4f9a16dd&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="51107">LU-10771</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="52302">LU-11038</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="64608">LU-14749</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="34317">LU-7716</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="34578">LU-7765</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="62787">LU-14406</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="65339">LU-14878</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="22968">LU-4572</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="34007">LU-7640</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="34203">LU-7692</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="38851">LU-8502</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="11273">LU-482</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="34376">LU-7725</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="20203" name="1453855057.tgz" size="25321208" author="jay" created="Wed, 27 Jan 2016 01:03:49 +0000"/>
                            <attachment id="20201" name="log-7372" size="66125" author="sarah" created="Tue, 26 Jan 2016 18:03:45 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10490" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>End date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Thu, 24 Dec 2015 17:50:07 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                            <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxs1r:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                        <customfield id="customfield_10493" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>Start date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Mon, 2 Nov 2015 17:50:07 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                    </customfields>
    </item>
</channel>
</rss>