<?xml version="1.0" encoding="UTF-8"?>
<!--
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:50:08 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-12156] interop: replay-single test 70f hangs in lctl</title>
                <link>https://jira.whamcloud.com/browse/LU-12156</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;replay-single test_70f hangs in lctl. Looking at a recent hang, &lt;a href=&quot;https://testing.whamcloud.com/test_sets/fd5c3ddc-557d-11e9-9720-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/fd5c3ddc-557d-11e9-9720-52540065bddc&lt;/a&gt; , the last thing seen in the client test log is&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&#8230;
Started lustre-OST0000
CMD: onyx-46vm1.onyx.whamcloud.com,onyx-46vm2 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/qt-3.3/bin:/usr/lib64/compat-openmpi16/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/usr/sbin:/sbin:/bin::/sbin:/bin:/usr/sbin: NAME=autotest_config bash rpc.sh wait_import_state_mount \(FULL\|IDLE\) osc.lustre-OST0000-osc-ffff*.ost_server_uuid 
onyx-46vm1: == rpc test complete, duration -o sec ================================================================ 05:09:05 (1554181745)
onyx-46vm2: == rpc test complete, duration -o sec ================================================================ 05:09:05 (1554181745)
onyx-46vm1: onyx-46vm1.onyx.whamcloud.com: executing wait_import_state_mount (FULL|IDLE) osc.lustre-OST0000-osc-ffff*.ost_server_uuid
onyx-46vm2: onyx-46vm2.onyx.whamcloud.com: executing wait_import_state_mount (FULL|IDLE) osc.lustre-OST0000-osc-ffff*.ost_server_uuid
onyx-46vm1: CMD: onyx-46vm1.onyx.whamcloud.com lctl get_param -n at_max
onyx-46vm1: osc.lustre-OST0000-osc-ffff*.ost_server_uuid in FULL state after 0 sec
onyx-46vm2: CMD: onyx-46vm2.onyx.whamcloud.com lctl get_param -n at_max
onyx-46vm2: osc.lustre-OST0000-osc-ffff*.ost_server_uuid in FULL state after 0 sec
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Looking at the console log on the client 1 (vm1), we see lctl hanging&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&#8230;
[ 6712.545693] Lustre: DEBUG MARKER: lctl get_param -n at_max
[ 6712.769788] Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0000-osc-ffff*.ost_server_uuid in FULL state after 0 sec
[ 6712.985691] Lustre: DEBUG MARKER: osc.lustre-OST0000-osc-ffff*.ost_server_uuid in FULL state after 0 sec
[ 6720.276410] Lustre: DEBUG MARKER: mcreate /mnt/lustre/fsa-$(hostname); rm /mnt/lustre/fsa-$(hostname)
[ 6720.621479] Lustre: DEBUG MARKER: if [ -d /mnt/lustre2 ]; then mcreate /mnt/lustre2/fsa-$(hostname); rm /mnt/lustre2/fsa-$(hostname); fi
[ 6723.273530] Lustre: DEBUG MARKER: local REPLAY BARRIER on lustre-OST0000
[ 6724.493700] Lustre: DEBUG MARKER: /usr/sbin/lctl mark test_70f failing OST 2 times
[ 6724.691853] Lustre: DEBUG MARKER: test_70f failing OST 2 times
[ 6727.305612] Lustre: lustre-OST0000-osc-ffff96191a900000: Connection to lustre-OST0000 (at 10.2.8.199@tcp) was lost; in progress operations using this service will wait for recovery to complete
[ 6727.307316] Lustre: Skipped 8 previous similar messages
[ 6840.242867] INFO: task lctl:18670 blocked for more than 120 seconds.
[ 6840.243565] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[ 6840.244388] lctl            D ffff961939174100     0 18670  18669 0x00000080
[ 6840.245230] Call Trace:
[ 6840.245537]  [&amp;lt;ffffffffb5b94a9d&amp;gt;] ? list_del+0xd/0x30
[ 6840.246074]  [&amp;lt;ffffffffb5f67bc9&amp;gt;] schedule+0x29/0x70
[ 6840.246655]  [&amp;lt;ffffffffb5f656a1&amp;gt;] schedule_timeout+0x221/0x2d0
[ 6840.247264]  [&amp;lt;ffffffffb58d64f0&amp;gt;] ? try_to_wake_up+0x190/0x390
[ 6840.247981]  [&amp;lt;ffffffffb5f67f7d&amp;gt;] wait_for_completion+0xfd/0x140
[ 6840.248574]  [&amp;lt;ffffffffb58d67b0&amp;gt;] ? wake_up_state+0x20/0x20
[ 6840.249239]  [&amp;lt;ffffffffc0c9e6fd&amp;gt;] __ldlm_bl_to_thread+0xad/0x150 [ptlrpc]
[ 6840.250015]  [&amp;lt;ffffffffc0c9ed2b&amp;gt;] ldlm_bl_to_thread+0x33b/0x510 [ptlrpc]
[ 6840.250706]  [&amp;lt;ffffffffc0ca33d9&amp;gt;] ldlm_bl_to_thread_list+0x19/0x20 [ptlrpc]
[ 6840.251431]  [&amp;lt;ffffffffc0c9d0c6&amp;gt;] ldlm_cancel_lru+0x76/0x170 [ptlrpc]
[ 6840.252173]  [&amp;lt;ffffffffc0c8dbe5&amp;gt;] lru_size_store+0x145/0x480 [ptlrpc]
[ 6840.252910]  [&amp;lt;ffffffffc0abc09a&amp;gt;] lustre_attr_store+0x1a/0x20 [obdclass]
[ 6840.253661]  [&amp;lt;ffffffffb5acb6e2&amp;gt;] sysfs_kf_write+0x42/0x50
[ 6840.254232]  [&amp;lt;ffffffffb5acacbb&amp;gt;] kernfs_fop_write+0xeb/0x160
[ 6840.254926]  [&amp;lt;ffffffffb5a410a0&amp;gt;] vfs_write+0xc0/0x1f0
[ 6840.255456]  [&amp;lt;ffffffffb5f74d21&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[ 6840.256119]  [&amp;lt;ffffffffb5a41ebf&amp;gt;] SyS_write+0x7f/0xf0
[ 6840.256708]  [&amp;lt;ffffffffb5f74d21&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[ 6840.257385]  [&amp;lt;ffffffffb5f74ddb&amp;gt;] system_call_fastpath+0x22/0x27
[ 6840.258055]  [&amp;lt;ffffffffb5f74d21&amp;gt;] ? system_call_after_swapgs+0xae/0x146
&#8230;
[ 7276.489872] LustreError: 6024:0:(osc_cache.c:955:osc_extent_wait()) extent ffff96191b551dc0@{[0 -&amp;gt; 0/1023], [3|0|+|rpc|wiuY|ffff961924371280], [28672|1|+|-|ffff96191d8b2fc0|1024|ffff96193a76d140]} lustre-OST0000-osc-ffff96191a900000: wait ext to 0 timedout, recovery in progress?
[ 7276.492281] LustreError: 6024:0:(osc_cache.c:955:osc_extent_wait()) ### extent: ffff96191b551dc0 ns: lustre-OST0000-osc-ffff96191a900000 lock: ffff96191d8b2fc0/0x40b5a0904351ea00 lrc: 3/0,0 mode: PW/PW res: [0x2782:0x0:0x0].0x0 rrc: 2 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;4095) flags: 0x29400000000 nid: local remote: 0xafa2f3157ef8581d expref: -99 pid: 13958 timeout: 0 lvb_type: 1
[ 7302.224848] Lustre: 14779:0:(client.c:2134:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1554182067/real 1554182067]  req@ffff96191b307600 x1629670629270288/t0(0) o4-&amp;gt;lustre-OST0000-osc-ffff96191a900000@10.2.8.199@tcp:6/4 lens 4584/448 e 4 to 1 dl 1554182175 ref 2 fl Rpc:X/2/ffffffff rc 0/-1
[ 7302.227597] Lustre: 14779:0:(client.c:2134:ptlrpc_expire_one_request()) Skipped 4 previous similar messages
[ 7320.304861] INFO: task lctl:18670 blocked for more than 120 seconds.
[ 7320.305552] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[ 7320.306332] lctl            D ffff961939174100     0 18670  18669 0x00000080
[ 7320.307084] Call Trace:
[ 7320.307424]  [&amp;lt;ffffffffb5b94a9d&amp;gt;] ? list_del+0xd/0x30
[ 7320.307938]  [&amp;lt;ffffffffb5f67bc9&amp;gt;] schedule+0x29/0x70
[ 7320.308481]  [&amp;lt;ffffffffb5f656a1&amp;gt;] schedule_timeout+0x221/0x2d0
[ 7320.309063]  [&amp;lt;ffffffffb58d64f0&amp;gt;] ? try_to_wake_up+0x190/0x390
[ 7320.309782]  [&amp;lt;ffffffffb5f67f7d&amp;gt;] wait_for_completion+0xfd/0x140
[ 7320.310382]  [&amp;lt;ffffffffb58d67b0&amp;gt;] ? wake_up_state+0x20/0x20
[ 7320.311029]  [&amp;lt;ffffffffc0c9e6fd&amp;gt;] __ldlm_bl_to_thread+0xad/0x150 [ptlrpc]
[ 7320.311701]  [&amp;lt;ffffffffc0c9ed2b&amp;gt;] ldlm_bl_to_thread+0x33b/0x510 [ptlrpc]
[ 7320.312385]  [&amp;lt;ffffffffc0ca33d9&amp;gt;] ldlm_bl_to_thread_list+0x19/0x20 [ptlrpc]
[ 7320.313158]  [&amp;lt;ffffffffc0c9d0c6&amp;gt;] ldlm_cancel_lru+0x76/0x170 [ptlrpc]
[ 7320.313793]  [&amp;lt;ffffffffc0c8dbe5&amp;gt;] lru_size_store+0x145/0x480 [ptlrpc]
[ 7320.314453]  [&amp;lt;ffffffffc0abc09a&amp;gt;] lustre_attr_store+0x1a/0x20 [obdclass]
[ 7320.315184]  [&amp;lt;ffffffffb5acb6e2&amp;gt;] sysfs_kf_write+0x42/0x50
[ 7320.315720]  [&amp;lt;ffffffffb5acacbb&amp;gt;] kernfs_fop_write+0xeb/0x160
[ 7320.316292]  [&amp;lt;ffffffffb5a410a0&amp;gt;] vfs_write+0xc0/0x1f0
[ 7320.316961]  [&amp;lt;ffffffffb5f74d21&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[ 7320.317602]  [&amp;lt;ffffffffb5a41ebf&amp;gt;] SyS_write+0x7f/0xf0
[ 7320.318106]  [&amp;lt;ffffffffb5f74d21&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[ 7320.318813]  [&amp;lt;ffffffffb5f74ddb&amp;gt;] system_call_fastpath+0x22/0x27
[ 7320.319404]  [&amp;lt;ffffffffb5f74d21&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[ 7409.400854] Lustre: lustre-OST0000-osc-ffff96191a900000: Connection to lustre-OST0000 (at 10.2.8.199@tcp) was lost; in progress operations using this service will wait for recovery to complete
[ 7409.402543] Lustre: Skipped 4 previous similar messages
[ 7409.405522] Lustre: lustre-OST0000-osc-ffff96191a900000: Connection restored to 10.2.8.199@tcp (at 10.2.8.199@tcp)
[ 7409.406583] Lustre: Skipped 5 previous similar messages
[ 7440.319864] INFO: task lctl:18670 blocked for more than 120 seconds.
[ 7440.320764] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
&#8230;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The console log for the OSS (vm3) has many errors. Here are just a few&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&#8230;
[ 6629.884030] Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0000-osc-ffff*.ost_server_uuid in FULL state after 0 sec
[ 6629.981017] Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0000-osc-ffff*.ost_server_uuid in FULL state after 0 sec
[ 6630.095178] Lustre: DEBUG MARKER: osc.lustre-OST0000-osc-ffff*.ost_server_uuid in FULL state after 0 sec
[ 6630.232869] Lustre: DEBUG MARKER: osc.lustre-OST0000-osc-ffff*.ost_server_uuid in FULL state after 0 sec
[ 6636.601540] Lustre: DEBUG MARKER: sync; sync; sync
[ 6638.070508] Lustre: DEBUG MARKER: /usr/sbin/lctl --device lustre-OST0000 notransno
[ 6638.421691] Lustre: DEBUG MARKER: modprobe dm-flakey;
[ 6638.421691] 			 dmsetup targets | grep -q flakey
[ 6638.775038] Lustre: DEBUG MARKER: dmsetup table /dev/mapper/ost1_flakey
[ 6639.121066] Lustre: DEBUG MARKER: dmsetup suspend --nolockfs --noflush /dev/mapper/ost1_flakey
[ 6639.467783] Lustre: DEBUG MARKER: dmsetup load /dev/mapper/ost1_flakey --table &quot;0 4194304 flakey 252:0 0 0 1800 1 drop_writes&quot;
[ 6639.818180] Lustre: DEBUG MARKER: dmsetup resume /dev/mapper/ost1_flakey
[ 6640.180823] Lustre: DEBUG MARKER: /usr/sbin/lctl mark ost1 REPLAY BARRIER on lustre-OST0000
[ 6640.357370] Lustre: DEBUG MARKER: ost1 REPLAY BARRIER on lustre-OST0000
[ 6641.603284] Lustre: DEBUG MARKER: /usr/sbin/lctl mark test_70f failing OST 2 times
[ 6641.800703] Lustre: DEBUG MARKER: test_70f failing OST 2 times
[ 6641.985657] Lustre: DEBUG MARKER: grep -c /mnt/lustre-ost1&apos; &apos; /proc/mounts || true
[ 6642.334386] Lustre: DEBUG MARKER: umount -d /mnt/lustre-ost1
[ 6642.516911] Lustre: Failing over lustre-OST0000
[ 6643.823863] Lustre: lustre-OST0000: Not available for connect from 10.2.8.198@tcp (stopping)
[ 6643.824741] Lustre: Skipped 1 previous similar message
[ 6646.530498] Lustre: lustre-OST0000: Not available for connect from 10.2.8.200@tcp (stopping)
[ 6646.531419] Lustre: Skipped 1 previous similar message
[ 6653.837143] Lustre: lustre-OST0000: Not available for connect from 10.2.8.198@tcp (stopping)
[ 6653.838059] Lustre: Skipped 2 previous similar messages
[ 6656.520352] Lustre: lustre-OST0000 is waiting for obd_unlinked_exports more than 8 seconds. The obd refcount = 4. Is it stuck?
[ 6656.521512] Lustre: lustre-OST0000: UNLINKED ffff9d8d17319c00 f1ce44c5-b64b-e307-df96-edafd36f5e17 10.2.8.197@tcp 2 (1 0 0) 1 0 0 0:           (null)  34359849384 stale:0
[ 6663.853170] Lustre: lustre-OST0000: Not available for connect from 10.2.8.198@tcp (stopping)
[ 6663.854170] Lustre: Skipped 5 previous similar messages
[ 6666.624361] LNet: Service thread pid 23196 was inactive for 40.13s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[ 6666.626018] Pid: 23196, comm: ll_ost_io00_030 3.10.0-957.el7_lustre.x86_64 #1 SMP Wed Dec 12 15:03:08 UTC 2018
[ 6666.626965] Call Trace:
[ 6666.627234]  [&amp;lt;ffffffffc0c3054e&amp;gt;] target_bulk_io+0x4ae/0xac0 [ptlrpc]
[ 6666.627927]  [&amp;lt;ffffffffc0cdc5df&amp;gt;] tgt_brw_write+0x11af/0x17d0 [ptlrpc]
[ 6666.628618]  [&amp;lt;ffffffffc0cd842a&amp;gt;] tgt_request_handle+0x92a/0x1370 [ptlrpc]
[ 6666.629320]  [&amp;lt;ffffffffc0c80e5b&amp;gt;] ptlrpc_server_handle_request+0x23b/0xaa0 [ptlrpc]
[ 6666.630107]  [&amp;lt;ffffffffc0c845a2&amp;gt;] ptlrpc_main+0xa92/0x1e40 [ptlrpc]
[ 6666.630764]  [&amp;lt;ffffffffbd8c1c31&amp;gt;] kthread+0xd1/0xe0
[ 6666.631281]  [&amp;lt;ffffffffbdf74c37&amp;gt;] ret_from_fork_nospec_end+0x0/0x39
[ 6666.632221]  [&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff
[ 6666.632797] LustreError: dumping log to /tmp/lustre-log.1554181623.23196
[ 6672.522355] Lustre: lustre-OST0000 is waiting for obd_unlinked_exports more than 16 seconds. The obd refcount = 4. Is it stuck?
[ 6672.523567] Lustre: lustre-OST0000: UNLINKED ffff9d8d17319c00 f1ce44c5-b64b-e307-df96-edafd36f5e17 10.2.8.197@tcp 2 (1 0 0) 1 0 0 0:           (null)  34359849384 stale:0
[ 6681.324388] Lustre: lustre-OST0000: Not available for connect from 10.2.8.200@tcp (stopping)
[ 6681.325429] Lustre: Skipped 10 previous similar messages
[ 6704.524362] Lustre: lustre-OST0000 is waiting for obd_unlinked_exports more than 32 seconds. The obd refcount = 4. Is it stuck?
[ 6704.525590] Lustre: lustre-OST0000: UNLINKED ffff9d8d17319c00 f1ce44c5-b64b-e307-df96-edafd36f5e17 10.2.8.197@tcp 2 (1 0 0) 1 0 0 0:           (null)  34359849384 stale:0
[ 6713.933190] Lustre: lustre-OST0000: Not available for connect from 10.2.8.198@tcp (stopping)
[ 6713.934636] Lustre: Skipped 18 previous similar messages
[ 6726.487412] LNet: Service thread pid 23196 completed after 100.00s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[ 6743.633474] LustreError: 25479:0:(client.c:1166:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff9d8d14d60f00 x1629670662416544/t0(0) o101-&amp;gt;lustre-MDT0000-lwp-OST0000@10.2.8.200@tcp:23/10 lens 456/496 e 0 to 0 dl 0 ref 2 fl Rpc:/0/ffffffff rc 0/-1
[ 6743.635773] LustreError: 25479:0:(qsd_reint.c:56:qsd_reint_completion()) lustre-OST0000: failed to enqueue global quota lock, glb fid:[0x200000006:0x20000:0x0], rc:-5
[ 6743.637213] LustreError: 25479:0:(qsd_reint.c:56:qsd_reint_completion()) Skipped 7 previous similar messages
[ 6768.526352] Lustre: lustre-OST0000 is waiting for obd_unlinked_exports more than 64 seconds. The obd refcount = 2. Is it stuck?
[ 6768.556672] Lustre: server umount lustre-OST0000 complete
&#8230;
[ 6826.496386] LNet: Service thread pid 23196 was inactive for 40.12s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[ 6826.498063] Pid: 23196, comm: ll_ost_io00_030 3.10.0-957.el7_lustre.x86_64 #1 SMP Wed Dec 12 15:03:08 UTC 2018
[ 6826.499022] Call Trace:
[ 6826.499284]  [&amp;lt;ffffffffc0c3054e&amp;gt;] target_bulk_io+0x4ae/0xac0 [ptlrpc]
[ 6826.499983]  [&amp;lt;ffffffffc0cdc5df&amp;gt;] tgt_brw_write+0x11af/0x17d0 [ptlrpc]
[ 6826.500678]  [&amp;lt;ffffffffc0cd842a&amp;gt;] tgt_request_handle+0x92a/0x1370 [ptlrpc]
[ 6826.501429]  [&amp;lt;ffffffffc0c80e5b&amp;gt;] ptlrpc_server_handle_request+0x23b/0xaa0 [ptlrpc]
[ 6826.502405]  [&amp;lt;ffffffffc0c845a2&amp;gt;] ptlrpc_main+0xa92/0x1e40 [ptlrpc]
[ 6826.503075]  [&amp;lt;ffffffffbd8c1c31&amp;gt;] kthread+0xd1/0xe0
[ 6826.503580]  [&amp;lt;ffffffffbdf74c37&amp;gt;] ret_from_fork_nospec_end+0x0/0x39
[ 6826.504242]  [&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff
[ 6826.504841] LustreError: dumping log to /tmp/lustre-log.1554181783.23196
[ 6886.380373] LustreError: 23196:0:(ldlm_lib.c:3237:target_bulk_io()) @@@ timeout on bulk WRITE after 100+0s  req@ffff9d8d1a5dac50 x1629670629270288/t0(0) o4-&amp;gt;f1ce44c5-b64b-e307-df96-edafd36f5e17@10.2.8.197@tcp:5/0 lens 4584/448 e 0 to 0 dl 1554181850 ref 1 fl Interpret:/2/0 rc 0/0
[ 6886.382825] LustreError: 23196:0:(ldlm_lib.c:3237:target_bulk_io()) Skipped 1 previous similar message
[ 6886.383776] Lustre: lustre-OST0000: Bulk IO write error with f1ce44c5-b64b-e307-df96-edafd36f5e17 (at 10.2.8.197@tcp), client will retry: rc = -110
&#8230;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;


&lt;p&gt;So far we are only seeing replay-single hang in test 70f in interop testing (server/client):&lt;br/&gt;
2.10.6/2.12.0.52 - &lt;a href=&quot;https://testing.whamcloud.com/test_sets/fd5c3ddc-557d-11e9-9720-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/fd5c3ddc-557d-11e9-9720-52540065bddc&lt;/a&gt;&lt;br/&gt;
2.10.6/2.12.0.28 - &lt;a href=&quot;https://testing.whamcloud.com/test_sets/890b500a-4e0a-11e9-b98a-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/890b500a-4e0a-11e9-b98a-52540065bddc&lt;/a&gt;&lt;br/&gt;
2.10.6/2.12.50.85 - &lt;a href=&quot;https://testing.whamcloud.com/test_sets/6167f5d8-25a3-11e9-b901-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/6167f5d8-25a3-11e9-b901-52540065bddc&lt;/a&gt;&lt;br/&gt;
2.10.6/2.12.50.83 - &lt;a href=&quot;https://testing.whamcloud.com/test_sets/8633b650-2552-11e9-b54c-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/8633b650-2552-11e9-b54c-52540065bddc&lt;/a&gt;&lt;br/&gt;
2.10.6/2.12.0 RC4 - &lt;a href=&quot;https://testing.whamcloud.com/test_sets/2f9059a2-0635-11e9-a97c-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/2f9059a2-0635-11e9-a97c-52540065bddc&lt;/a&gt;&lt;br/&gt;
2.10.5/2.11.55.65 - &lt;a href=&quot;https://testing.whamcloud.com/test_sets/93246f30-cbca-11e8-82f2-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/93246f30-cbca-11e8-82f2-52540065bddc&lt;/a&gt;&lt;br/&gt;
2.10.4 RC2/2.11.53.32 - &lt;a href=&quot;https://testing.whamcloud.com/test_sets/7060aef2-9815-11e8-b0aa-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/7060aef2-9815-11e8-b0aa-52540065bddc&lt;/a&gt;&lt;/p&gt;</description>
                <environment></environment>
        <key id="55347">LU-12156</key>
            <summary>interop: replay-single test 70f hangs in lctl</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="1" iconUrl="https://jira.whamcloud.com/images/icons/statuses/open.png" description="The issue is open and ready for the assignee to start work on it.">Open</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                            <label>interop</label>
                    </labels>
                <created>Thu, 4 Apr 2019 02:02:51 +0000</created>
                <updated>Thu, 4 Apr 2019 13:54:38 +0000</updated>
                                            <version>Lustre 2.10.6</version>
                    <version>Lustre 2.12.1</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>1</watches>
                                                                                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00ehb:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>