<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:36:17 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-10572] Hang in conf-sanity test 69 on zfs</title>
                <link>https://jira.whamcloud.com/browse/LU-10572</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;I introduced conf-sanity testing with hang-detection in my testing recently and immediately hit a problem in conf-sanity 69 with zfs.&lt;/p&gt;

&lt;p&gt;Console output looks like this:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 8960.194121] Lustre: DEBUG MARKER: == conf-sanity test 69: replace an OST with the same index =========================================== 13:23:05 (1517163785)
[ 8961.087061] Lustre: Lustre: Build Version: 2.10.57_58_gb6ec609
[ 8961.516996] LNet: Added LNI 192.168.123.104@tcp [8/512/0/180] 
[ 8961.518530] LNet: Accept all, port 988
[ 8963.178685] Lustre: 7186:0:(gss_svc_upcall.c:1185:gss_init_svc_upcall()) Init channel is not opened by lsvcgssd, following request might be dropped until lsvcgssd is active
[ 8963.181215] Lustre: 7186:0:(gss_mech_switch.c:71:lgss_mech_register()) Register gssnull mechanism
[ 8963.182155] Key type lgssc registered
[ 8964.538931] Lustre: Echo OBD driver; http://www.lustre.org/
[ 8970.072662] Lustre: MGS: Connection restored to MGC192.168.123.104@tcp_0 (at 0@lo)
[ 8971.975498] Lustre: lustre-MDT0000: Imperative Recovery not enabled, recovery window 60-180
[ 8975.966431] Lustre: lustre-MDT0000: Connection restored to 192.168.123.104@tcp (at 0@lo)
[ 8976.096272] Lustre: lustre-OST0000: Imperative Recovery not enabled, recovery window 60-180
[ 8977.021749] Lustre: lustre-OST0000: Connection restored to 192.168.123.104@tcp (at 0@lo)
[ 8977.023981] Lustre: Skipped 1 previous similar message
[ 8977.042514] Lustre: lustre-OST0000: deleting orphan objects from 0x0:3 to 0x0:33
[ 8977.556824] Lustre: Mounted lustre-client
[ 8979.082258] Lustre: DEBUG MARKER: On OST0, 2176 inodes available. Want 99936.
[ 9329.642647] LNet: Service thread pid 18129 was inactive for 40.06s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[ 9329.645105] Pid: 18129, comm: mdt03_003
[ 9329.645761] 
Call Trace:
[ 9329.648546]  [&amp;lt;ffffffff81704319&amp;gt;] schedule+0x29/0x70
[ 9329.649263]  [&amp;lt;ffffffff81700952&amp;gt;] schedule_timeout+0x162/0x2a0
[ 9329.650032]  [&amp;lt;ffffffffa0227ea5&amp;gt;] ? cfs_block_sigsinv+0x45/0xa0 [libcfs]
[ 9329.651617]  [&amp;lt;ffffffff810879d0&amp;gt;] ? process_timeout+0x0/0x10
[ 9329.652397]  [&amp;lt;ffffffffa0227ed1&amp;gt;] ? cfs_block_sigsinv+0x71/0xa0 [libcfs]
[ 9329.653319]  [&amp;lt;ffffffffa13be2c8&amp;gt;] osp_precreate_reserve+0x318/0x830 [osp]
[ 9329.654066]  [&amp;lt;ffffffff810b7cc0&amp;gt;] ? default_wake_function+0x0/0x20
[ 9329.654770]  [&amp;lt;ffffffffa13b3b53&amp;gt;] osp_declare_create+0x193/0x590 [osp]
[ 9329.655491]  [&amp;lt;ffffffffa1367ab2&amp;gt;] lod_sub_declare_create+0xe2/0x210 [lod]
[ 9329.656237]  [&amp;lt;ffffffffa1360dce&amp;gt;] lod_qos_declare_object_on+0xbe/0x3a0 [lod]
[ 9329.668533]  [&amp;lt;ffffffffa136688d&amp;gt;] lod_qos_prep_create+0x10dd/0x1800 [lod]
[ 9329.669452]  [&amp;lt;ffffffffa13674e5&amp;gt;] lod_prepare_create+0x255/0x350 [lod]
[ 9329.670180]  [&amp;lt;ffffffffa13588de&amp;gt;] lod_declare_striped_create+0x1ee/0x980 [lod]
[ 9329.671284]  [&amp;lt;ffffffffa1367ab2&amp;gt;] ? lod_sub_declare_create+0xe2/0x210 [lod]
[ 9329.672301]  [&amp;lt;ffffffffa135d074&amp;gt;] lod_declare_create+0x204/0x5a0 [lod]
[ 9329.672874]  [&amp;lt;ffffffffa0232f47&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[ 9329.673435]  [&amp;lt;ffffffffa12300ff&amp;gt;] mdd_declare_create_object_internal+0xdf/0x2f0 [mdd]
[ 9329.674527]  [&amp;lt;ffffffffa1220953&amp;gt;] mdd_declare_create+0x53/0xe30 [mdd]
[ 9329.675323]  [&amp;lt;ffffffffa022f734&amp;gt;] ? libcfs_log_return+0x24/0x30 [libcfs]
[ 9329.676039]  [&amp;lt;ffffffffa1224c69&amp;gt;] mdd_create+0x859/0x1410 [mdd]
[ 9329.676746]  [&amp;lt;ffffffffa12a75f4&amp;gt;] mdt_reint_open+0x2144/0x3190 [mdt]
[ 9329.677500]  [&amp;lt;ffffffffa129b900&amp;gt;] mdt_reint_rec+0x80/0x210 [mdt]
[ 9329.679262]  [&amp;lt;ffffffffa127b5cb&amp;gt;] mdt_reint_internal+0x5fb/0x990 [mdt]
[ 9329.681088]  [&amp;lt;ffffffffa1287927&amp;gt;] mdt_intent_reint+0x157/0x420 [mdt]
[ 9329.682780]  [&amp;lt;ffffffffa127e6a2&amp;gt;] mdt_intent_opc+0x442/0xad0 [mdt]
[ 9329.684883]  [&amp;lt;ffffffffa05e2490&amp;gt;] ? lustre_swab_ldlm_intent+0x0/0x20 [ptlrpc]
[ 9329.686409]  [&amp;lt;ffffffffa1286116&amp;gt;] mdt_intent_policy+0x1a6/0x360 [mdt]
[ 9329.688190]  [&amp;lt;ffffffffa059209a&amp;gt;] ldlm_lock_enqueue+0x35a/0x8d0 [ptlrpc]
[ 9329.693351]  [&amp;lt;ffffffffa05ba543&amp;gt;] ldlm_handle_enqueue0+0x913/0x13f0 [ptlrpc]
[ 9329.694593]  [&amp;lt;ffffffffa0232f47&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[ 9329.696037]  [&amp;lt;ffffffffa063ec32&amp;gt;] tgt_enqueue+0x62/0x210 [ptlrpc]
[ 9329.697482]  [&amp;lt;ffffffffa0646e5b&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[ 9329.698628]  [&amp;lt;ffffffffa05ebc71&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[ 9329.700755]  [&amp;lt;ffffffffa05efa28&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[ 9329.702545]  [&amp;lt;ffffffffa05eefd0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[ 9329.704139]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[ 9329.705436]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[ 9329.706325]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[ 9329.707374]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[ 9329.708383] 
[ 9329.709145] LustreError: dumping log to /tmp/lustre-log.1517164156.18129
[ 9331.070281] LNet: Service thread pid 18129 completed after 41.49s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[10031.724772] LNet: Service thread pid 29224 was inactive for 40.02s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[10031.727847] Pid: 29224, comm: ll_ost00_007
[10031.736752] 
Call Trace:
[10031.738429]  [&amp;lt;ffffffff81704319&amp;gt;] schedule+0x29/0x70
[10031.739283]  [&amp;lt;ffffffffa09a4fad&amp;gt;] cv_wait_common+0x17d/0x240 [spl]
[10031.740453]  [&amp;lt;ffffffff810a4070&amp;gt;] ? autoremove_wake_function+0x0/0x40
[10031.740960]  [&amp;lt;ffffffffa09a5085&amp;gt;] __cv_wait+0x15/0x20 [spl]
[10031.741456]  [&amp;lt;ffffffffa0b5d293&amp;gt;] txg_wait_open+0xe3/0x1a0 [zfs]
[10031.741957]  [&amp;lt;ffffffffa0b065d2&amp;gt;] dmu_tx_wait+0x5c2/0x5d0 [zfs]
[10031.742442]  [&amp;lt;ffffffffa0b06675&amp;gt;] dmu_tx_assign+0x95/0x5a0 [zfs]
[10031.742969]  [&amp;lt;ffffffffa09bad87&amp;gt;] osd_trans_start+0xa7/0x3c0 [osd_zfs]
[10031.743500]  [&amp;lt;ffffffffa1415596&amp;gt;] ofd_precreate_objects+0xa46/0x1aa0 [ofd]
[10031.743995]  [&amp;lt;ffffffffa1408cda&amp;gt;] ofd_create_hdl+0x47a/0x20b0 [ofd]
[10031.744519]  [&amp;lt;ffffffffa022f734&amp;gt;] ? libcfs_log_return+0x24/0x30 [libcfs]
[10031.808155]  [&amp;lt;ffffffffa05e07a0&amp;gt;] ? lustre_pack_reply_v2+0x1a0/0x2a0 [ptlrpc]
[10031.809367]  [&amp;lt;ffffffffa0646e5b&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[10031.810419]  [&amp;lt;ffffffffa05ebc71&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[10031.811779]  [&amp;lt;ffffffffa05efa28&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[10031.812493]  [&amp;lt;ffffffffa05eefd0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[10031.813192]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[10031.813831]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[10031.814592]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[10031.815264]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[10031.815936] 
[10031.816501] LustreError: dumping log to /tmp/lustre-log.1517164858.29224
[10043.338621] LNet: Service thread pid 29224 completed after 51.63s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[10281.450667] LNet: Service thread pid 8784 was inactive for 40.10s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[10281.453783] Pid: 8784, comm: ll_ost06_001
[10281.455676] 
Call Trace:
[10281.456876]  [&amp;lt;ffffffff81704319&amp;gt;] schedule+0x29/0x70
[10281.470252]  [&amp;lt;ffffffffa09a4fad&amp;gt;] cv_wait_common+0x17d/0x240 [spl]
[10281.471091]  [&amp;lt;ffffffff810a4070&amp;gt;] ? autoremove_wake_function+0x0/0x40
[10281.471942]  [&amp;lt;ffffffffa09a5085&amp;gt;] __cv_wait+0x15/0x20 [spl]
[10281.472837]  [&amp;lt;ffffffffa0b5d293&amp;gt;] txg_wait_open+0xe3/0x1a0 [zfs]
[10281.473563]  [&amp;lt;ffffffffa0b065d2&amp;gt;] dmu_tx_wait+0x5c2/0x5d0 [zfs]
[10281.483714]  [&amp;lt;ffffffffa0b06675&amp;gt;] dmu_tx_assign+0x95/0x5a0 [zfs]
[10281.484251]  [&amp;lt;ffffffffa09bad87&amp;gt;] osd_trans_start+0xa7/0x3c0 [osd_zfs]
[10281.485140]  [&amp;lt;ffffffffa1415596&amp;gt;] ofd_precreate_objects+0xa46/0x1aa0 [ofd]
[10281.485926]  [&amp;lt;ffffffffa1408cda&amp;gt;] ofd_create_hdl+0x47a/0x20b0 [ofd]
[10281.486792]  [&amp;lt;ffffffffa022f734&amp;gt;] ? libcfs_log_return+0x24/0x30 [libcfs]
[10281.496076]  [&amp;lt;ffffffffa05e07a0&amp;gt;] ? lustre_pack_reply_v2+0x1a0/0x2a0 [ptlrpc]
[10281.497057]  [&amp;lt;ffffffffa0646e5b&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[10281.497794]  [&amp;lt;ffffffffa05ebc71&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[10281.499265]  [&amp;lt;ffffffffa05efa28&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[10281.512041]  [&amp;lt;ffffffffa05eefd0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[10281.512751]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[10281.513402]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[10281.514076]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[10281.514786]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[10281.515472] 
[10281.516186] LustreError: dumping log to /tmp/lustre-log.1517165107.8784
[10293.208563] LNet: Service thread pid 8784 completed after 51.86s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[10656.875336] LNet: Service thread pid 687 was inactive for 40.11s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[10656.878646] Pid: 687, comm: ll_ost01_004
[10656.879276] 
Call Trace:
[10656.880437]  [&amp;lt;ffffffff81704319&amp;gt;] schedule+0x29/0x70
[10656.883153]  [&amp;lt;ffffffffa09a4fad&amp;gt;] cv_wait_common+0x17d/0x240 [spl]
[10656.883758]  [&amp;lt;ffffffff810a4070&amp;gt;] ? autoremove_wake_function+0x0/0x40
[10656.884387]  [&amp;lt;ffffffffa09a5085&amp;gt;] __cv_wait+0x15/0x20 [spl]
[10656.884938]  [&amp;lt;ffffffffa0b5d293&amp;gt;] txg_wait_open+0xe3/0x1a0 [zfs]
[10656.885448]  [&amp;lt;ffffffffa0b065d2&amp;gt;] dmu_tx_wait+0x5c2/0x5d0 [zfs]
[10656.885962]  [&amp;lt;ffffffffa0b06675&amp;gt;] dmu_tx_assign+0x95/0x5a0 [zfs]
[10656.886435]  [&amp;lt;ffffffffa09bad87&amp;gt;] osd_trans_start+0xa7/0x3c0 [osd_zfs]
[10656.886957]  [&amp;lt;ffffffffa1415596&amp;gt;] ofd_precreate_objects+0xa46/0x1aa0 [ofd]
[10656.887439]  [&amp;lt;ffffffffa1408cda&amp;gt;] ofd_create_hdl+0x47a/0x20b0 [ofd]
[10656.888005]  [&amp;lt;ffffffffa022f734&amp;gt;] ? libcfs_log_return+0x24/0x30 [libcfs]
[10656.888573]  [&amp;lt;ffffffffa05e07a0&amp;gt;] ? lustre_pack_reply_v2+0x1a0/0x2a0 [ptlrpc]
[10656.889143]  [&amp;lt;ffffffffa0646e5b&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[10656.910113]  [&amp;lt;ffffffffa05ebc71&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[10656.911377]  [&amp;lt;ffffffffa05efa28&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[10656.912038]  [&amp;lt;ffffffffa05eefd0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[10656.912543]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[10656.913030]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[10656.913633]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[10656.914298]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[10656.914770] 
[10656.915235] LustreError: dumping log to /tmp/lustre-log.1517165483.687
[10664.977508] LNet: Service thread pid 687 completed after 48.21s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[10817.394032] LNet: Service thread pid 3812 was inactive for 40.05s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[10817.396885] Pid: 3812, comm: ll_ost05_007
[10817.397532] 
Call Trace:
[10817.407582]  [&amp;lt;ffffffff81704319&amp;gt;] schedule+0x29/0x70
[10817.414462]  [&amp;lt;ffffffffa09a4fad&amp;gt;] cv_wait_common+0x17d/0x240 [spl]
[10817.415219]  [&amp;lt;ffffffff810a4070&amp;gt;] ? autoremove_wake_function+0x0/0x40
[10817.415935]  [&amp;lt;ffffffffa09a5085&amp;gt;] __cv_wait+0x15/0x20 [spl]
[10817.416663]  [&amp;lt;ffffffffa0b5d293&amp;gt;] txg_wait_open+0xe3/0x1a0 [zfs]
[10817.417376]  [&amp;lt;ffffffffa0b065d2&amp;gt;] dmu_tx_wait+0x5c2/0x5d0 [zfs]
[10817.434514]  [&amp;lt;ffffffffa0b06675&amp;gt;] dmu_tx_assign+0x95/0x5a0 [zfs]
[10817.435237]  [&amp;lt;ffffffffa09bad87&amp;gt;] osd_trans_start+0xa7/0x3c0 [osd_zfs]
[10817.435958]  [&amp;lt;ffffffffa1415596&amp;gt;] ofd_precreate_objects+0xa46/0x1aa0 [ofd]
[10817.436663]  [&amp;lt;ffffffffa1408cda&amp;gt;] ofd_create_hdl+0x47a/0x20b0 [ofd]
[10817.437367]  [&amp;lt;ffffffffa022f734&amp;gt;] ? libcfs_log_return+0x24/0x30 [libcfs]
[10817.453458]  [&amp;lt;ffffffffa05e07a0&amp;gt;] ? lustre_pack_reply_v2+0x1a0/0x2a0 [ptlrpc]
[10817.459190]  [&amp;lt;ffffffffa0646e5b&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[10817.460135]  [&amp;lt;ffffffffa05ebc71&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[10817.461755]  [&amp;lt;ffffffffa05efa28&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[10817.462857]  [&amp;lt;ffffffffa05eefd0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[10817.463724]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[10817.464563]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[10817.465320]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[10817.474397]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[10817.475266] 
[10817.475867] LustreError: dumping log to /tmp/lustre-log.1517165643.3812
[10828.195354] LNet: Service thread pid 3812 completed after 50.85s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[10875.628351] LNet: Service thread pid 7993 was inactive for 40.03s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[10875.631117] Pid: 7993, comm: mdt06_002
[10875.631734] 
Call Trace:
[10875.633730]  [&amp;lt;ffffffff81704319&amp;gt;] schedule+0x29/0x70
[10875.634417]  [&amp;lt;ffffffff81700952&amp;gt;] schedule_timeout+0x162/0x2a0
[10875.636968]  [&amp;lt;ffffffffa0227ea5&amp;gt;] ? cfs_block_sigsinv+0x45/0xa0 [libcfs]
[10875.637682]  [&amp;lt;ffffffff810879d0&amp;gt;] ? process_timeout+0x0/0x10
[10875.638380]  [&amp;lt;ffffffffa0227ed1&amp;gt;] ? cfs_block_sigsinv+0x71/0xa0 [libcfs]
[10875.639103]  [&amp;lt;ffffffffa13be2c8&amp;gt;] osp_precreate_reserve+0x318/0x830 [osp]
[10875.639799]  [&amp;lt;ffffffff810b7cc0&amp;gt;] ? default_wake_function+0x0/0x20
[10875.648290]  [&amp;lt;ffffffffa13b3b53&amp;gt;] osp_declare_create+0x193/0x590 [osp]
[10875.649029]  [&amp;lt;ffffffffa1367ab2&amp;gt;] lod_sub_declare_create+0xe2/0x210 [lod]
[10875.649755]  [&amp;lt;ffffffffa1360dce&amp;gt;] lod_qos_declare_object_on+0xbe/0x3a0 [lod]
[10875.650483]  [&amp;lt;ffffffffa136688d&amp;gt;] lod_qos_prep_create+0x10dd/0x1800 [lod]
[10875.651199]  [&amp;lt;ffffffffa13674e5&amp;gt;] lod_prepare_create+0x255/0x350 [lod]
[10875.651900]  [&amp;lt;ffffffffa13588de&amp;gt;] lod_declare_striped_create+0x1ee/0x980 [lod]
[10875.653563]  [&amp;lt;ffffffffa1367ab2&amp;gt;] ? lod_sub_declare_create+0xe2/0x210 [lod]
[10875.654291]  [&amp;lt;ffffffffa135d074&amp;gt;] lod_declare_create+0x204/0x5a0 [lod]
[10875.655017]  [&amp;lt;ffffffffa0232f47&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[10875.655885]  [&amp;lt;ffffffffa12300ff&amp;gt;] mdd_declare_create_object_internal+0xdf/0x2f0 [mdd]
[10875.668080]  [&amp;lt;ffffffffa1220953&amp;gt;] mdd_declare_create+0x53/0xe30 [mdd]
[10875.677779]  [&amp;lt;ffffffffa022f734&amp;gt;] ? libcfs_log_return+0x24/0x30 [libcfs]
[10875.678567]  [&amp;lt;ffffffffa1224c69&amp;gt;] mdd_create+0x859/0x1410 [mdd]
[10875.679110]  [&amp;lt;ffffffffa12a75f4&amp;gt;] mdt_reint_open+0x2144/0x3190 [mdt]
[10875.679778]  [&amp;lt;ffffffffa129b900&amp;gt;] mdt_reint_rec+0x80/0x210 [mdt]
[10875.680363]  [&amp;lt;ffffffffa127b5cb&amp;gt;] mdt_reint_internal+0x5fb/0x990 [mdt]
[10875.680878]  [&amp;lt;ffffffffa1287927&amp;gt;] mdt_intent_reint+0x157/0x420 [mdt]
[10875.682050]  [&amp;lt;ffffffffa127e6a2&amp;gt;] mdt_intent_opc+0x442/0xad0 [mdt]
[10875.682629]  [&amp;lt;ffffffffa05e2490&amp;gt;] ? lustre_swab_ldlm_intent+0x0/0x20 [ptlrpc]
[10875.683321]  [&amp;lt;ffffffffa1286116&amp;gt;] mdt_intent_policy+0x1a6/0x360 [mdt]
[10875.683850]  [&amp;lt;ffffffffa059209a&amp;gt;] ldlm_lock_enqueue+0x35a/0x8d0 [ptlrpc]
[10875.684545]  [&amp;lt;ffffffffa05ba543&amp;gt;] ldlm_handle_enqueue0+0x913/0x13f0 [ptlrpc]
[10875.685128]  [&amp;lt;ffffffffa0232f47&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[10875.685825]  [&amp;lt;ffffffffa063ec32&amp;gt;] tgt_enqueue+0x62/0x210 [ptlrpc]
[10875.686373]  [&amp;lt;ffffffffa0646e5b&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[10875.686909]  [&amp;lt;ffffffffa05ebc71&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[10875.688364]  [&amp;lt;ffffffffa05efa28&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[10875.688910]  [&amp;lt;ffffffffa05eefd0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[10875.689628]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[10875.690224]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[10875.690698]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[10875.691349]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[10875.691818] 
[10875.692431] LustreError: dumping log to /tmp/lustre-log.1517165701.7993
[10875.723487] LNet: Service thread pid 7993 completed after 40.13s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[11386.093776] LNet: Service thread pid 7976 was inactive for 40.10s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[11386.096569] Pid: 7976, comm: mdt01_000
[11386.099291] 
Call Trace:
[11386.100643]  [&amp;lt;ffffffff81704319&amp;gt;] schedule+0x29/0x70
[11386.101324]  [&amp;lt;ffffffff81700952&amp;gt;] schedule_timeout+0x162/0x2a0
[11386.102030]  [&amp;lt;ffffffffa0227ea5&amp;gt;] ? cfs_block_sigsinv+0x45/0xa0 [libcfs]
[11386.102855]  [&amp;lt;ffffffff810879d0&amp;gt;] ? process_timeout+0x0/0x10
[11386.103616]  [&amp;lt;ffffffffa0227ed1&amp;gt;] ? cfs_block_sigsinv+0x71/0xa0 [libcfs]
[11386.104349]  [&amp;lt;ffffffffa13be2c8&amp;gt;] osp_precreate_reserve+0x318/0x830 [osp]
[11386.105054]  [&amp;lt;ffffffff810b7cc0&amp;gt;] ? default_wake_function+0x0/0x20
[11386.105869]  [&amp;lt;ffffffffa13b3b53&amp;gt;] osp_declare_create+0x193/0x590 [osp]
[11386.106720]  [&amp;lt;ffffffffa1367ab2&amp;gt;] lod_sub_declare_create+0xe2/0x210 [lod]
[11386.108825]  [&amp;lt;ffffffffa1360dce&amp;gt;] lod_qos_declare_object_on+0xbe/0x3a0 [lod]
[11386.163398]  [&amp;lt;ffffffffa136688d&amp;gt;] lod_qos_prep_create+0x10dd/0x1800 [lod]
[11386.164178]  [&amp;lt;ffffffffa13674e5&amp;gt;] lod_prepare_create+0x255/0x350 [lod]
[11386.166825]  [&amp;lt;ffffffffa13588de&amp;gt;] lod_declare_striped_create+0x1ee/0x980 [lod]
[11386.168090]  [&amp;lt;ffffffffa1367ab2&amp;gt;] ? lod_sub_declare_create+0xe2/0x210 [lod]
[11386.168794]  [&amp;lt;ffffffffa135d074&amp;gt;] lod_declare_create+0x204/0x5a0 [lod]
[11386.178636]  [&amp;lt;ffffffffa0232f47&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[11386.180684]  [&amp;lt;ffffffffa12300ff&amp;gt;] mdd_declare_create_object_internal+0xdf/0x2f0 [mdd]
[11386.197534]  [&amp;lt;ffffffffa1220953&amp;gt;] mdd_declare_create+0x53/0xe30 [mdd]
[11386.202676]  [&amp;lt;ffffffffa022f734&amp;gt;] ? libcfs_log_return+0x24/0x30 [libcfs]
[11386.203980]  [&amp;lt;ffffffffa1224c69&amp;gt;] mdd_create+0x859/0x1410 [mdd]
[11386.213411]  [&amp;lt;ffffffffa12a75f4&amp;gt;] mdt_reint_open+0x2144/0x3190 [mdt]
[11386.218277]  [&amp;lt;ffffffffa129b900&amp;gt;] mdt_reint_rec+0x80/0x210 [mdt]
[11386.228289]  [&amp;lt;ffffffffa127b5cb&amp;gt;] mdt_reint_internal+0x5fb/0x990 [mdt]
[11386.245324]  [&amp;lt;ffffffffa1287927&amp;gt;] mdt_intent_reint+0x157/0x420 [mdt]
[11386.298037]  [&amp;lt;ffffffffa127e6a2&amp;gt;] mdt_intent_opc+0x442/0xad0 [mdt]
[11386.298932]  [&amp;lt;ffffffffa05e2490&amp;gt;] ? lustre_swab_ldlm_intent+0x0/0x20 [ptlrpc]
[11386.300428]  [&amp;lt;ffffffffa1286116&amp;gt;] mdt_intent_policy+0x1a6/0x360 [mdt]
[11386.312540]  [&amp;lt;ffffffffa059209a&amp;gt;] ldlm_lock_enqueue+0x35a/0x8d0 [ptlrpc]
[11386.313331]  [&amp;lt;ffffffffa05ba543&amp;gt;] ldlm_handle_enqueue0+0x913/0x13f0 [ptlrpc]
[11386.314102]  [&amp;lt;ffffffffa0232f47&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[11386.314873]  [&amp;lt;ffffffffa063ec32&amp;gt;] tgt_enqueue+0x62/0x210 [ptlrpc]
[11386.371225]  [&amp;lt;ffffffffa0646e5b&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[11386.372055]  [&amp;lt;ffffffffa05ebc71&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[11386.373355]  [&amp;lt;ffffffffa05efa28&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[11386.392312]  [&amp;lt;ffffffffa05eefd0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[11386.393037]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[11386.393686]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[11386.394358]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[11386.395042]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[11386.395735] 
[11386.396429] LustreError: dumping log to /tmp/lustre-log.1517166212.7976
[11391.441778] LNet: Service thread pid 7976 completed after 45.44s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[11646.381483] LNet: Service thread pid 27607 was inactive for 40.14s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[11646.406010] Pid: 27607, comm: ll_ost04_009
[11646.406793] 
Call Trace:
[11646.408187]  [&amp;lt;ffffffff81704319&amp;gt;] schedule+0x29/0x70
[11646.427239]  [&amp;lt;ffffffffa09a4fad&amp;gt;] cv_wait_common+0x17d/0x240 [spl]
[11646.427962]  [&amp;lt;ffffffff810a4070&amp;gt;] ? autoremove_wake_function+0x0/0x40
[11646.428667]  [&amp;lt;ffffffffa09a5085&amp;gt;] __cv_wait+0x15/0x20 [spl]
[11646.429380]  [&amp;lt;ffffffffa0b5d293&amp;gt;] txg_wait_open+0xe3/0x1a0 [zfs]
[11646.483045]  [&amp;lt;ffffffffa0b065d2&amp;gt;] dmu_tx_wait+0x5c2/0x5d0 [zfs]
[11646.483791]  [&amp;lt;ffffffffa0b06675&amp;gt;] dmu_tx_assign+0x95/0x5a0 [zfs]
[11646.487195]  [&amp;lt;ffffffffa09bad87&amp;gt;] osd_trans_start+0xa7/0x3c0 [osd_zfs]
[11646.488017]  [&amp;lt;ffffffffa1415596&amp;gt;] ofd_precreate_objects+0xa46/0x1aa0 [ofd]
[11646.488726]  [&amp;lt;ffffffffa1408cda&amp;gt;] ofd_create_hdl+0x47a/0x20b0 [ofd]
[11646.489443]  [&amp;lt;ffffffffa022f734&amp;gt;] ? libcfs_log_return+0x24/0x30 [libcfs]
[11646.490213]  [&amp;lt;ffffffffa05e07a0&amp;gt;] ? lustre_pack_reply_v2+0x1a0/0x2a0 [ptlrpc]
[11646.490978]  [&amp;lt;ffffffffa0646e5b&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[11646.491713]  [&amp;lt;ffffffffa05ebc71&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[11646.585560]  [&amp;lt;ffffffffa05efa28&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[11646.587445]  [&amp;lt;ffffffffa05eefd0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[11646.588380]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[11646.589045]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[11646.589502]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[11646.589984]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[11646.590438] 
[11646.590834] LustreError: dumping log to /tmp/lustre-log.1517166472.27607
[11650.848143] Pid: 7986, comm: mdt04_001
[11650.911957] 
Call Trace:
[11650.915395]  [&amp;lt;ffffffff81704319&amp;gt;] schedule+0x29/0x70
[11650.915855]  [&amp;lt;ffffffff81700952&amp;gt;] schedule_timeout+0x162/0x2a0
[11650.916386]  [&amp;lt;ffffffffa0227ea5&amp;gt;] ? cfs_block_sigsinv+0x45/0xa0 [libcfs]
[11650.917146]  [&amp;lt;ffffffff810879d0&amp;gt;] ? process_timeout+0x0/0x10
[11650.917808]  [&amp;lt;ffffffffa0227ed1&amp;gt;] ? cfs_block_sigsinv+0x71/0xa0 [libcfs]
[11650.919882]  [&amp;lt;ffffffffa13be2c8&amp;gt;] osp_precreate_reserve+0x318/0x830 [osp]
[11650.920577]  [&amp;lt;ffffffff810b7cc0&amp;gt;] ? default_wake_function+0x0/0x20
[11650.927677]  [&amp;lt;ffffffffa13b3b53&amp;gt;] osp_declare_create+0x193/0x590 [osp]
[11650.928576]  [&amp;lt;ffffffffa1367ab2&amp;gt;] lod_sub_declare_create+0xe2/0x210 [lod]
[11650.929423]  [&amp;lt;ffffffffa1360dce&amp;gt;] lod_qos_declare_object_on+0xbe/0x3a0 [lod]
[11650.930299]  [&amp;lt;ffffffffa136688d&amp;gt;] lod_qos_prep_create+0x10dd/0x1800 [lod]
[11650.931383]  [&amp;lt;ffffffffa13674e5&amp;gt;] lod_prepare_create+0x255/0x350 [lod]
[11650.932126]  [&amp;lt;ffffffffa13588de&amp;gt;] lod_declare_striped_create+0x1ee/0x980 [lod]
[11650.933401]  [&amp;lt;ffffffffa1367ab2&amp;gt;] ? lod_sub_declare_create+0xe2/0x210 [lod]
[11650.942676]  [&amp;lt;ffffffffa135d074&amp;gt;] lod_declare_create+0x204/0x5a0 [lod]
[11650.943445]  [&amp;lt;ffffffffa0232f47&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[11650.944187]  [&amp;lt;ffffffffa12300ff&amp;gt;] mdd_declare_create_object_internal+0xdf/0x2f0 [mdd]
[11650.945472]  [&amp;lt;ffffffffa1220953&amp;gt;] mdd_declare_create+0x53/0xe30 [mdd]
[11650.955257]  [&amp;lt;ffffffffa022f734&amp;gt;] ? libcfs_log_return+0x24/0x30 [libcfs]
[11650.955987]  [&amp;lt;ffffffffa1224c69&amp;gt;] mdd_create+0x859/0x1410 [mdd]
[11650.956711]  [&amp;lt;ffffffffa12a75f4&amp;gt;] mdt_reint_open+0x2144/0x3190 [mdt]
[11650.957500]  [&amp;lt;ffffffffa129b900&amp;gt;] mdt_reint_rec+0x80/0x210 [mdt]
[11650.958246]  [&amp;lt;ffffffffa127b5cb&amp;gt;] mdt_reint_internal+0x5fb/0x990 [mdt]
[11650.958967]  [&amp;lt;ffffffffa1287927&amp;gt;] mdt_intent_reint+0x157/0x420 [mdt]
[11650.959667]  [&amp;lt;ffffffffa127e6a2&amp;gt;] mdt_intent_opc+0x442/0xad0 [mdt]
[11650.960434]  [&amp;lt;ffffffffa05e2490&amp;gt;] ? lustre_swab_ldlm_intent+0x0/0x20 [ptlrpc]
[11650.961232]  [&amp;lt;ffffffffa1286116&amp;gt;] mdt_intent_policy+0x1a6/0x360 [mdt]
[11650.961996]  [&amp;lt;ffffffffa059209a&amp;gt;] ldlm_lock_enqueue+0x35a/0x8d0 [ptlrpc]
[11650.962893]  [&amp;lt;ffffffffa05ba543&amp;gt;] ldlm_handle_enqueue0+0x913/0x13f0 [ptlrpc]
[11650.963881]  [&amp;lt;ffffffffa0232f47&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[11650.964779]  [&amp;lt;ffffffffa063ec32&amp;gt;] tgt_enqueue+0x62/0x210 [ptlrpc]
[11650.965676]  [&amp;lt;ffffffffa0646e5b&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[11650.967478]  [&amp;lt;ffffffffa05ebc71&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[11650.968910]  [&amp;lt;ffffffffa05efa28&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[11650.969707]  [&amp;lt;ffffffffa05eefd0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[11650.970237]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[11650.982955]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[11650.983796]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[11651.002062]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[11651.002815] 
[11651.003438] LustreError: dumping log to /tmp/lustre-log.1517166477.7986
[11657.848212] LNet: Service thread pid 7986 completed after 47.10s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[12003.206088] LNet: Service thread pid 29404 was inactive for 40.16s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[12003.248977] LNet: Skipped 1 previous similar message
[12003.249773] Pid: 29404, comm: ll_ost00_008
[12003.250417] 
Call Trace:
[12003.251575]  [&amp;lt;ffffffff81704319&amp;gt;] schedule+0x29/0x70
[12003.252248]  [&amp;lt;ffffffffa09a4fad&amp;gt;] cv_wait_common+0x17d/0x240 [spl]
[12003.266969]  [&amp;lt;ffffffff810a4070&amp;gt;] ? autoremove_wake_function+0x0/0x40
[12003.267718]  [&amp;lt;ffffffffa09a5085&amp;gt;] __cv_wait+0x15/0x20 [spl]
[12003.268463]  [&amp;lt;ffffffffa0b5d293&amp;gt;] txg_wait_open+0xe3/0x1a0 [zfs]
[12003.270220]  [&amp;lt;ffffffffa0b065d2&amp;gt;] dmu_tx_wait+0x5c2/0x5d0 [zfs]
[12003.270931]  [&amp;lt;ffffffffa0b06675&amp;gt;] dmu_tx_assign+0x95/0x5a0 [zfs]
[12003.271637]  [&amp;lt;ffffffffa09bad87&amp;gt;] osd_trans_start+0xa7/0x3c0 [osd_zfs]
[12003.278620]  [&amp;lt;ffffffffa1415596&amp;gt;] ofd_precreate_objects+0xa46/0x1aa0 [ofd]
[12003.279417]  [&amp;lt;ffffffffa1408cda&amp;gt;] ofd_create_hdl+0x47a/0x20b0 [ofd]
[12003.288353]  [&amp;lt;ffffffffa022f734&amp;gt;] ? libcfs_log_return+0x24/0x30 [libcfs]
[12003.289170]  [&amp;lt;ffffffffa05e07a0&amp;gt;] ? lustre_pack_reply_v2+0x1a0/0x2a0 [ptlrpc]
[12003.289937]  [&amp;lt;ffffffffa0646e5b&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[12003.290673]  [&amp;lt;ffffffffa05ebc71&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[12003.298125]  [&amp;lt;ffffffffa05efa28&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[12003.298851]  [&amp;lt;ffffffffa05eefd0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[12003.299707]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[12003.309644]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[12003.310163]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[12003.310627]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[12003.311173] 
[12003.311602] LustreError: dumping log to /tmp/lustre-log.1517166829.29404
[12008.410465] LNet: Service thread pid 29404 completed after 45.36s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[12008.414754] LNet: Skipped 1 previous similar message
[12037.466656] SysRq : Trigger a crash
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;As far as I can tell ldiskfs is unaffected.&lt;/p&gt;

&lt;p&gt;I checked in maloo and it passes there in like 600 seconds, so it&apos;s not a regular zfs slowness or anything of the sort.&lt;/p&gt;</description>
                <environment></environment>
        <key id="50420">LU-10572</key>
            <summary>Hang in conf-sanity test 69 on zfs</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="green">Oleg Drokin</reporter>
                        <labels>
                    </labels>
                <created>Sun, 28 Jan 2018 20:04:28 +0000</created>
                <updated>Fri, 18 Jun 2021 20:19:27 +0000</updated>
                            <resolved>Fri, 18 Jun 2021 20:19:27 +0000</resolved>
                                                                        <due></due>
                            <votes>0</votes>
                                    <watches>3</watches>
                                                                            <comments>
                            <comment id="250623" author="simmonsja" created="Wed, 3 Jul 2019 17:27:17 +0000"  >&lt;p&gt;I wonder if upstream kernel commit&#160;&lt;/p&gt;

&lt;p&gt;84e07b9d0ac8728b1865b23498d746861a8ab4c2 would resolve this.&lt;/p&gt;</comment>
                            <comment id="250658" author="simmonsja" created="Thu, 4 Jul 2019 01:45:11 +0000"  >&lt;p&gt;The back trace looks like a signal problem but it&apos;s not &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/sad.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&#160;Worth a try&lt;/p&gt;</comment>
                            <comment id="304910" author="adilger" created="Fri, 18 Jun 2021 20:19:27 +0000"  >&lt;p&gt;Have not seen this in the past 6 months.  While this test is skipped for SLOW in review sessions, it is run in full test sessions (101 passes in a row for ZFS since 2020-12-01).&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="49351">LU-10250</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzrtr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>