<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:08:33 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
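For instance, assuming the standard JIRA XML issue view URL format, the full request would look like:
https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-14300/LU-14300.xml?field=key&field=summary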
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-14300] sanity-sec test 18 hangs in txg_quiesce on MDS</title>
                <link>https://jira.whamcloud.com/browse/LU-14300</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;sanity-sec test_18 hangs. It looks like this issue started on 15 May 2020 with &lt;a href=&quot;https://testing.whamcloud.com/test_sets/8514ff93-a5ac-45f0-9584-dff4d4e42eae&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/8514ff93-a5ac-45f0-9584-dff4d4e42eae&lt;/a&gt; for Lustre 2.13.53.163.&lt;/p&gt;

&lt;p&gt;Looking at the suite_log for &lt;a href=&quot;https://testing.whamcloud.com/test_sets/6875529f-ecc2-432e-9f20-e8810a08448c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/6875529f-ecc2-432e-9f20-e8810a08448c&lt;/a&gt;, the last thing we see is&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;CMD: trevis-201vm1.trevis.whamcloud.com runas -u60003 -g60003 -G60003 lfs quota -q /mnt/lustre
running as uid/gid/euid/egid 60003/60003/60003/60003, groups: 60003
 [lfs] [quota] [-q] [/mnt/lustre]
CMD: trevis-201vm2 chmod 000 /mnt/lustre/d18.sanity-sec
CMD: trevis-201vm2 chmod 001 /mnt/lustre/d18.sanity-sec
CMD: trevis-201vm2 chmod 002 /mnt/lustre/d18.sanity-sec
CMD: trevis-201vm2 chmod 003 /mnt/lustre/d18.sanity-sec
CMD: trevis-201vm2 chmod 004 /mnt/lustre/d18.sanity-sec
CMD: trevis-201vm2 chmod 005 /mnt/lustre/d18.sanity-sec
CMD: trevis-201vm2 chmod 006 /mnt/lustre/d18.sanity-sec
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Looking at the MDS (vm4) console log, we see the hang&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[32726.826140] Lustre: mdt00_003: service thread pid 819155 was inactive for 41.957 seconds. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[32726.826328] Pid: 816983, comm: mdt_rdpg00_000 4.18.0-240.1.1.el8_lustre.x86_64 #1 SMP Wed Dec 9 21:09:51 UTC 2020
[32726.829004] Lustre: Skipped 1 previous similar message
[32726.830561] Call Trace TBD:
[32726.831789] Pid: 819155, comm: mdt00_003 4.18.0-240.1.1.el8_lustre.x86_64 #1 SMP Wed Dec 9 21:09:51 UTC 2020
[32726.833277] Call Trace TBD:
[32808.745683] INFO: task txg_quiesce:816666 blocked for more than 120 seconds.
[32808.746826]       Tainted: P           OE    --------- -  - 4.18.0-240.1.1.el8_lustre.x86_64 #1
[32808.748143] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[32808.749328] txg_quiesce     D    0 816666      2 0x80004080
[32808.750184] Call Trace:
[32808.750601]  __schedule+0x2a6/0x700
[32808.751160]  schedule+0x38/0xa0
[32808.751671]  cv_wait_common+0xfb/0x130 [spl]
[32808.752325]  ? finish_wait+0x80/0x80
[32808.752958]  txg_quiesce_thread+0x2ac/0x3a0 [zfs]
[32808.753741]  ? txg_sync_thread+0x480/0x480 [zfs]
[32808.754470]  ? __thread_exit+0x20/0x20 [spl]
[32808.755136]  thread_generic_wrapper+0x6f/0x80 [spl]
[32808.755892]  kthread+0x112/0x130
[32808.756397]  ? kthread_flush_work_fn+0x10/0x10
[32808.757084]  ret_from_fork+0x35/0x40
[32808.757657] INFO: task mdt_rdpg00_000:816983 blocked for more than 120 seconds.
[32808.758764]       Tainted: P           OE    --------- -  - 4.18.0-240.1.1.el8_lustre.x86_64 #1
[32808.760077] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[32808.761260] mdt_rdpg00_000  D    0 816983      2 0x80004080
[32808.762114] Call Trace:
[32808.762502]  __schedule+0x2a6/0x700
[32808.763046]  schedule+0x38/0xa0
[32808.763536]  rwsem_down_write_slowpath+0x32d/0x4e0
[32808.764303]  ? dmu_tx_callback_register+0x57/0x80 [zfs]
[32808.765124]  qti_lqes_write_lock+0x87/0x190 [lquota]
[32808.765896]  qmt_dqacq0+0x4eb/0x2380 [lquota]
[32808.766566]  ? lqe_locate_find+0x546/0x7c0 [lquota]
[32808.767316]  ? qmt_dqacq+0x668/0x790 [lquota]
[32808.767996]  qmt_dqacq+0x668/0x790 [lquota]
[32808.768669]  mdt_quota_dqacq+0x59/0x120 [mdt]
[32808.769391]  tgt_request_handle+0xc78/0x1910 [ptlrpc]
[32808.770205]  ptlrpc_server_handle_request+0x31a/0xba0 [ptlrpc]
[32808.771130]  ptlrpc_main+0xba4/0x14a0 [ptlrpc]
[32808.771910]  ? __schedule+0x2ae/0x700
[32808.772528]  ? ptlrpc_register_service+0xfb0/0xfb0 [ptlrpc]
[32808.773381]  kthread+0x112/0x130
[32808.773908]  ? kthread_flush_work_fn+0x10/0x10
[32808.774601]  ret_from_fork+0x35/0x40
[32808.775157] INFO: task qmt_reba_lustre:817021 blocked for more than 120 seconds.
[32808.776291]       Tainted: P           OE    --------- -  - 4.18.0-240.1.1.el8_lustre.x86_64 #1
[32808.777627] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[32808.778806] qmt_reba_lustre D    0 817021      2 0x80004080
[32808.779652] Call Trace:
[32808.780033]  __schedule+0x2a6/0x700
[32808.780586]  schedule+0x38/0xa0
[32808.781080]  rwsem_down_write_slowpath+0x32d/0x4e0
[32808.781827]  ? cfs_hash_bd_lookup_intent+0x2a/0xc0 [libcfs]
[32808.782688]  qmt_reba_thread+0x6ca/0x9b0 [lquota]
[32808.783407]  ? qmt_glimpse_lock.isra.19+0xfb0/0xfb0 [lquota]
[32808.784269]  kthread+0x112/0x130
[32808.784777]  ? kthread_flush_work_fn+0x10/0x10
[32808.785452]  ret_from_fork+0x35/0x40
[32808.786011] INFO: task mdt00_003:819155 blocked for more than 120 seconds.
[32808.787045]       Tainted: P           OE    --------- -  - 4.18.0-240.1.1.el8_lustre.x86_64 #1
[32808.788345] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[32808.789522] mdt00_003       D    0 819155      2 0x80004080
[32808.790365] Call Trace:
[32808.790760]  __schedule+0x2a6/0x700
[32808.791290]  ? __wake_up_common+0x7a/0x190
[32808.791922]  schedule+0x38/0xa0
[32808.792406]  io_schedule+0x12/0x40
[32808.792941]  cv_wait_common+0xaf/0x130 [spl]
[32808.793592]  ? finish_wait+0x80/0x80
[32808.794184]  txg_wait_synced_impl+0xc2/0x110 [zfs]
[32808.794967]  txg_wait_synced+0xc/0x40 [zfs]
[32808.795619]  osd_trans_stop+0x51f/0x560 [osd_zfs]
[32808.796374]  top_trans_stop+0x9f/0xd30 [ptlrpc]
[32808.797089]  ? mdd_acl_chmod+0x185/0x4b0 [mdd]
[32808.797791]  lod_trans_stop+0x9b/0x340 [lod]
[32808.798453]  ? mdd_attr_set+0x54a/0xc20 [mdd]
[32808.799129]  mdd_trans_stop+0x29/0x17f [mdd]
[32808.799808]  mdt_reint_setattr+0xc7a/0x12a0 [mdt]
[32808.800565]  ? nodemap_get_from_exp+0xea/0x270 [ptlrpc]
[32808.801372]  mdt_reint_rec+0x11f/0x250 [mdt]
[32808.802044]  mdt_reint_internal+0x498/0x780 [mdt]
[32808.802778]  mdt_reint+0x5e/0x100 [mdt]
[32808.803400]  tgt_request_handle+0xc78/0x1910 [ptlrpc]
[32808.804212]  ptlrpc_server_handle_request+0x31a/0xba0 [ptlrpc]
[32808.805135]  ptlrpc_main+0xba4/0x14a0 [ptlrpc]
[32808.805824]  ? __schedule+0x2ae/0x700
[32808.806412]  ? ptlrpc_register_service+0xfb0/0xfb0 [ptlrpc]
[32808.808415]  kthread+0x112/0x130
[32808.808950]  ? kthread_flush_work_fn+0x10/0x10
[32808.809648]  ret_from_fork+0x35/0x40
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Logs for more hangs are at&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/a8822b08-9e99-4320-8750-fbfffad657a5&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/a8822b08-9e99-4320-8750-fbfffad657a5&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/713a62aa-3393-4b43-8178-87398949b6c0&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/713a62aa-3393-4b43-8178-87398949b6c0&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/a801aef1-a4d8-4470-a54e-49c953f2c246&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/a801aef1-a4d8-4470-a54e-49c953f2c246&lt;/a&gt;&lt;/p&gt;</description>
                <environment>ZFS and single MDS</environment>
        <key id="62245">LU-14300</key>
            <summary>sanity-sec test 18 hangs in txg_quiesce on MDS</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="scherementsev">Sergey Cheremencev</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                    </labels>
                <created>Wed, 6 Jan 2021 16:39:55 +0000</created>
                <updated>Fri, 15 Oct 2021 18:10:16 +0000</updated>
                            <resolved>Mon, 11 Oct 2021 13:32:43 +0000</resolved>
                                    <version>Lustre 2.14.0</version>
                                    <fixVersion>Lustre 2.15.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="288854" author="jamesanunez" created="Wed, 6 Jan 2021 19:24:32 +0000"  >&lt;p&gt;Sergey - &lt;br/&gt;
These hangs started around the time the OST pool quotas patch landed. Would you please review this failure and check whether it could be due to that patch? Does this test need to change based on the OST pool quota patch?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;</comment>
                            <comment id="289730" author="pjones" created="Mon, 18 Jan 2021 14:48:10 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=sergey&quot; class=&quot;user-hover&quot; rel=&quot;sergey&quot;&gt;sergey&lt;/a&gt; how is your investigation progressing?&lt;/p&gt;</comment>
                            <comment id="290060" author="sergey" created="Thu, 21 Jan 2021 18:45:55 +0000"  >&lt;p&gt;It seems qmt is stuck due to problems at ZFS layer:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[25302.786844] Pid: 15606, comm: mdt_rdpg00_004 3.10.0-1127.19.1.el7.x86_64 #1 SMP Tue Aug 25 17:23:54 UTC 2020
[25302.788312] Call Trace:
[25302.788742]  [&amp;lt;ffffffffc065d325&amp;gt;] cv_wait_common+0x125/0x150 [spl]
[25302.789802]  [&amp;lt;ffffffffc065d365&amp;gt;] __cv_wait+0x15/0x20 [spl]
[25302.790749]  [&amp;lt;ffffffffc07afa5b&amp;gt;] dmu_tx_wait+0x20b/0x3b0 [zfs]
[25302.791765]  [&amp;lt;ffffffffc07afc91&amp;gt;] dmu_tx_assign+0x91/0x490 [zfs]
[25302.792780]  [&amp;lt;ffffffffc11a0109&amp;gt;] osd_trans_start+0x199/0x440 [osd_zfs]
[25302.793914]  [&amp;lt;ffffffffc1159351&amp;gt;] qmt_trans_start_with_slv+0x331/0x7f0 [lquota]
[25302.795114]  [&amp;lt;ffffffffc1150c83&amp;gt;] qmt_dqacq0+0x2d3/0x1ac0 [lquota]
[25302.796090]  [&amp;lt;ffffffffc1152d77&amp;gt;] qmt_dqacq+0x707/0x810 [lquota]
[25302.797089]  [&amp;lt;ffffffffc1332fd2&amp;gt;] mdt_quota_dqacq+0x62/0x150 [mdt]
[25302.798125]  [&amp;lt;ffffffffc0fbf6ba&amp;gt;] tgt_request_handle+0x7ea/0x1750 [ptlrpc]
[25302.799602]  [&amp;lt;ffffffffc0f5f1c6&amp;gt;] ptlrpc_server_handle_request+0x256/0xb10 [ptlrpc]
[25302.800862]  [&amp;lt;ffffffffc0f63d1c&amp;gt;] ptlrpc_main+0xb3c/0x14e0 [ptlrpc]
[25302.801919]  [&amp;lt;ffffffffa94c6691&amp;gt;] kthread+0xd1/0xe0
[25302.802755]  [&amp;lt;ffffffffa9b92d37&amp;gt;] ret_from_fork_nospec_end+0x0/0x39
[25302.803791]  [&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff &lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Since qmt_dqacq0 takes a write semaphore, other quota threads that also need this semaphore may hang as well:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[23041.077700] INFO: task qmt_reba_lustre:31100 blocked for more than 120 seconds.
[23041.078865] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[23041.080041] qmt_reba_lustre D ffff9606d327d230     0 31100      2 0x00000080
[23041.081222] Call Trace:
[23041.081617]  [&amp;lt;ffffffffa9b85da9&amp;gt;] schedule+0x29/0x70
[23041.082386]  [&amp;lt;ffffffffa9b874e5&amp;gt;] rwsem_down_write_failed+0x215/0x3c0
[23041.083442]  [&amp;lt;ffffffffa9796777&amp;gt;] call_rwsem_down_write_failed+0x17/0x30
[23041.084458]  [&amp;lt;ffffffffa9b850fd&amp;gt;] down_write+0x2d/0x3d
[23041.085322]  [&amp;lt;ffffffffc1157b24&amp;gt;] qmt_reba_thread+0xa54/0xa70 [lquota]
[23041.086332]  [&amp;lt;ffffffffc11570d0&amp;gt;] ? qmt_glimpse_lock.isra.15+0xaa0/0xaa0 [lquota]
[23041.087526]  [&amp;lt;ffffffffa94c6691&amp;gt;] kthread+0xd1/0xe0
[23041.088287]  [&amp;lt;ffffffffa94c65c0&amp;gt;] ? insert_kthread_work+0x40/0x40
[23041.089278]  [&amp;lt;ffffffffa9b92d37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[23041.090278]  [&amp;lt;ffffffffa94c65c0&amp;gt;] ? insert_kthread_work+0x40/0x40 &lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;There are also several hung tasks not related to quota, but related to ZFS:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[22920.963274] INFO: task txg_quiesce:30858 blocked for more than 120 seconds.
[22920.964413] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[22920.965608] txg_quiesce     D ffff9606ffc1acc0     0 30858      2 0x00000080
[22920.966876] Call Trace:
[22920.967286]  [&amp;lt;ffffffffa94d3820&amp;gt;] ? task_rq_unlock+0x20/0x20
[22920.968202]  [&amp;lt;ffffffffa9b85da9&amp;gt;] schedule+0x29/0x70
[22920.968981]  [&amp;lt;ffffffffc065d325&amp;gt;] cv_wait_common+0x125/0x150 [spl]
[22920.969995]  [&amp;lt;ffffffffa94c7780&amp;gt;] ? wake_up_atomic_t+0x30/0x30
[22920.970890]  [&amp;lt;ffffffffc065d365&amp;gt;] __cv_wait+0x15/0x20 [spl]
[22920.971852]  [&amp;lt;ffffffffc081165b&amp;gt;] txg_quiesce_thread+0x2cb/0x3c0 [zfs]
[22920.972878]  [&amp;lt;ffffffffc0811390&amp;gt;] ? txg_init+0x2b0/0x2b0 [zfs]
[22920.973845]  [&amp;lt;ffffffffc0664e03&amp;gt;] thread_generic_wrapper+0x73/0x80 [spl]
[22920.974875]  [&amp;lt;ffffffffc0664d90&amp;gt;] ? __thread_exit+0x20/0x20 [spl]
[22920.975863]  [&amp;lt;ffffffffa94c6691&amp;gt;] kthread+0xd1/0xe0
[22920.976638]  [&amp;lt;ffffffffa94c65c0&amp;gt;] ? insert_kthread_work+0x40/0x40
[22920.977638]  [&amp;lt;ffffffffa9b92d37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[22920.978633]  [&amp;lt;ffffffffa94c65c0&amp;gt;] ? insert_kthread_work+0x40/0x40
[22920.979624] INFO: task qmt_reba_lustre:31100 blocked for more than 120 seconds.&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;And&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[22920.993099] INFO: task mdt00_003:638 blocked for more than 120 seconds.
[22920.994173] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[22920.995356] mdt00_003       D ffff9606de0f9070     0   638      2 0x00000080
[22920.996534] Call Trace:
[22920.996979]  [&amp;lt;ffffffffa9b85da9&amp;gt;] schedule+0x29/0x70
[22920.997749]  [&amp;lt;ffffffffa9b838b1&amp;gt;] schedule_timeout+0x221/0x2d0
[22920.998702]  [&amp;lt;ffffffffa94db9a2&amp;gt;] ? default_wake_function+0x12/0x20
[22920.999675]  [&amp;lt;ffffffffa946d39e&amp;gt;] ? kvm_clock_get_cycles+0x1e/0x20
[22921.000681]  [&amp;lt;ffffffffa9b8549d&amp;gt;] io_schedule_timeout+0xad/0x130
[22921.001601]  [&amp;lt;ffffffffa94c7306&amp;gt;] ? prepare_to_wait_exclusive+0x56/0x90
[22921.002668]  [&amp;lt;ffffffffa9b85538&amp;gt;] io_schedule+0x18/0x20
[22921.003472]  [&amp;lt;ffffffffc065d2b2&amp;gt;] cv_wait_common+0xb2/0x150 [spl]
[22921.004458]  [&amp;lt;ffffffffa94c7780&amp;gt;] ? wake_up_atomic_t+0x30/0x30
[22921.005357]  [&amp;lt;ffffffffc065d388&amp;gt;] __cv_wait_io+0x18/0x20 [spl]
[22921.006336]  [&amp;lt;ffffffffc0810e75&amp;gt;] txg_wait_synced_impl+0xe5/0x130 [zfs]
[22921.007375]  [&amp;lt;ffffffffc0810ed0&amp;gt;] txg_wait_synced+0x10/0x50 [zfs]
[22921.008375]  [&amp;lt;ffffffffc11a39bd&amp;gt;] osd_trans_stop+0x54d/0x5f0 [osd_zfs]
[22921.009430]  [&amp;lt;ffffffffc0fcf9a3&amp;gt;] top_trans_stop+0xa3/0xbf0 [ptlrpc]
[22921.010470]  [&amp;lt;ffffffffc146f7ff&amp;gt;] ? lod_xattr_get+0x37f/0x890 [lod]
[22921.011438]  [&amp;lt;ffffffffc1449afc&amp;gt;] lod_trans_stop+0x25c/0x340 [lod]
[22921.012452]  [&amp;lt;ffffffffc15071f9&amp;gt;] ? mdd_acl_chmod+0x169/0x490 [mdd]
[22921.013419]  [&amp;lt;ffffffffc150961e&amp;gt;] mdd_trans_stop+0x2e/0x174 [mdd]
[22921.014412]  [&amp;lt;ffffffffc14fd2df&amp;gt;] mdd_attr_set+0x65f/0xc80 [mdd]
[22921.015356]  [&amp;lt;ffffffffc1362436&amp;gt;] mdt_attr_set+0x1c6/0x7b0 [mdt]
[22921.016349]  [&amp;lt;ffffffffc13633aa&amp;gt;] mdt_reint_setattr+0x70a/0xf40 [mdt]
[22921.017346]  [&amp;lt;ffffffffc13514c1&amp;gt;] ? mdt_root_squash+0x21/0x430 [mdt]
[22921.018391]  [&amp;lt;ffffffffc1350d3b&amp;gt;] ? ucred_set_audit_enabled.isra.15+0x3b/0x60 [mdt]
[22921.019571]  [&amp;lt;ffffffffc13663e3&amp;gt;] mdt_reint_rec+0x83/0x210 [mdt]
[22921.020566]  [&amp;lt;ffffffffc133ea70&amp;gt;] mdt_reint_internal+0x720/0xaf0 [mdt]
[22921.021574]  [&amp;lt;ffffffffc134a607&amp;gt;] mdt_reint+0x67/0x140 [mdt]
[22921.022527]  [&amp;lt;ffffffffc0fbf6ba&amp;gt;] tgt_request_handle+0x7ea/0x1750 [ptlrpc]
[22921.023591]  [&amp;lt;ffffffffc0bccbee&amp;gt;] ? libcfs_nid2str_r+0xfe/0x130 [lnet]
[22921.024674]  [&amp;lt;ffffffffc0f5f1c6&amp;gt;] ptlrpc_server_handle_request+0x256/0xb10 [ptlrpc]
[22921.025863]  [&amp;lt;ffffffffc0f63d1c&amp;gt;] ptlrpc_main+0xb3c/0x14e0 [ptlrpc]
[22921.026879]  [&amp;lt;ffffffffa94d53fe&amp;gt;] ? finish_task_switch+0x4e/0x1c0
[22921.027835]  [&amp;lt;ffffffffc0f631e0&amp;gt;] ? ptlrpc_register_service+0xf90/0xf90 [ptlrpc]
[22921.029014]  [&amp;lt;ffffffffa94c6691&amp;gt;] kthread+0xd1/0xe0
[22921.029772]  [&amp;lt;ffffffffa94c65c0&amp;gt;] ? insert_kthread_work+0x40/0x40
[22921.030761]  [&amp;lt;ffffffffa9b92d37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[22921.031759]  [&amp;lt;ffffffffa94c65c0&amp;gt;] ? insert_kthread_work+0x40/0x40
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;I believe the problem is in the ZFS layer, which becomes stuck and causes other tasks to hang.&lt;br/&gt;
I guess it is better to assign this to somebody who knows ZFS internals.&lt;/p&gt;</comment>
                            <comment id="290066" author="spitzcor" created="Thu, 21 Jan 2021 18:49:13 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=jamesanunez&quot; class=&quot;user-hover&quot; rel=&quot;jamesanunez&quot;&gt;jamesanunez&lt;/a&gt;, just double-checking here, have you seen any instances of this failure with ldiskfs backends?&lt;/p&gt;</comment>
                            <comment id="290380" author="jamesanunez" created="Tue, 26 Jan 2021 16:26:40 +0000"  >&lt;p&gt;Cory - No, we do not see this hang with ldiskfs backends. So far, we&apos;ve only see this for ZFS.&lt;/p&gt;</comment>
                            <comment id="290557" author="bzzz" created="Thu, 28 Jan 2021 06:45:48 +0000"  >&lt;p&gt;lots of threads were waiting for transaction:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
[26058.883940] mdt_rdpg00_036  D ffff9606de7be2a0     0 15624      2 0x00000080
[26058.885070] Call Trace:
[26058.885515]  [&amp;lt;ffffffffa9b85da9&amp;gt;] schedule+0x29/0x70
[26058.886284]  [&amp;lt;ffffffffc065d325&amp;gt;] cv_wait_common+0x125/0x150 [spl]
[26058.887285]  [&amp;lt;ffffffffa94c7780&amp;gt;] ? wake_up_atomic_t+0x30/0x30
[26058.888176]  [&amp;lt;ffffffffc065d365&amp;gt;] __cv_wait+0x15/0x20 [spl]
[26058.889126]  [&amp;lt;ffffffffc07afa5b&amp;gt;] dmu_tx_wait+0x20b/0x3b0 [zfs]
[26058.890036]  [&amp;lt;ffffffffa9b84022&amp;gt;] ? mutex_lock+0x12/0x2f
[26058.890920]  [&amp;lt;ffffffffc07afc91&amp;gt;] dmu_tx_assign+0x91/0x490 [zfs]
[26058.891845]  [&amp;lt;ffffffffc11a0109&amp;gt;] osd_trans_start+0x199/0x440 [osd_zfs]
[26058.892911]  [&amp;lt;ffffffffc1159351&amp;gt;] qmt_trans_start_with_slv+0x331/0x7f0 [lquota]
[26058.894032]  [&amp;lt;ffffffffc1150c83&amp;gt;] qmt_dqacq0+0x2d3/0x1ac0 [lquota]
[26058.895052]  [&amp;lt;ffffffffc115b573&amp;gt;] ? qti_lqes_add+0x1a3/0x5c0 [lquota]
[26058.896037]  [&amp;lt;ffffffffc1163854&amp;gt;] ? qmt_pool_lqes_lookup+0x2b4/0x8f0 [lquota]
[26058.897182]  [&amp;lt;ffffffffc1152d77&amp;gt;] qmt_dqacq+0x707/0x810 [lquota]
...
[26057.768904]  [&amp;lt;ffffffffc0810e75&amp;gt;] txg_wait_synced_impl+0xe5/0x130 [zfs]
[26057.769985]  [&amp;lt;ffffffffc0810ed0&amp;gt;] txg_wait_synced+0x10/0x50 [zfs]
[26057.770930]  [&amp;lt;ffffffffc11a39bd&amp;gt;] osd_trans_stop+0x54d/0x5f0 [osd_zfs]
[26057.772015]  [&amp;lt;ffffffffc0fcf9a3&amp;gt;] top_trans_stop+0xa3/0xbf0 [ptlrpc]
[26057.773004]  [&amp;lt;ffffffffc146f7ff&amp;gt;] ? lod_xattr_get+0x37f/0x890 [lod]
[26057.774018]  [&amp;lt;ffffffffc1449afc&amp;gt;] lod_trans_stop+0x25c/0x340 [lod]
[26057.774972]  [&amp;lt;ffffffffc15071f9&amp;gt;] ? mdd_acl_chmod+0x169/0x490 [mdd]
[26057.775985]  [&amp;lt;ffffffffc150961e&amp;gt;] mdd_trans_stop+0x2e/0x174 [mdd]
[26057.776919]  [&amp;lt;ffffffffc14fd2df&amp;gt;] mdd_attr_set+0x65f/0xc80 [mdd]
[26057.777886]  [&amp;lt;ffffffffc1362436&amp;gt;] mdt_attr_set+0x1c6/0x7b0 [mdt]
[26057.778810]  [&amp;lt;ffffffffc13633aa&amp;gt;] mdt_reint_setattr+0x70a/0xf40 [mdt]
[26057.779852]  [&amp;lt;ffffffffc13514c1&amp;gt;] ? mdt_root_squash+0x21/0x430 [mdt]
[26057.780825]  [&amp;lt;ffffffffc1350d3b&amp;gt;] ? ucred_set_audit_enabled.isra.15+0x3b/0x60 [mdt]
[26057.782047]  [&amp;lt;ffffffffc13663e3&amp;gt;] mdt_reint_rec+0x83/0x210 [mdt]
[26057.782974]  [&amp;lt;ffffffffc133ea70&amp;gt;] mdt_reint_internal+0x720/0xaf0 [mdt]
[26057.784028]  [&amp;lt;ffffffffc134a607&amp;gt;] mdt_reint+0x67/0x140 [mdt]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Notice that none of the threads above hold a quota-related mutex or lock.&lt;/p&gt;

&lt;p&gt;But then another thread, holding a transaction, waits for the quota semaphore,&lt;br/&gt;
so this thread was stuck and all the other threads were waiting for it to complete the transaction (its trace is below).&lt;/p&gt;
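&lt;p&gt;Schematically, the stall would look like this (an illustrative sketch of the mechanism, not the exact call chains):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;thread B:        dmu_tx_assign()        - holds an open TX in txg N
                 qti_lqes_write_lock()  - blocks on the quota rwsem

txg_quiesce:     waits for every open TX in txg N to close
                 =&amp;gt; blocked behind thread B

threads A1..An:  dmu_tx_assign() -&amp;gt; dmu_tx_wait()
                 wait for the next open txg =&amp;gt; blocked as well

until thread B gets the rwsem and closes its TX, the whole pool stalls&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;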

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
[26057.805469] mdt_rdpg00_002  D ffff9606f92b6a40     0  7935      2 0x00000080
[26057.806680] Call Trace:
[26057.807100]  [&amp;lt;ffffffffa9b85da9&amp;gt;] schedule+0x29/0x70
[26057.807954]  [&amp;lt;ffffffffa9b874e5&amp;gt;] rwsem_down_write_failed+0x215/0x3c0
[26057.808966]  [&amp;lt;ffffffffa9796777&amp;gt;] call_rwsem_down_write_failed+0x17/0x30
[26057.810072]  [&amp;lt;ffffffffc065e6e0&amp;gt;] ? spl_kmem_alloc+0xf0/0x150 [spl]
[26057.811046]  [&amp;lt;ffffffffa9b850fd&amp;gt;] down_write+0x2d/0x3d
[26057.811903]  [&amp;lt;ffffffffc115d381&amp;gt;] qti_lqes_write_lock+0xb1/0x1b0 [lquota]
[26057.813020]  [&amp;lt;ffffffffc1150c9e&amp;gt;] qmt_dqacq0+0x2ee/0x1ac0 [lquota]
[26057.813998]  [&amp;lt;ffffffffa9626702&amp;gt;] ? kmem_cache_free+0x1e2/0x200
[26057.814980]  [&amp;lt;ffffffffc1163854&amp;gt;] ? qmt_pool_lqes_lookup+0x2b4/0x8f0 [lquota]
[26057.816096]  [&amp;lt;ffffffffc1152d77&amp;gt;] qmt_dqacq+0x707/0x810 [lquota]
[26057.817117]  [&amp;lt;ffffffffc0f52aff&amp;gt;] ? lustre_pack_reply_flags+0x6f/0x1e0 [ptlrpc]
[26057.818231]  [&amp;lt;ffffffffc1332fd2&amp;gt;] mdt_quota_dqacq+0x62/0x150 [mdt]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="290616" author="gerrit" created="Thu, 28 Jan 2021 17:17:39 +0000"  >&lt;p&gt;James Nunez (jnunez@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/41354&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41354&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14300&quot; title=&quot;sanity-sec test 18 hangs in txg_quiesce on MDS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14300&quot;&gt;&lt;del&gt;LU-14300&lt;/del&gt;&lt;/a&gt; build: Revert ZFS version to 0.8.4&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: cf6215327af9f224bd4e9a6913541338a4c94de2&lt;/p&gt;</comment>
                            <comment id="290617" author="bzzz" created="Thu, 28 Jan 2021 17:23:04 +0000"  >&lt;p&gt;while I&apos;m not 100$ sure, I haven&apos;t seen an evidence/explanation why this is ZFS-related&lt;/p&gt;</comment>
                            <comment id="290890" author="jamesanunez" created="Mon, 1 Feb 2021 20:40:48 +0000"  >&lt;p&gt;Just a note to document where we&apos;ve seen this issue and for what configurations. &lt;/p&gt;

&lt;p&gt;We&apos;ve seen this issue only on the master branch starting with Lustre 2.13.53.163 for ZFS with a single MDS (not DNE) for full and full-patchless test sessions. &lt;/p&gt;

&lt;p&gt;We&apos;ve seen this hang for the following RHEL and ZFS versions:&lt;br/&gt;
RHEL 7.7 and ZFS 0.8.3-1 &#8211; &lt;a href=&quot;https://testing.whamcloud.com/test_sets/366aac3e-bef7-47f7-95de-a22b61a46a98&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/366aac3e-bef7-47f7-95de-a22b61a46a98&lt;/a&gt;&lt;br/&gt;
RHEL 7.7 and ZFS 0.8.4-1 &#8211; &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/34fca719-e391-4700-9ade-6e1363e749f7&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/34fca719-e391-4700-9ade-6e1363e749f7&lt;/a&gt;&lt;br/&gt;
RHEL 7.8 and ZFS 0.8.3-1 - &lt;a href=&quot;https://testing.whamcloud.com/test_sets/d6fe3f68-313f-4214-a290-28d9c39d5601&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/d6fe3f68-313f-4214-a290-28d9c39d5601&lt;/a&gt;&lt;br/&gt;
RHEL 7.8 and ZFS 0.8.4-1 - &lt;a href=&quot;https://testing.whamcloud.com/test_sets/02684af6-02e6-4205-a8ec-33a6bd136a17&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/02684af6-02e6-4205-a8ec-33a6bd136a17&lt;/a&gt;&lt;br/&gt;
RHEL 7.9 and ZFS 0.8.4-1 &#8211; &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/b1e0c45d-dc4c-4b1b-b162-538293429dc0&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/b1e0c45d-dc4c-4b1b-b162-538293429dc0&lt;/a&gt;&lt;br/&gt;
RHEL 8.2 and ZFS 0.8.4-1 - &lt;a href=&quot;https://testing.whamcloud.com/test_sets/0d5f60bc-e05b-4059-a1e2-7952b337f00e&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/0d5f60bc-e05b-4059-a1e2-7952b337f00e&lt;/a&gt;&lt;br/&gt;
RHEL 8.3 and ZFS 2.0.0-1 &#8211; &lt;a href=&quot;https://testing.whamcloud.com/test_sets/94c956a6-8eee-472d-8613-1324746db4a7&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/94c956a6-8eee-472d-8613-1324746db4a7&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="290902" author="sergey" created="Mon, 1 Feb 2021 22:21:01 +0000"  >&lt;p&gt;Yes, Alex is right in his&#160;comment.&lt;br/&gt;
 qmt_dqacq0 holds the quota mutex after starting the transaction, so it doesn&apos;t wait for the transaction.&lt;br/&gt;
 I went through the code related to handling this mutex but didn&apos;t find any obvious places where the unlock could be missed.&lt;br/&gt;
 Is it possible to gather debug logs or trigger a crash dump to continue the analysis?&lt;/p&gt;
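&lt;p&gt;(For reference, assuming a standard setup, enabling full Lustre debug logging and forcing a crash dump would look something like this:)&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# enable all Lustre debug flags, then dump the kernel debug buffer
lctl set_param debug=-1
lctl dk /tmp/lustre-debug.log

# force a crash dump (requires kdump to be configured)
echo c &amp;gt; /proc/sysrq-trigger&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>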
                            <comment id="290908" author="gerrit" created="Tue, 2 Feb 2021 00:08:43 +0000"  >&lt;p&gt;James Nunez (jnunez@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/41384&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41384&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14300&quot; title=&quot;sanity-sec test 18 hangs in txg_quiesce on MDS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14300&quot;&gt;&lt;del&gt;LU-14300&lt;/del&gt;&lt;/a&gt; tests: run testing on OST pools patch&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b80e2463f6fecad2406b4f2b02f4975976c35a8e&lt;/p&gt;</comment>
                            <comment id="290928" author="jamesanunez" created="Tue, 2 Feb 2021 04:20:45 +0000"  >&lt;p&gt;Sergey - I don&apos;t have a crash dump, but  debug logs are collected. Each node has a debug log, look for &lt;b&gt;.debug.&lt;/b&gt;; &lt;a href=&quot;https://testing.whamcloud.com/test_sets/94c956a6-8eee-472d-8613-1324746db4a7&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/94c956a6-8eee-472d-8613-1324746db4a7&lt;/a&gt;. Is that what you&apos;re looking for/helpful or do we need to set debug to -1?&lt;/p&gt;</comment>
                            <comment id="290998" author="gerrit" created="Tue, 2 Feb 2021 18:39:04 +0000"  >&lt;p&gt;James Nunez (jnunez@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/41390&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41390&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14300&quot; title=&quot;sanity-sec test 18 hangs in txg_quiesce on MDS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14300&quot;&gt;&lt;del&gt;LU-14300&lt;/del&gt;&lt;/a&gt; tests: increase debug level sanity-sec 18&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 28ae41581255cb439ef545456f509bf581dce966&lt;/p&gt;</comment>
                            <comment id="291000" author="bzzz" created="Tue, 2 Feb 2021 18:42:45 +0000"  >&lt;p&gt;qti_lqes_write_lock() can be taking few locks. are those ordered somehow? otherwise, I guess, few threads can hit a deadlock.&lt;/p&gt;</comment>
                            <comment id="291003" author="sergey" created="Tue, 2 Feb 2021 18:58:38 +0000"  >&lt;blockquote&gt;&lt;p&gt;qti_lqes_write_lock() can be taking few locks. are those ordered somehow? otherwise, I guess, few threads can hit a deadlock&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;No, those are not ordered. I agree this could be one of possible reasons.&lt;br/&gt;
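For illustration, one common way to avoid such a deadlock is to take the semaphores in a single global order, e.g. sorted by address (a hypothetical sketch, not the actual lquota code):&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
/* Hypothetical sketch: acquire a set of rw_semaphores in one global
 * order (ascending address), so that no two threads can each hold one
 * lock while waiting for the other&apos;s. */
#include &amp;lt;linux/sort.h&amp;gt;
#include &amp;lt;linux/rwsem.h&amp;gt;

static int rwsem_ptr_cmp(const void *a, const void *b)
{
        const struct rw_semaphore *x = *(const struct rw_semaphore * const *)a;
        const struct rw_semaphore *y = *(const struct rw_semaphore * const *)b;

        if (x &amp;lt; y)
                return -1;
        return x &amp;gt; y ? 1 : 0;
}

static void lqes_write_lock_ordered(struct rw_semaphore **sems, int nr)
{
        int i;

        /* establish one global acquisition order for every caller */
        sort(sems, nr, sizeof(*sems), rwsem_ptr_cmp, NULL);
        for (i = 0; i &amp;lt; nr; i++)
                down_write(sems[i]);
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;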
Let&apos;s look at logs with debug level -1 to be 100% sure about your guess.&lt;/p&gt;</comment>
                            <comment id="298856" author="gerrit" created="Thu, 15 Apr 2021 11:27:58 +0000"  >&lt;p&gt;Sergey Cheremencev (sergey.cheremencev@hpe.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/43325&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43325&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14300&quot; title=&quot;sanity-sec test 18 hangs in txg_quiesce on MDS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14300&quot;&gt;&lt;del&gt;LU-14300&lt;/del&gt;&lt;/a&gt; test: default PQ limit for a new user&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: a5981a020b38b052018bed1447b090b206671a1a&lt;/p&gt;</comment>
                            <comment id="298858" author="gerrit" created="Thu, 15 Apr 2021 11:39:04 +0000"  >&lt;p&gt;Sergey Cheremencev (sergey.cheremencev@hpe.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/43326&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43326&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14300&quot; title=&quot;sanity-sec test 18 hangs in txg_quiesce on MDS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14300&quot;&gt;&lt;del&gt;LU-14300&lt;/del&gt;&lt;/a&gt; quota: avoid nested lqe lookup&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 73845123b646501cd57886d6e178b65e794a0b0f&lt;/p&gt;</comment>
                            <comment id="308046" author="gerrit" created="Thu, 22 Jul 2021 01:50:11 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/43326/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43326/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14300&quot; title=&quot;sanity-sec test 18 hangs in txg_quiesce on MDS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14300&quot;&gt;&lt;del&gt;LU-14300&lt;/del&gt;&lt;/a&gt; quota: avoid nested lqe lookup&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 188112fc806c8c61d536ba3230b8d50f65e4f8fc&lt;/p&gt;</comment>
                            <comment id="309222" author="sergey" created="Wed, 4 Aug 2021 07:40:48 +0000"  >&lt;p&gt;The issue is solved by &lt;a href=&quot;https://review.whamcloud.com/43326/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43326/&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;James, do you agree to close the issue? I guess &lt;a href=&quot;https://review.whamcloud.com/#/c/41390/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/41390/&lt;/a&gt;&#160;should be abandoned.&lt;/p&gt;</comment>
                            <comment id="309253" author="jamesanunez" created="Wed, 4 Aug 2021 15:09:57 +0000"  >&lt;p&gt;It looks like we haven&apos;t seen this crash in a while. Thus, I&apos;m fine with closing this ticket.&lt;/p&gt;

&lt;p&gt;The patch &lt;a href=&quot;https://review.whamcloud.com/#/c/43325/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/43325/&lt;/a&gt; has not landed. Do we want to land this patch before closing this ticket?&lt;/p&gt;</comment>
                            <comment id="309332" author="sergey" created="Thu, 5 Aug 2021 09:36:22 +0000"  >&lt;p&gt;&lt;a href=&quot;https://review.whamcloud.com/#/c/43325/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/43325/&lt;/a&gt;&#160;is not needed as it has been landed as a part of&#160;&lt;a href=&quot;https://review.whamcloud.com/43326/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/43326/&lt;/a&gt;.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i01iq7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>