<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:50:27 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-12193] possible scheduling with spinlocks held in the quota paths</title>
                <link>https://jira.whamcloud.com/browse/LU-12193</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;this looks very dangerous:&lt;/p&gt;

&lt;p&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff815f11f9&amp;gt;&amp;#93;&lt;/span&gt; _cond_resched+0x29/0x40&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa04f2c9b&amp;gt;&amp;#93;&lt;/span&gt; ptlrpc_check_set+0x16b/0x30a0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa04f5d2a&amp;gt;&amp;#93;&lt;/span&gt; ptlrpc_set_wait+0x15a/0x7b0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8137da0d&amp;gt;&amp;#93;&lt;/span&gt; ? __raw_spin_lock_init+0x2d/0x50&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa04ad520&amp;gt;&amp;#93;&lt;/span&gt; ? __ldlm_handle2lock+0x3f0/0x3f0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa04ec1d0&amp;gt;&amp;#93;&lt;/span&gt; ? ptlrpc_prep_set+0x180/0x2b0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa04b3197&amp;gt;&amp;#93;&lt;/span&gt; ldlm_run_ast_work+0xd7/0x3d0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa04d6006&amp;gt;&amp;#93;&lt;/span&gt; ldlm_glimpse_locks+0x36/0xf0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0880d8a&amp;gt;&amp;#93;&lt;/span&gt; qmt_glimpse_lock.isra.2.constprop.4+0x52a/0xa70 &lt;span class=&quot;error&quot;&gt;&amp;#91;lquota&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0884445&amp;gt;&amp;#93;&lt;/span&gt; qmt_glb_lock_notify+0x1e5/0x390 &lt;span class=&quot;error&quot;&gt;&amp;#91;lquota&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa087de2f&amp;gt;&amp;#93;&lt;/span&gt; qmt_set_with_lqe+0x35f/0x800 &lt;span class=&quot;error&quot;&gt;&amp;#91;lquota&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa087e2d0&amp;gt;&amp;#93;&lt;/span&gt; ? qmt_set_with_lqe+0x800/0x800 &lt;span class=&quot;error&quot;&gt;&amp;#91;lquota&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa087e326&amp;gt;&amp;#93;&lt;/span&gt; qmt_entry_iter_cb+0x56/0xa0 &lt;span class=&quot;error&quot;&gt;&amp;#91;lquota&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa000e53b&amp;gt;&amp;#93;&lt;/span&gt; cfs_hash_for_each_tight+0x10b/0x2e0 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa000e75e&amp;gt;&amp;#93;&lt;/span&gt; cfs_hash_for_each_safe+0xe/0x10 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa087de7f&amp;gt;&amp;#93;&lt;/span&gt; qmt_set_with_lqe+0x3af/0x800 &lt;span class=&quot;error&quot;&gt;&amp;#91;lquota&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa087e4b8&amp;gt;&amp;#93;&lt;/span&gt; qmt_set.constprop.2+0x148/0x2b0 &lt;span class=&quot;error&quot;&gt;&amp;#91;lquota&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0556dce&amp;gt;&amp;#93;&lt;/span&gt; ? barrier_entry+0x3e/0x180 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa087ec03&amp;gt;&amp;#93;&lt;/span&gt; qmt_quotactl+0x5e3/0x600 &lt;span class=&quot;error&quot;&gt;&amp;#91;lquota&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0a93400&amp;gt;&amp;#93;&lt;/span&gt; mdt_quotactl+0x290/0x770 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;/p&gt;</description>
                <environment></environment>
        <key id="55433">LU-12193</key>
            <summary>possible scheduling with spinlocks held in the quota paths</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="scherementsev">Sergey Cheremencev</assignee>
                                    <reporter username="bzzz">Alex Zhuravlev</reporter>
                        <labels>
                    </labels>
                <created>Thu, 18 Apr 2019 14:33:31 +0000</created>
                <updated>Sat, 14 Dec 2019 13:46:53 +0000</updated>
                            <resolved>Sat, 14 Dec 2019 13:46:53 +0000</resolved>
                                                    <fixVersion>Lustre 2.14.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="245997" author="bzzz" created="Thu, 18 Apr 2019 15:34:49 +0000"  >&lt;p&gt;I guess ideally we want to collect all affected lqe&apos;s on a local list using cfs_hash_for_each_safe(), then handle all the items on the list with no spinlock held. currently this scanning is serialized (to some extent) with cfs_hash_lock, but it should be fine to introduce a mutex wrapping cfs_hash_for_each_safe() and subsequent handling.&lt;/p&gt;</comment>
                            <comment id="249363" author="simmonsja" created="Mon, 17 Jun 2019 02:32:04 +0000"  >&lt;p&gt;Does patch&#160;&lt;a href=&quot;https://review.whamcloud.com/#/c/34389/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/34389/&lt;/a&gt;&#160;resolve this? It removes using the cfs_hash().&lt;/p&gt;</comment>
                            <comment id="251791" author="green" created="Mon, 22 Jul 2019 07:52:45 +0000"  >&lt;p&gt;the 34389 patch does not help with this issues, I see no visible changes.&lt;/p&gt;

&lt;p&gt;compare &lt;a href=&quot;http://testing.linuxhacker.ru:3333/lustre-reports/1480/testresults/sanity-quota-ldiskfs-DNE-centos7_x86_64-centos7_x86_64/oleg9-server-console.txt&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://testing.linuxhacker.ru:3333/lustre-reports/1480/testresults/sanity-quota-ldiskfs-DNE-centos7_x86_64-centos7_x86_64/oleg9-server-console.txt&lt;/a&gt; (search for sleeping) and&lt;br/&gt;
&lt;a href=&quot;http://testing.linuxhacker.ru:3333/lustre-reports/685/testresults/sanity-quota-ldiskfs-DNE-centos7_x86_64-centos7_x86_64/oleg88-server-console.txt&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://testing.linuxhacker.ru:3333/lustre-reports/685/testresults/sanity-quota-ldiskfs-DNE-centos7_x86_64-centos7_x86_64/oleg88-server-console.txt&lt;/a&gt; (search for sleeping)&lt;/p&gt;</comment>
                            <comment id="251792" author="green" created="Mon, 22 Jul 2019 07:54:16 +0000"  >&lt;p&gt;Also it looks like this is a 100% crash on rhel8:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[  497.202928] Lustre: DEBUG MARKER: == sanity-quota test 3: Block soft limit (start timer, timer goes off, stop timer) =================== 03:02:23 (1563778943)
[  503.400751] BUG: sleeping function called from invalid context at kernel/locking/rwsem.c:69
[  503.403144] in_atomic(): 1, irqs_disabled(): 0, pid: 14758, name: mdt00_004
[  503.404213] INFO: lockdep is turned off.
[  503.404655] CPU: 3 PID: 14758 Comm: mdt00_004 Kdump: loaded Tainted: G        W  O     --------- -  - 4.18.0-debug #8
[  503.405815] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[  503.406408] Call Trace:
[  503.406681]  dump_stack+0x106/0x175
[  503.407129]  ___might_sleep.cold.50+0xfc/0x12a
[  503.407675]  __might_sleep+0x5b/0xc0
[  503.408164]  down_write+0x35/0x120
[  503.408694]  qmt_set_with_lqe+0x140/0xb10 [lquota]
[  503.409216]  ? qmt_set_with_lqe+0xb10/0xb10 [lquota]
[  503.409845]  qmt_entry_iter_cb+0x4c/0xb0 [lquota]
[  503.410369]  cfs_hash_for_each_tight+0x15c/0x430 [libcfs]
[  503.411151]  cfs_hash_for_each_safe+0x17/0x20 [libcfs]
[  503.411893]  qmt_set_with_lqe+0x53d/0xb10 [lquota]
[  503.412858]  qmt_set.constprop.8+0x180/0x390 [lquota]
[  503.413399]  qmt_quotactl+0x35f/0x690 [lquota]
[  503.414242]  mdt_quotactl+0x366/0x9a0 [mdt]
[  503.415143]  tgt_handle_request0+0xdf/0x890 [ptlrpc]
[  503.416130]  tgt_request_handle+0x3c6/0x1ae0 [ptlrpc]
[  503.417108]  ptlrpc_server_handle_request+0x634/0x11c0 [ptlrpc]
[  503.418138]  ptlrpc_main+0xd7f/0x1470 [ptlrpc]
[  503.418968]  ? ptlrpc_register_service+0x14d0/0x14d0 [ptlrpc]
[  503.419662]  kthread+0x190/0x1c0
[  503.420027]  ? kthread_create_worker+0x90/0x90
[  503.420879]  ret_from_fork+0x24/0x50
[  503.421939] BUG: scheduling while atomic: mdt00_004/14758/0x00000003
[  503.423025] INFO: lockdep is turned off.
[  503.423735] Modules linked in: zfs(O) zunicode(O) zlua(O) zcommon(O) znvpair(O) zavl(O) icp(O) spl(O) lustre(O) ofd(O) osp(O) lod(O) ost(O) mdt(O) mdd(O) mgs(O) osd_ldiskfs(O) ldiskfs(O) lquota(O) lfsck(O) obdecho(O) mgc(O) lov(O) mdc(O) osc(O) lmv(O) fid(O) fld(O) ptlrpc_gss(O) ptlrpc(O) obdclass(O) ksocklnd(O) lnet(O) libcfs(O) dm_flakey rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver i2c_piix4 pcspkr squashfs ip_tables ata_generic serio_raw ata_piix libata dm_mirror dm_region_hash dm_log dm_mod
[  503.430905] CPU: 3 PID: 14758 Comm: mdt00_004 Kdump: loaded Tainted: G        W  O     --------- -  - 4.18.0-debug #8
[  503.432722] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[  503.433736] Call Trace:
[  503.434079]  dump_stack+0x106/0x175
[  503.434558]  __schedule_bug.cold.48+0x90/0xc5
[  503.435288]  __schedule+0xa14/0xfc0
[  503.436010]  ? _raw_spin_lock_irqsave+0xd2/0x130
[  503.436853]  schedule+0x5d/0x100
[  503.437500]  schedule_timeout+0x2db/0x8f0
[  503.438216]  ? __next_timer_interrupt+0x130/0x130
[  503.439366]  ? trace_hardirqs_on+0x19/0x30
[  503.440565]  ? ptlrpc_set_wait+0x60b/0xab0 [ptlrpc]
[  503.441599]  ptlrpc_set_wait+0x678/0xab0 [ptlrpc]
[  503.442359]  ? try_to_wake_up+0x790/0x790
[  503.443201]  ldlm_run_ast_work+0x17a/0x4e0 [ptlrpc]
[  503.444196]  ldlm_glimpse_locks+0x46/0x130 [ptlrpc]
[  503.444999]  qmt_glimpse_lock.isra.15.constprop.17+0x2d6/0x830 [lquota]
[  503.446253]  qmt_glb_lock_notify+0x27d/0x480 [lquota]
[  503.447275]  qmt_set_with_lqe+0x4d3/0xb10 [lquota]
[  503.448283]  ? qmt_set_with_lqe+0xb10/0xb10 [lquota]
[  503.449006]  qmt_entry_iter_cb+0x4c/0xb0 [lquota]
[  503.449466]  cfs_hash_for_each_tight+0x15c/0x430 [libcfs]
[  503.450036]  cfs_hash_for_each_safe+0x17/0x20 [libcfs]
[  503.451217]  qmt_set_with_lqe+0x53d/0xb10 [lquota]
[  503.452016]  qmt_set.constprop.8+0x180/0x390 [lquota]
[  503.452623]  qmt_quotactl+0x35f/0x690 [lquota]
[  503.453576]  mdt_quotactl+0x366/0x9a0 [mdt]
[  503.454672]  tgt_handle_request0+0xdf/0x890 [ptlrpc]
[  503.455935]  tgt_request_handle+0x3c6/0x1ae0 [ptlrpc]
[  503.457314]  ptlrpc_server_handle_request+0x634/0x11c0 [ptlrpc]
[  503.458831]  ptlrpc_main+0xd7f/0x1470 [ptlrpc]
[  503.459930]  ? ptlrpc_register_service+0x14d0/0x14d0 [ptlrpc]
[  503.460906]  kthread+0x190/0x1c0
[  503.461495]  ? kthread_create_worker+0x90/0x90
[  503.462400]  ret_from_fork+0x24/0x50
[  503.463289] LNetError: 14758:0:(lib-move.c:764:lnet_ni_send()) ASSERTION( !((preempt_count() &amp;amp; ((((1UL &amp;lt;&amp;lt; (4))-1) &amp;lt;&amp;lt; ((0 + 8) + 8)) | (((1UL &amp;lt;&amp;lt; (8))-1) &amp;lt;&amp;lt; (0 + 8)) | (((1UL &amp;lt;&amp;lt; (1))-1) &amp;lt;&amp;lt; (((0 + 8) + 8) + 4))))) ) failed: 
[  503.467427] LNetError: 14758:0:(lib-move.c:764:lnet_ni_send()) LBUG
[  503.468616] Kernel panic - not syncing: LBUG in interrupt.

[  503.469746] CPU: 3 PID: 14758 Comm: mdt00_004 Kdump: loaded Tainted: G        W  O     --------- -  - 4.18.0-debug #8
[  503.471150] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
[  503.471972] Call Trace:
[  503.472235]  dump_stack+0x106/0x175
[  503.472697]  panic+0x147/0x3af
[  503.473160]  ? cfs_trace_unlock_tcd+0x5c/0xe0 [libcfs]
[  503.473934]  ? cfs_trace_unlock_tcd+0x5c/0xe0 [libcfs]
[  503.474694]  lbug_with_loc.cold.0+0x14/0x28 [libcfs]
[  503.475427]  lnet_ni_send+0xb9/0x110 [lnet]
[  503.476086]  lnet_send+0xb6/0x260 [lnet]
[  503.476708]  LNetPut+0x513/0xef0 [lnet]
[  503.477427]  ptl_send_buf+0x265/0x6a0 [ptlrpc]
[  503.478227]  ptlrpc_send_reply+0x3a7/0xb70 [ptlrpc]
[  503.479006]  target_send_reply_msg+0x192/0x350 [ptlrpc]
[  503.479822]  target_send_reply+0x492/0xa00 [ptlrpc]
[  503.480626]  tgt_handle_request0+0x164/0x890 [ptlrpc]
[  503.481506]  tgt_request_handle+0x3c6/0x1ae0 [ptlrpc]
[  503.482362]  ptlrpc_server_handle_request+0x634/0x11c0 [ptlrpc]
[  503.483303]  ptlrpc_main+0xd7f/0x1470 [ptlrpc]
[  503.483963]  ? ptlrpc_register_service+0x14d0/0x14d0 [ptlrpc]
[  503.484532]  kthread+0x190/0x1c0
[  503.484877]  ? kthread_create_worker+0x90/0x90
[  503.485352]  ret_from_fork+0x24/0x50
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="258495" author="gerrit" created="Tue, 19 Nov 2019 13:44:09 +0000"  >&lt;p&gt;Sergey Cheremencev (c17829@cray.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/36795&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36795&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12193&quot; title=&quot;possible scheduling with spinlocks held in the quota paths&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12193&quot;&gt;&lt;del&gt;LU-12193&lt;/del&gt;&lt;/a&gt; quota: use rw_sem to protect lqs_hash&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: bc9c8b85229ab7aa21cad04f271903b180dcd2b8&lt;/p&gt;</comment>
                            <comment id="258497" author="simmonsja" created="Tue, 19 Nov 2019 14:29:52 +0000"  >&lt;p&gt;Lets move the code to rhashtable instead. We get the benefit of the light weight rcu locks so it will scale way better than what libcfs hash can do.&lt;/p&gt;</comment>
                            <comment id="258503" author="simmonsja" created="Tue, 19 Nov 2019 15:56:03 +0000"  >&lt;p&gt;Another idea I had as well is using xarray as a potential alternative to rhashtable. Which one you use depends on the data arrangement. Xarrays are optimized for densely packed data. For the case of quotas we are using uid / gid and projid which tend to sequential. The other benefit is that you can &apos;mark&apos; the data. This means that an entry in the Xarray for example 1000 could be labeled as a mix of UID or GID or PROJID. This could reduce all the &lt;span class=&quot;error&quot;&gt;&amp;#91;LL_MAXQUOTAS&amp;#93;&lt;/span&gt; down to one data structure. If the data is not densely pack then rhashtable is the way to go.&lt;/p&gt;</comment>
                            <comment id="258507" author="sergey" created="Tue, 19 Nov 2019 16:24:41 +0000"  >&lt;blockquote&gt;&lt;p&gt;Lets move the code to rhashtable instead. We get the benefit of the light weight rcu locks so it will scale way better than what libcfs hash can do.&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;To fix current problem I would prefer a small and clear fix, that adds rw_sem locking to cfs_hash and only changes flags that used to create lqs_hash.&lt;br/&gt;
 I am ok with suggested solutions, but they look more like optimizations or improvements. I am afraid to fix one problem and introduce several regressions.&lt;/p&gt;</comment>
                            <comment id="259848" author="gerrit" created="Sat, 14 Dec 2019 05:58:01 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/36795/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36795/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12193&quot; title=&quot;possible scheduling with spinlocks held in the quota paths&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12193&quot;&gt;&lt;del&gt;LU-12193&lt;/del&gt;&lt;/a&gt; quota: use rw_sem to protect lqs_hash&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: f3cdf905c522837e2cdce779a03b4ecf16313c65&lt;/p&gt;</comment>
                            <comment id="259882" author="pjones" created="Sat, 14 Dec 2019 13:46:53 +0000"  >&lt;p&gt;Landed for 2.14&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00f0f:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>