<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:41:09 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-11125] ofd_create_hdl() destroys_in_progress already cleared</title>
                <link>https://jira.whamcloud.com/browse/LU-11125</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Servers were restarted and appeared to recover normally.&#160; They briefly appeared to be handling the same (heavy) workload from before they were powered off, then started logging the &quot;system was overloaded&quot; message.&#160; The kernel then reported several stacks like this:&lt;/p&gt;

&lt;p&gt;INFO: task ll_ost00_007:108440 blocked for more than 120 seconds.&lt;br/&gt;
&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.&lt;br/&gt;
 ll_ost00_007    D ffff8ba4dc35bf40     0 108440      2 0x00000080&lt;br/&gt;
Call Trace:&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffaad38919&amp;gt;&amp;#93;&lt;/span&gt; schedule_preempt_disabled+0x39/0x90&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffaad3654f&amp;gt;&amp;#93;&lt;/span&gt; __mutex_lock_slowpath+0x10f/0x250&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffaad357f2&amp;gt;&amp;#93;&lt;/span&gt; mutex_lock+0x32/0x42&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc1669afb&amp;gt;&amp;#93;&lt;/span&gt; ofd_create_hdl+0xdcb/0x2090 &lt;span class=&quot;error&quot;&gt;&amp;#91;ofd&amp;#93;&lt;/span&gt;&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc1322007&amp;gt;&amp;#93;&lt;/span&gt; ? lustre_msg_add_version+0x27/0xa0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc132235f&amp;gt;&amp;#93;&lt;/span&gt; ? lustre_pack_reply_v2+0x14f/0x290 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc1322691&amp;gt;&amp;#93;&lt;/span&gt; ? lustre_pack_reply+0x11/0x20 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc138653a&amp;gt;&amp;#93;&lt;/span&gt; tgt_request_handle+0x92a/0x1370 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc132db5b&amp;gt;&amp;#93;&lt;/span&gt; ptlrpc_server_handle_request+0x23b/0xaa0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc132b26b&amp;gt;&amp;#93;&lt;/span&gt; ? ptlrpc_wait_event+0xab/0x350 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffaa6d5c32&amp;gt;&amp;#93;&lt;/span&gt; ? default_wake_function+0x12/0x20&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffaa6cb01b&amp;gt;&amp;#93;&lt;/span&gt; ? __wake_up_common+0x5b/0x90&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc1331c70&amp;gt;&amp;#93;&lt;/span&gt; ptlrpc_main+0xae0/0x1e90 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc1331190&amp;gt;&amp;#93;&lt;/span&gt; ? ptlrpc_register_service+0xe30/0xe30 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffaa6c0ad1&amp;gt;&amp;#93;&lt;/span&gt; kthread+0xd1/0xe0&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffaa6c0a00&amp;gt;&amp;#93;&lt;/span&gt; ? insert_kthread_work+0x40/0x40&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffaad44837&amp;gt;&amp;#93;&lt;/span&gt; ret_from_fork_nospec_begin+0x21/0x21&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffaa6c0a00&amp;gt;&amp;#93;&lt;/span&gt; ? insert_kthread_work+0x40/0x40&lt;/p&gt;

&lt;p&gt;&#160;And lustre began reporting:&lt;br/&gt;
 LustreError: 108448:0:(ofd_dev.c:1627:ofd_create_hdl()) lquake-OST0003:&lt;span class=&quot;error&quot;&gt;&amp;#91;27917288460&amp;#93;&lt;/span&gt; destroys_in_progress already cleared&lt;/p&gt;


&lt;p&gt;&#160;&lt;/p&gt;</description>
                <environment>lustre-2.10.4_1.chaos-1.ch6.x86_64 servers&lt;br/&gt;
RHEL 7.5&lt;br/&gt;
DNE1 file system&lt;br/&gt;
</environment>
        <key id="52650">LU-11125</key>
            <summary>ofd_create_hdl() destroys_in_progress already cleared</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="tappro">Mikhail Pershin</assignee>
                                    <reporter username="ofaaland">Olaf Faaland</reporter>
                        <labels>
                            <label>llnl</label>
                    </labels>
                <created>Fri, 6 Jul 2018 07:08:17 +0000</created>
                <updated>Thu, 3 Jan 2019 19:17:17 +0000</updated>
                            <resolved>Sat, 1 Sep 2018 05:57:16 +0000</resolved>
                                    <version>Lustre 2.10.4</version>
                                    <fixVersion>Lustre 2.12.0</fixVersion>
                    <fixVersion>Lustre 2.10.6</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                                                                            <comments>
                            <comment id="229996" author="ofaaland" created="Fri, 6 Jul 2018 07:09:25 +0000"  >&lt;p&gt;Note this is not the same system as the one we reported an issue with earlier today.&lt;/p&gt;</comment>
                            <comment id="229999" author="ofaaland" created="Fri, 6 Jul 2018 07:21:34 +0000"  >&lt;p&gt;At the same time this was occurring, an aarch64 system running a 2.11 client was trying to connect unsuccessfully with these servers (the file system is mounted on the 2.11 nodes but the map_on_demand setting was changed on the servers and the connection attempts are all failing).&lt;/p&gt;

&lt;p&gt;/tmp/lustre-log.XXX files were being created, with contents like this:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;00000800:00000100:13.0:1530842270.070848:0:114359:0:(o2iblnd_cb.c:2672:kiblnd_rejected()) 172.19.1.23@o2ib100 rejected: no listener at 987
00000800:00000100:13.0:1530842271.070330:0:114359:0:(o2iblnd_cb.c:2646:kiblnd_check_reconnect()) 172.19.1.23@o2ib100: reconnect (invalid service id), 12, 12, msg_size: 4096, queue_depth: 8/-1, max_frags: 256/-1
00000800:00000100:13.0:1530842271.070337:0:114359:0:(o2iblnd_cb.c:2672:kiblnd_rejected()) 172.19.1.23@o2ib100 rejected: no listener at 987
00000800:00000100:13.0:1530842272.070954:0:114359:0:(o2iblnd_cb.c:2646:kiblnd_check_reconnect()) 172.19.1.23@o2ib100: reconnect (invalid service id), 12, 12, msg_size: 4096, queue_depth: 8/-1, max_frags: 256/-1
00000800:00000100:13.0:1530842272.070957:0:114359:0:(o2iblnd_cb.c:2672:kiblnd_rejected()) 172.19.1.23@o2ib100 rejected: no listener at 987
00000800:00000100:13.0:1530842273.071332:0:114359:0:(o2iblnd_cb.c:2646:kiblnd_check_reconnect()) 172.19.1.23@o2ib100: reconnect (invalid service id), 12, 12, msg_size: 4096, queue_depth: 8/-1, max_frags: 256/-1
00000800:00000100:13.0:1530842273.071336:0:114359:0:(o2iblnd_cb.c:2672:kiblnd_rejected()) 172.19.1.23@o2ib100 rejected: no listener at 987
00000800:00000100:5.0:1530842274.071750:0:70765:0:(o2iblnd_cb.c:490:kiblnd_rx_complete()) Rx from 172.19.1.23@o2ib100 failed: 5
00000800:00000100:13.0:1530842274.071790:0:114359:0:(o2iblnd_cb.c:2646:kiblnd_check_reconnect()) 172.19.1.23@o2ib100: reconnect (invalid service id), 12, 12, msg_size: 4096, queue_depth: 8/-1, max_frags: 256/-1
00000800:00000100:13.0:1530842274.071793:0:114359:0:(o2iblnd_cb.c:2672:kiblnd_rejected()) 172.19.1.23@o2ib100 rejected: no listener at 987
00002000:00020000:9.0:1530842274.090444:0:102598:0:(ofd_dev.c:1627:ofd_create_hdl()) lquake-OST0002:[28991030285] destroys_in_progress already cleared
00000100:00000400:8.0:1530842274.090453:0:102596:0:(service.c:2114:ptlrpc_server_handle_request()) @@@ Request took longer than estimated (755:203s); client may timeout.  req@ffff8bb9b7e93850 x1605203337942624/t0(0) o5-&amp;gt;lquake-MDT0003-mdtlov_UUID@172.19.1.114@o2ib100:296/0 lens 432/400 e 0 to 0 dl 1530842071 ref 1 fl Complete:/0/0 rc 0/0
00000800:00000100:13.0:1530842275.071508:0:114359:0:(o2iblnd_cb.c:2646:kiblnd_check_reconnect()) 172.19.1.23@o2ib100: reconnect (invalid service id), 12, 12, msg_size: 4096, queue_depth: 8/-1, max_frags: 256/-1
00000800:00000100:13.0:1530842275.071516:0:114359:0:(o2iblnd_cb.c:2672:kiblnd_rejected()) 172.19.1.23@o2ib100 rejected: no listener at 987
00000800:00000400:8.0:1530842276.068989:0:70764:0:(o2iblnd_cb.c:3192:kiblnd_check_conns()) Timed out tx for 172.19.1.23@o2ib100: 25 seconds
00000800:00000100:13.0:1530842276.070940:0:114359:0:(o2iblnd_cb.c:2646:kiblnd_check_reconnect()) 172.19.1.23@o2ib100: don&apos;t reconnect (no need), 12, 12, msg_size: 4096, queue_depth: 8/-1, max_frags: 256/-1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;OST0002 and OST0003 both reported &quot;destroys_in_progress already cleared&quot;, and both reported dumping /tmp/lustre-log files, but the node hosting OST0002 was rebooted and those log files were lost.  So the content above is from a different OST, but it was also reporting the same stacks and console messages.&lt;/p&gt;</comment>
                            <comment id="230018" author="pjones" created="Fri, 6 Jul 2018 17:35:12 +0000"  >&lt;p&gt;Mike&lt;/p&gt;

&lt;p&gt;Could you please investigate?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="230064" author="tappro" created="Mon, 9 Jul 2018 15:16:39 +0000"  >&lt;p&gt;Olaf, are there any other bad effects except stack dumps and LustreError messages? Do OSTs works normal after all? You mentioned that OST0002 was rebooted, was that because of this issue or planned restart?&lt;/p&gt;</comment>
                            <comment id="230078" author="ofaaland" created="Mon, 9 Jul 2018 20:05:16 +0000"  >&lt;p&gt;Mike, at the time several IORs were running and they all appeared not to resume progress after recovery completed.&#160; ltop was showing no activity on the file system, and the IORs were not finishing at the expected pace.&lt;/p&gt;

&lt;p&gt;Later, the problem seemed to resolve itself - all but one of the IORs completed successfully, but several took 10-20 times longer than they should have.&#160; I&apos;m looking into that now to get more information.&lt;/p&gt;

&lt;p&gt;One compute node crashed, but due to an unrelated problem we have neither the stack nor the crash dump.&#160; We&apos;re looking into that problem as well.&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="230079" author="ofaaland" created="Mon, 9 Jul 2018 20:51:12 +0000"  >&lt;p&gt;I was mistaken about OST0002 being rebooted a second time.&#160; The file system was restarted just once, and the symptoms described here were seen after they came up and completed recovery.&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="230080" author="ofaaland" created="Mon, 9 Jul 2018 20:55:25 +0000"  >&lt;p&gt;I am going to attempt to re-create this without the complication of the Lustre 2.11 arm system mounting the file system.&lt;/p&gt;</comment>
                            <comment id="230092" author="tappro" created="Tue, 10 Jul 2018 08:50:50 +0000"  >&lt;p&gt;That message - &quot;destroys_in_progress already cleared&quot; - related just to MDT-&amp;gt;OST reconnects and may happen when MDT reconnects due to some reasons but OST is still performing orphan destroys from previous connect.  I see in logs in description this:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;00002000:00020000:9.0:1530842274.090444:0:102598:0:(ofd_dev.c:1627:ofd_create_hdl()) lquake-OST0002:[28991030285] destroys_in_progress already cleared
00000100:00000400:8.0:1530842274.090453:0:102596:0:(service.c:2114:ptlrpc_server_handle_request()) @@@ Request took longer than estimated (755:203s); client may timeout.  req@ffff8bb9b7e93850 x1605203337942624/t0(0) o5-&amp;gt;lquake-MDT0003-mdtlov_UUID@172.19.1.114@o2ib100:296/0 lens 432/400 e 0 to 0 dl 1530842071 ref 1 fl Complete:/0/0 rc 0/0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Looks like second thread &lt;span class=&quot;error&quot;&gt;&amp;#91;102598&amp;#93;&lt;/span&gt; was waiting for the first &lt;span class=&quot;error&quot;&gt;&amp;#91;102596&amp;#93;&lt;/span&gt; to complete OST_CREATE request with orphan destroys. And that first one took quite long time. It seems the problem is in that long orphan destroy, something was blocking it for quite a time. That can be just result of overall high load upon OST restart &lt;/p&gt;</comment>
                            <comment id="230136" author="ofaaland" created="Tue, 10 Jul 2018 23:43:21 +0000"  >&lt;p&gt;Thank you for the explanation.&lt;/p&gt;
&lt;blockquote&gt;&lt;p&gt;It seems the problem is in that long orphan destroy, something was blocking it for quite a time. That can be just result of overall high load upon OST restart&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;It sounds like destroys_in_progress should be a CDEBUG message, since it can occur under normal operating conditions.&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="230161" author="tappro" created="Wed, 11 Jul 2018 11:16:26 +0000"  >&lt;p&gt;It still may indicate the problem there, there are two different operations are possible on OST restart - MDT initiates orphan destroys, but also new precreates may be initiated by write replays.  Both uses the same mutex &apos;os_create_lock&apos; though one process destroys non-used objects and other creates new objects. That causes high contention on that mutex exactly on OST restart and I am checking now is that possible to separate locking for both processes. That would relieve load on that mutex.&lt;/p&gt;</comment>
                            <comment id="231849" author="ofaaland" created="Mon, 13 Aug 2018 05:49:14 +0000"  >&lt;p&gt;Hello Mikhail, have you learned anything more of interest?&#160; Thanks.&lt;/p&gt;</comment>
                            <comment id="231861" author="tappro" created="Mon, 13 Aug 2018 14:31:01 +0000"  >&lt;p&gt;Olaf, this message is not an error so it can be just made less noisy as first step and then I will create new improvement ticket to investigate this area thoughtfully to lower contention on that mutex during OST recovery&lt;/p&gt;</comment>
                            <comment id="231863" author="gerrit" created="Mon, 13 Aug 2018 14:38:56 +0000"  >&lt;p&gt;Mike Pershin (mpershin@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/32985&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/32985&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11125&quot; title=&quot;ofd_create_hdl() destroys_in_progress already cleared&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11125&quot;&gt;&lt;del&gt;LU-11125&lt;/del&gt;&lt;/a&gt; ofd: decrease message level&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 68c740bcc319d019ec545eb50f25417a4bd9f7a9&lt;/p&gt;</comment>
                            <comment id="232880" author="gerrit" created="Sat, 1 Sep 2018 03:29:01 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/32985/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/32985/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11125&quot; title=&quot;ofd_create_hdl() destroys_in_progress already cleared&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11125&quot;&gt;&lt;del&gt;LU-11125&lt;/del&gt;&lt;/a&gt; ofd: decrease message level&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: c25c4d60ac5fbede50a41da12f91de6e402b3c68&lt;/p&gt;</comment>
                            <comment id="232895" author="pjones" created="Sat, 1 Sep 2018 05:57:16 +0000"  >&lt;p&gt;This first patch landed for 2.12. Mike, can you please link in the ticket tracking the subsequent improvement?&lt;/p&gt;</comment>
                            <comment id="232909" author="gerrit" created="Sat, 1 Sep 2018 15:38:58 +0000"  >&lt;p&gt;Minh Diep (mdiep@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/33100&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/33100&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11125&quot; title=&quot;ofd_create_hdl() destroys_in_progress already cleared&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11125&quot;&gt;&lt;del&gt;LU-11125&lt;/del&gt;&lt;/a&gt; ofd: decrease message level&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: d266dbc4d62ab018d345159bb388f7f44fdca9d3&lt;/p&gt;</comment>
                            <comment id="233355" author="gerrit" created="Tue, 11 Sep 2018 20:38:30 +0000"  >&lt;p&gt;John L. Hammond (jhammond@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/33100/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/33100/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11125&quot; title=&quot;ofd_create_hdl() destroys_in_progress already cleared&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11125&quot;&gt;&lt;del&gt;LU-11125&lt;/del&gt;&lt;/a&gt; ofd: decrease message level&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 483e2e745c5294ae6dc2b6f85cc8976eb9f90fa3&lt;/p&gt;</comment>
                            <comment id="233720" author="ofaaland" created="Tue, 18 Sep 2018 23:14:26 +0000"  >&lt;p&gt;Is there a ticket for reducing the contention on os_create_lock?  Thanks.&lt;/p&gt;</comment>
                            <comment id="233746" author="ofaaland" created="Wed, 19 Sep 2018 15:40:12 +0000"  >&lt;blockquote&gt;&lt;p&gt;Is there a ticket for reducing the contention on os_create_lock? Thanks.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11399&quot; title=&quot;use separate locks for orphan destroy and objects re-create at OFD&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11399&quot;&gt;LU-11399&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                                        </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="53355">LU-11399</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzytr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>