<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:20:35 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-8791] Suspicious deadlock on OST stack</title>
                <link>https://jira.whamcloud.com/browse/LU-8791</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This problem occurred during soak testing.&lt;/p&gt;

&lt;p&gt;There are two suspicious call traces which lead me to suspect there exists a deadlock in the code:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LNet: Service thread pid 30124 was inactive for 1200.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
LNet: Skipped 3 previous similar messages
Pid: 30124, comm: ll_ost_io00_116

Call Trace:
 [&amp;lt;ffffffff81178260&amp;gt;] ? cache_alloc_refill+0x1c0/0x240
 [&amp;lt;ffffffff81291489&amp;gt;] ? cpumask_next_and+0x29/0x50
 [&amp;lt;ffffffff8153cc55&amp;gt;] rwsem_down_failed_common+0x95/0x1d0
 [&amp;lt;ffffffff8153cde6&amp;gt;] rwsem_down_read_failed+0x26/0x30
 [&amp;lt;ffffffff8129e8f4&amp;gt;] call_rwsem_down_read_failed+0x14/0x30
 [&amp;lt;ffffffff8153c2e4&amp;gt;] ? down_read+0x24/0x30
 [&amp;lt;ffffffffa0f13917&amp;gt;] osd_attr_get+0x77/0x2e0 [osd_zfs]
 [&amp;lt;ffffffffa1022301&amp;gt;] ofd_attr_handle_ugid+0x161/0x430 [ofd]
 [&amp;lt;ffffffffa1027ebd&amp;gt;] ofd_write_attr_set+0xad/0x9e0 [ofd]
 [&amp;lt;ffffffffa102933f&amp;gt;] ofd_commitrw_write+0x23f/0x10a0 [ofd]
 [&amp;lt;ffffffffa102df1d&amp;gt;] ? ofd_fmd_find_nolock+0xad/0xd0 [ofd]
 [&amp;lt;ffffffffa102a75f&amp;gt;] ofd_commitrw+0x5bf/0xb10 [ofd]
 [&amp;lt;ffffffff81149461&amp;gt;] ? kzfree+0x31/0x40
 [&amp;lt;ffffffffa079d5c1&amp;gt;] ? lprocfs_counter_add+0x151/0x1c0 [obdclass]
 [&amp;lt;ffffffffa0a17ad4&amp;gt;] obd_commitrw+0x114/0x380 [ptlrpc]
 [&amp;lt;ffffffffa0a20870&amp;gt;] tgt_brw_write+0xc70/0x1530 [ptlrpc]
 [&amp;lt;ffffffff8105e9b6&amp;gt;] ? enqueue_task+0x66/0x80
 [&amp;lt;ffffffff8105ab8d&amp;gt;] ? check_preempt_curr+0x6d/0x90
 [&amp;lt;ffffffff810674be&amp;gt;] ? try_to_wake_up+0x24e/0x3e0
 [&amp;lt;ffffffffa0974e20&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
 [&amp;lt;ffffffffa0a1f0ac&amp;gt;] tgt_request_handle+0x8ec/0x1440 [ptlrpc]
 [&amp;lt;ffffffffa09cb7b1&amp;gt;] ptlrpc_main+0xd31/0x1800 [ptlrpc]
 [&amp;lt;ffffffff81539b0e&amp;gt;] ? thread_return+0x4e/0x7d0
 [&amp;lt;ffffffffa09caa80&amp;gt;] ? ptlrpc_main+0x0/0x1800 [ptlrpc]
 [&amp;lt;ffffffff810a138e&amp;gt;] kthread+0x9e/0xc0
 [&amp;lt;ffffffff8100c28a&amp;gt;] child_rip+0xa/0x20
 [&amp;lt;ffffffff810a12f0&amp;gt;] ? kthread+0x0/0xc0
 [&amp;lt;ffffffff8100c280&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;And then another one:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Call Trace:
 [&amp;lt;ffffffffa015208c&amp;gt;] ? mlx4_ib_post_send+0x4fc/0x1370 [mlx4_ib]
 [&amp;lt;ffffffff8153cc55&amp;gt;] rwsem_down_failed_common+0x95/0x1d0
 [&amp;lt;ffffffff8153cde6&amp;gt;] rwsem_down_read_failed+0x26/0x30
 [&amp;lt;ffffffff8129e8f4&amp;gt;] call_rwsem_down_read_failed+0x14/0x30
 [&amp;lt;ffffffff8153c2e4&amp;gt;] ? down_read+0x24/0x30
 [&amp;lt;ffffffffa037e9b0&amp;gt;] dmu_buf_hold_array_by_dnode+0x50/0x4a0 [zfs]
 [&amp;lt;ffffffffa037f85b&amp;gt;] dmu_buf_hold_array_by_bonus+0x6b/0x90 [zfs]
 [&amp;lt;ffffffffa0f149af&amp;gt;] osd_bufs_get+0x4bf/0xb50 [osd_zfs]
 [&amp;lt;ffffffffa07c0af6&amp;gt;] ? lu_object_find+0x16/0x20 [obdclass]
 [&amp;lt;ffffffffa102893f&amp;gt;] ofd_preprw_read+0x14f/0x910 [ofd]
 [&amp;lt;ffffffffa102b008&amp;gt;] ofd_preprw+0x358/0x14f0 [ofd]
 [&amp;lt;ffffffffa0a1774f&amp;gt;] obd_preprw+0x10f/0x380 [ptlrpc]
 [&amp;lt;ffffffffa0a217c5&amp;gt;] tgt_brw_read+0x695/0x11d0 [ptlrpc]
 [&amp;lt;ffffffffa09e4194&amp;gt;] ? sptlrpc_svc_alloc_rs+0x74/0x360 [ptlrpc]
 [&amp;lt;ffffffffa09bab8b&amp;gt;] ? lustre_pack_reply_v2+0x1eb/0x280 [ptlrpc]
 [&amp;lt;ffffffffa09bacc6&amp;gt;] ? lustre_pack_reply_flags+0xa6/0x1e0 [ptlrpc]
 [&amp;lt;ffffffffa067ec8a&amp;gt;] ? lc_watchdog_touch+0x7a/0x190 [libcfs]
 [&amp;lt;ffffffffa0a1f0ac&amp;gt;] tgt_request_handle+0x8ec/0x1440 [ptlrpc]
 [&amp;lt;ffffffffa09cb7b1&amp;gt;] ptlrpc_main+0xd31/0x1800 [ptlrpc]
 [&amp;lt;ffffffff8106ee50&amp;gt;] ? pick_next_task_fair+0xd0/0x130
 [&amp;lt;ffffffff81539896&amp;gt;] ? schedule+0x176/0x3a0
 [&amp;lt;ffffffffa09caa80&amp;gt;] ? ptlrpc_main+0x0/0x1800 [ptlrpc]
 [&amp;lt;ffffffff810a138e&amp;gt;] kthread+0x9e/0xc0
 [&amp;lt;ffffffff8100c28a&amp;gt;] child_rip+0xa/0x20
 [&amp;lt;ffffffff810a12f0&amp;gt;] ? kthread+0x0/0xc0
 [&amp;lt;ffffffff8100c280&amp;gt;] ? child_rip+0x0/0x20

Pid: 29297, comm: ll_ost_io00_003
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Two more:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;ll_ost_io00_0 D 0000000000000003     0 29439      2 0x00000080
 ffff8803d6a1f6f0 0000000000000046 0000000000000000 ffff8803d6a1f6b4
 0000000000000000 ffff88043fe82400 00006a78551f8e00 ffff8800387f5a00
 0000000000002c62 0000000106f6922c ffff8803d6a1bad8 ffff8803d6a1ffd8
Call Trace:
 [&amp;lt;ffffffffa0314e5d&amp;gt;] cv_wait_common+0x11d/0x130 [spl]
 [&amp;lt;ffffffff810a1820&amp;gt;] ? autoremove_wake_function+0x0/0x40
 [&amp;lt;ffffffffa0314ec5&amp;gt;] __cv_wait+0x15/0x20 [spl]
 [&amp;lt;ffffffffa03757fb&amp;gt;] dbuf_read+0x3bb/0x850 [zfs]
 [&amp;lt;ffffffff8153afdb&amp;gt;] ? mutex_lock+0x2b/0x50
 [&amp;lt;ffffffffa0372a13&amp;gt;] ? dbuf_find+0x193/0x1a0 [zfs]
 [&amp;lt;ffffffffa0377c78&amp;gt;] dmu_buf_will_dirty+0x58/0xc0 [zfs]
 [&amp;lt;ffffffffa0377d37&amp;gt;] dbuf_new_size+0x57/0x1c0 [zfs]
 [&amp;lt;ffffffffa039259c&amp;gt;] dnode_set_blksz+0x30c/0x330 [zfs]
 [&amp;lt;ffffffffa037cbb8&amp;gt;] dmu_object_set_blocksize+0x48/0x70 [zfs]
 [&amp;lt;ffffffffa0f15e00&amp;gt;] osd_write_commit+0x770/0x970 [osd_zfs]
 [&amp;lt;ffffffffa102980b&amp;gt;] ofd_commitrw_write+0x70b/0x10a0 [ofd]
 [&amp;lt;ffffffffa102a75f&amp;gt;] ofd_commitrw+0x5bf/0xb10 [ofd]
 [&amp;lt;ffffffff81149461&amp;gt;] ? kzfree+0x31/0x40
 [&amp;lt;ffffffffa079d5c1&amp;gt;] ? lprocfs_counter_add+0x151/0x1c0 [obdclass]
 [&amp;lt;ffffffffa0a17ad4&amp;gt;] obd_commitrw+0x114/0x380 [ptlrpc]
 [&amp;lt;ffffffffa0a20870&amp;gt;] tgt_brw_write+0xc70/0x1530 [ptlrpc]
 [&amp;lt;ffffffff8105e9b6&amp;gt;] ? enqueue_task+0x66/0x80
 [&amp;lt;ffffffff8105ab8d&amp;gt;] ? check_preempt_curr+0x6d/0x90
 [&amp;lt;ffffffff810674be&amp;gt;] ? try_to_wake_up+0x24e/0x3e0
 [&amp;lt;ffffffffa0974e20&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
 [&amp;lt;ffffffffa0a1f0ac&amp;gt;] tgt_request_handle+0x8ec/0x1440 [ptlrpc]
 [&amp;lt;ffffffffa09cb7b1&amp;gt;] ptlrpc_main+0xd31/0x1800 [ptlrpc]
 [&amp;lt;ffffffff8106ee50&amp;gt;] ? pick_next_task_fair+0xd0/0x130
 [&amp;lt;ffffffff81539896&amp;gt;] ? schedule+0x176/0x3a0
 [&amp;lt;ffffffffa09caa80&amp;gt;] ? ptlrpc_main+0x0/0x1800 [ptlrpc]
 [&amp;lt;ffffffff810a138e&amp;gt;] kthread+0x9e/0xc0
 [&amp;lt;ffffffff8100c28a&amp;gt;] child_rip+0xa/0x20
 [&amp;lt;ffffffff810a12f0&amp;gt;] ? kthread+0x0/0xc0
 [&amp;lt;ffffffff8100c280&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;ll_ost_io00_1 D 0000000000000005     0 29763      2 0x00000080
 ffff88021676b768 0000000000000046 0000000000000000 ffff88021676b72c
 ffff880400000000 ffff88043fe82800 00006a78551f88c2 ffff8800387f5a00
 0000000000002c58 0000000106f6922c ffff8803d27b9ad8 ffff88021676bfd8
Call Trace:
 [&amp;lt;ffffffffa030db8e&amp;gt;] ? spl_kmem_zalloc+0x8e/0x190 [spl]
 [&amp;lt;ffffffff8153cc55&amp;gt;] rwsem_down_failed_common+0x95/0x1d0
 [&amp;lt;ffffffff8153afce&amp;gt;] ? mutex_lock+0x1e/0x50
 [&amp;lt;ffffffff8153cde6&amp;gt;] rwsem_down_read_failed+0x26/0x30
 [&amp;lt;ffffffff8129e8f4&amp;gt;] call_rwsem_down_read_failed+0x14/0x30
 [&amp;lt;ffffffff8153c2e4&amp;gt;] ? down_read+0x24/0x30
 [&amp;lt;ffffffffa0376bb1&amp;gt;] dbuf_dirty+0x381/0x980 [zfs]
 [&amp;lt;ffffffffa037728a&amp;gt;] dbuf_assign_arcbuf+0xda/0x2b0 [zfs]
 [&amp;lt;ffffffffa037f679&amp;gt;] dmu_assign_arcbuf+0x149/0x1f0 [zfs]
 [&amp;lt;ffffffffa0f15ac1&amp;gt;] osd_write_commit+0x431/0x970 [osd_zfs]
 [&amp;lt;ffffffffa102980b&amp;gt;] ofd_commitrw_write+0x70b/0x10a0 [ofd]
 [&amp;lt;ffffffffa102a75f&amp;gt;] ofd_commitrw+0x5bf/0xb10 [ofd]
 [&amp;lt;ffffffff81149461&amp;gt;] ? kzfree+0x31/0x40
 [&amp;lt;ffffffffa079d5c1&amp;gt;] ? lprocfs_counter_add+0x151/0x1c0 [obdclass]
 [&amp;lt;ffffffffa0a17ad4&amp;gt;] obd_commitrw+0x114/0x380 [ptlrpc]
 [&amp;lt;ffffffffa0a20870&amp;gt;] tgt_brw_write+0xc70/0x1530 [ptlrpc]
 [&amp;lt;ffffffff8105e9b6&amp;gt;] ? enqueue_task+0x66/0x80
 [&amp;lt;ffffffff8105ab8d&amp;gt;] ? check_preempt_curr+0x6d/0x90
 [&amp;lt;ffffffff810674be&amp;gt;] ? try_to_wake_up+0x24e/0x3e0
 [&amp;lt;ffffffffa0974e20&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
 [&amp;lt;ffffffffa0a1f0ac&amp;gt;] tgt_request_handle+0x8ec/0x1440 [ptlrpc]
 [&amp;lt;ffffffffa09cb7b1&amp;gt;] ptlrpc_main+0xd31/0x1800 [ptlrpc]
 [&amp;lt;ffffffff81539b0e&amp;gt;] ? thread_return+0x4e/0x7d0
 [&amp;lt;ffffffffa09caa80&amp;gt;] ? ptlrpc_main+0x0/0x1800 [ptlrpc]
 [&amp;lt;ffffffff810a138e&amp;gt;] kthread+0x9e/0xc0
 [&amp;lt;ffffffff8100c28a&amp;gt;] child_rip+0xa/0x20
 [&amp;lt;ffffffff810a12f0&amp;gt;] ? kthread+0x0/0xc0
 [&amp;lt;ffffffff8100c280&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Due to this issue, other service threads can&apos;t move forward because this tx can never complete.&lt;/p&gt;

&lt;p&gt;The node died when I was trying to collect more information. However, these call traces are enough to start an initial analysis.&lt;/p&gt;</description>
                <environment></environment>
        <key id="41250">LU-8791</key>
            <summary>Suspicious deadlock on OST stack</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="jay">Jinshan Xiong</assignee>
                                    <reporter username="jay">Jinshan Xiong</reporter>
                        <labels>
                            <label>soak</label>
                    </labels>
                <created>Wed, 2 Nov 2016 19:16:01 +0000</created>
                <updated>Mon, 7 Nov 2016 20:15:23 +0000</updated>
                            <resolved>Mon, 7 Nov 2016 20:15:23 +0000</resolved>
                                    <version>Lustre 2.9.0</version>
                                    <fixVersion>Lustre 2.9.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="172063" author="cliffw" created="Wed, 2 Nov 2016 19:20:36 +0000"  >&lt;p&gt;At the time of the hang discovery, i dumped debug logs on lola-8 (MGS) lola-5(OSS) and lola-10 (MDS doing recovery) &lt;br/&gt;
I then set debug = -1, waited five minutes and re-dumped the OSS and MDS logs. Attached.&lt;/p&gt;</comment>
                            <comment id="172064" author="cliffw" created="Wed, 2 Nov 2016 19:26:36 +0000"  >&lt;p&gt;Lustre version lustre: 2.8.59_62_g165c308 &lt;br/&gt;
commit 165c3082079d48f70b6e340ad23514e0e7524978&lt;/p&gt;</comment>
                            <comment id="172065" author="cliffw" created="Wed, 2 Nov 2016 19:36:20 +0000"  >&lt;p&gt;Sequence of events:&lt;br/&gt;
Noticed excessive reconnection attempts from lola-10 to lola-5, &lt;br/&gt;
Nov  1 22:03:32 lola-5 kernel: Lustre: soaked-OST000f: Export ffff880310f5bc00 already connecting from 192.168.1.110@o2ib10&lt;br/&gt;
Nov  1 22:04:22 lola-5 kernel: Lustre: soaked-OST000f: Export ffff880310f5bc00 already connecting from 192.168.1.110@o2ib10&lt;br/&gt;
Nov  1 22:05:12 lola-5 kernel: Lustre: soaked-OST000f: Export ffff880310f5bc00 already connecting from 192.168.1.110@o2ib10&lt;br/&gt;
Nov  1 22:06:02 lola-5 kernel: Lustre: soaked-OST000f: Export ffff880310f5bc00 already connecting from 192.168.1.110@o2ib10&lt;br/&gt;
Nov  1 22:06:52 lola-5 kernel: Lustre: soaked-OST000f: Export ffff880310f5bc00 already connecting from 192.168.1.110@o2ib10&lt;br/&gt;
On MDT:&lt;br/&gt;
Nov  1 17:02:28 lola-10 kernel: LustreError: 11-0: soaked-MDT0000-lwp-MDT0002: operation mds_connect to node 192.168.1.108@o2ib10 failed: rc = -114&lt;br/&gt;
Nov  1 17:03:18 lola-10 kernel: LustreError: 11-0: soaked-MDT0000-lwp-MDT0002: operation mds_connect to node 192.168.1.108@o2ib10 failed: rc = -114&lt;br/&gt;
Nov  1 17:04:08 lola-10 kernel: LustreError: 11-0: soaked-MDT0000-lwp-MDT0002: operation mds_connect to node 192.168.1.108@o2ib10 failed: rc = -114&lt;br/&gt;
Nov  1 17:04:58 lola-10 kernel: LustreError: 11-0: soaked-MDT0000-lwp-MDT0002: operation mds_connect to node 192.168.1.108@o2ib10 failed: rc = -114&lt;br/&gt;
Examination revealed lola-5 in a hung state apparently due to deadlock above.&lt;/p&gt;</comment>
                            <comment id="172091" author="jay" created="Thu, 3 Nov 2016 00:22:20 +0000"  >&lt;p&gt;It turned out that the root cause of the deadlock is due to block size change and object write at the same time.&lt;/p&gt;

&lt;p&gt;Thread 1:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;osd_grow_blocksize() with osd_object::oo_guard held
  -&amp;gt; dmu_object_set_blocksize()
    -&amp;gt; dnode_set_blksz(), with dnode_t::dn_struct_rwlock write lock held
      -&amp;gt; dbuf_new_size()
        -&amp;gt; dmu_buf_will_dirty()
          -&amp;gt; dbuf_read()
            -&amp;gt; wait for the dbuf state to change to DB_CACHED
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Thread 2:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;osd_write_commit()
  -&amp;gt; dmu_assign_arcbuf()
    -&amp;gt; dbuf_assign_arcbuf(), set dbuf state to DB_FILL
      -&amp;gt; dbuf_dirty()
        -&amp;gt; try to hold the read lock of dnode_t::dn_struct_rwlock
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;In summary, thread 2 sets the dbuf to DB_FILL and then tries to grab dn_struct_rwlock to complete the assignment, while thread 1 holds dn_struct_rwlock and waits for the dbuf state to change to DB_CACHED. Deadlocked.&lt;/p&gt;

&lt;p&gt;In order to fix this problem, we can prevent further writes from happening to the same object while it&apos;s changing the block size. I would propose to hold read lock of osd_object::oo_guard in osd_write_commit().&lt;/p&gt;

&lt;p&gt;Patch is coming soon.&lt;/p&gt;
</comment>
                            <comment id="172093" author="gerrit" created="Thu, 3 Nov 2016 00:49:01 +0000"  >&lt;p&gt;Jinshan Xiong (jinshan.xiong@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/23550&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/23550&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8791&quot; title=&quot;Suspicious deadlock on OST stack&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8791&quot;&gt;&lt;del&gt;LU-8791&lt;/del&gt;&lt;/a&gt; osd-zfs: hold oo_guard read lock for object write&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b8811a0a02c801cc85ee07cb7e68b8514a14b031&lt;/p&gt;</comment>
                            <comment id="172533" author="gerrit" created="Mon, 7 Nov 2016 15:46:13 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/23550/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/23550/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8791&quot; title=&quot;Suspicious deadlock on OST stack&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8791&quot;&gt;&lt;del&gt;LU-8791&lt;/del&gt;&lt;/a&gt; osd-zfs: hold oo_guard read lock for object write&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: dde6d59ad5c7bb915900bbbe471104ab3c4d3629&lt;/p&gt;</comment>
                            <comment id="172612" author="pjones" created="Mon, 7 Nov 2016 20:15:23 +0000"  >&lt;p&gt;Landed for 2.9&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="23943" name="lola-10.lfsck.full-debug.txt.gz" size="4716835" author="cliffw" created="Wed, 2 Nov 2016 19:20:36 +0000"/>
                            <attachment id="23944" name="lola-10.lfsck.hang.txt.gz" size="96221" author="cliffw" created="Wed, 2 Nov 2016 19:20:36 +0000"/>
                            <attachment id="23940" name="lola-5.mount.fulldebug.txt.gz" size="4712932" author="cliffw" created="Wed, 2 Nov 2016 19:20:36 +0000"/>
                            <attachment id="23941" name="lola-5.mount.hang.txt.gz" size="2641750" author="cliffw" created="Wed, 2 Nov 2016 19:20:36 +0000"/>
                            <attachment id="23942" name="lola-8.lfsck.hang.txt.gz" size="244" author="cliffw" created="Wed, 2 Nov 2016 19:20:36 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzyu7r:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>