<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:19:38 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-8680] replay-single test_20b: BUG: soft lockup - osc_makes_rpc()</title>
                <link>https://jira.whamcloud.com/browse/LU-8680</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Niu Yawei &amp;lt;yawei.niu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/a265b3dc-8b49-11e6-ad53-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/a265b3dc-8b49-11e6-ad53-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_20b failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;test failed to respond and timed out
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Please provide additional information about the failure here.&lt;/p&gt;

&lt;p&gt;Info required for matching: replay-single 20b&lt;/p&gt;</description>
                <environment></environment>
        <key id="40370">LU-8680</key>
            <summary>replay-single test_20b: BUG: soft lockup - osc_makes_rpc()</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bobijam">Zhenyu Xu</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Sun, 9 Oct 2016 05:46:29 +0000</created>
                <updated>Tue, 9 Jun 2020 11:49:47 +0000</updated>
                            <resolved>Fri, 28 Oct 2016 23:54:49 +0000</resolved>
                                    <version>Lustre 2.9.0</version>
                                    <fixVersion>Lustre 2.9.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="168838" author="niu" created="Sun, 9 Oct 2016 05:47:27 +0000"  >&lt;p&gt;On client console:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;11:49:29:[ 1435.319733] Lustre: DEBUG MARKER: == replay-single test 20b: write, unlink, eviction, replay, (test mds_cleanup_orphans) =============== 11:48:55 (1475693335)
11:49:29:[ 1437.189973] LustreError: 167-0: lustre-MDT0000-mdc-ffff880078c87000: This client was evicted by lustre-MDT0000; in progress operations using &lt;span class=&quot;code-keyword&quot;&gt;this&lt;/span&gt; service will fail.
11:49:29:[ 1437.194981] LustreError: 30004:0:(file.c:3403:ll_inode_revalidate_fini()) lustre: revalidate FID [0x200000007:0x1:0x0] error: rc = -5
11:49:29:[ 1460.070003] BUG: soft lockup - CPU#1 stuck &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; 22s! [ptlrpcd_00_00:6005]
11:49:29:[ 1460.070003] Modules linked in: lustre(OE) obdecho(OE) mgc(OE) lov(OE) osc(OE) mdc(OE) lmv(OE) fid(OE) fld(OE) ptlrpc_gss(OE) ptlrpc(OE) obdclass(OE) ksocklnd(OE) lnet(OE) sha512_generic crypto_null libcfs(OE) rpcsec_gss_krb5 nfsv4 dns_resolver nfs fscache xprtrdma ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod crc_t10dif crct10dif_generic crct10dif_common ib_srp scsi_transport_srp scsi_tgt ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_sa ib_mad ib_core ib_addr ppdev pcspkr virtio_balloon i2c_piix4 parport_pc parport nfsd auth_rpcgss nfs_acl lockd grace sunrpc ip_tables ext4 mbcache jbd2 ata_generic pata_acpi cirrus syscopyarea sysfillrect sysimgblt drm_kms_helper virtio_blk ttm ata_piix drm serio_raw 8139too libata virtio_pci virtio_ring 8139cp virtio mii i2c_core floppy
11:49:29:[ 1460.070003] CPU: 1 PID: 6005 Comm: ptlrpcd_00_00 Tainted: G           OE  ------------   3.10.0-327.36.1.el7.x86_64 #1
11:49:29:[ 1460.070003] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
11:49:29:[ 1460.070003] task: ffff880036a7f300 ti: ffff880079148000 task.ti: ffff880079148000
11:49:29:[ 1460.070003] RIP: 0010:[&amp;lt;ffffffffa0ae9315&amp;gt;]  [&amp;lt;ffffffffa0ae9315&amp;gt;] osc_makes_rpc+0x45/0x5f0 [osc]
11:49:29:[ 1460.070003] RSP: 0018:ffff88007914bac0  EFLAGS: 00000202
11:49:29:[ 1460.070003] RAX: 0000000000000004 RBX: ffff88007914be58 RCX: 0000000000000004
11:49:29:[ 1460.070003] RDX: 0000000000000001 RSI: ffff88007b0dd4c0 RDI: ffff880079dc50e0
11:49:29:[ 1460.070003] RBP: ffff88007914bad8 R08: ffff88007b0dd510 R09: 000000000000045b
11:49:29:[ 1460.070003] R10: 0000000000010000 R11: 000000000000000b R12: ffffffffa05cbde2
11:49:29:[ 1460.070003] R13: ffff88007914ba58 R14: ffff88007914ba88 R15: 0000000000000013
11:49:29:[ 1460.070003] FS:  0000000000000000(0000) GS:ffff88007fd00000(0000) knlGS:0000000000000000
11:49:29:[ 1460.070003] CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
11:49:29:[ 1460.070003] CR2: 00007f20994b0670 CR3: 000000000194a000 CR4: 00000000000006e0
11:49:29:[ 1460.070003] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
11:49:29:[ 1460.070003] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
11:49:29:[ 1460.070003] Stack:
11:49:29:[ 1460.070003]  ffff88007b0dd568 ffff88007a46ed20 ffff880079dc51c8 ffff88007914bc20
11:49:29:[ 1460.070003]  ffffffffa0aeaa4d 00000000000003f5 0000000100000000 0000000100000000
11:49:29:[ 1460.070003]  0000000100000000 0000000000000000 0000000054d8e01c ffff88007c0b1800
11:49:29:[ 1460.070003] Call Trace:
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa0aeaa4d&amp;gt;] osc_check_rpcs+0x8d/0x18b0 [osc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810c5618&amp;gt;] ? load_balance+0x218/0x890
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810bb7d8&amp;gt;] ? sched_clock_cpu+0x98/0xc0
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff8101cd99&amp;gt;] ? sched_clock+0x9/0x10
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa0aec352&amp;gt;] osc_io_unplug0+0xe2/0x130 [osc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa0aecf50&amp;gt;] osc_io_unplug+0x10/0x20 [osc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa0ac9441&amp;gt;] brw_queue_work+0x31/0xd0 [osc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa090f0d7&amp;gt;] work_interpreter+0x37/0xf0 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa090bdb5&amp;gt;] ptlrpc_check_set.part.23+0x425/0x1dd0 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa090d7bb&amp;gt;] ptlrpc_check_set+0x5b/0xe0 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa09382bb&amp;gt;] ptlrpcd_check+0x4eb/0x5e0 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa093866b&amp;gt;] ptlrpcd+0x2bb/0x560 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810b8940&amp;gt;] ? wake_up_state+0x20/0x20
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa09383b0&amp;gt;] ? ptlrpcd_check+0x5e0/0x5e0 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810a5b8f&amp;gt;] kthread+0xcf/0xe0
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810a5ac0&amp;gt;] ? kthread_create_on_node+0x140/0x140
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff81646958&amp;gt;] ret_from_fork+0x58/0x90
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810a5ac0&amp;gt;] ? kthread_create_on_node+0x140/0x140
11:49:29:[ 1460.070003] Code: af ff 01 74 0d f6 05 2a a8 af ff 08 0f 85 44 01 00 00 49 8b 44 24 48 48 85 c0 0f 84 a6 01 00 00 0f b6 80 b4 02 00 00 d0 e8 89 c1 &amp;lt;83&amp;gt; e1 01 83 e2 02 74 3b 8b 83 dc 00 00 00 85 c0 0f 85 b5 00 00 
11:49:29:[ 1460.070003] Kernel panic - not syncing: softlockup: hung tasks
11:49:29:[ 1460.070003] CPU: 1 PID: 6005 Comm: ptlrpcd_00_00 Tainted: G           OEL ------------   3.10.0-327.36.1.el7.x86_64 #1
11:49:29:[ 1460.070003] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
11:49:29:[ 1460.070003]  ffffffff818730cf 0000000054d8e01c ffff88007fd03e18 ffffffff81636301
11:49:29:[ 1460.070003]  ffff88007fd03e98 ffffffff8162fb90 0000000000000008 ffff88007fd03ea8
11:49:29:[ 1460.070003]  ffff88007fd03e48 0000000054d8e01c ffff88007fd03e67 0000000000000000
11:49:29:[ 1460.070003] Call Trace:
11:49:29:[ 1460.070003]  &amp;lt;IRQ&amp;gt;  [&amp;lt;ffffffff81636301&amp;gt;] dump_stack+0x19/0x1b
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff8162fb90&amp;gt;] panic+0xd8/0x1e7
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff8111b846&amp;gt;] watchdog_timer_fn+0x1b6/0x1c0
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff8111b690&amp;gt;] ? watchdog_enable+0xc0/0xc0
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810a9db2&amp;gt;] __hrtimer_run_queues+0xd2/0x260
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810aa350&amp;gt;] hrtimer_interrupt+0xb0/0x1e0
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff8164831c&amp;gt;] ? call_softirq+0x1c/0x30
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810495c7&amp;gt;] local_apic_timer_interrupt+0x37/0x60
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff81648f8f&amp;gt;] smp_apic_timer_interrupt+0x3f/0x60
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff8164765d&amp;gt;] apic_timer_interrupt+0x6d/0x80
11:49:29:[ 1460.070003]  &amp;lt;EOI&amp;gt;  [&amp;lt;ffffffffa0ae9315&amp;gt;] ? osc_makes_rpc+0x45/0x5f0 [osc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa0aeaa4d&amp;gt;] osc_check_rpcs+0x8d/0x18b0 [osc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810c5618&amp;gt;] ? load_balance+0x218/0x890
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810bb7d8&amp;gt;] ? sched_clock_cpu+0x98/0xc0
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff8101cd99&amp;gt;] ? sched_clock+0x9/0x10
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa0aec352&amp;gt;] osc_io_unplug0+0xe2/0x130 [osc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa0aecf50&amp;gt;] osc_io_unplug+0x10/0x20 [osc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa0ac9441&amp;gt;] brw_queue_work+0x31/0xd0 [osc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa090f0d7&amp;gt;] work_interpreter+0x37/0xf0 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa090bdb5&amp;gt;] ptlrpc_check_set.part.23+0x425/0x1dd0 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa090d7bb&amp;gt;] ptlrpc_check_set+0x5b/0xe0 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa09382bb&amp;gt;] ptlrpcd_check+0x4eb/0x5e0 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa093866b&amp;gt;] ptlrpcd+0x2bb/0x560 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810b8940&amp;gt;] ? wake_up_state+0x20/0x20
11:49:29:[ 1460.070003]  [&amp;lt;ffffffffa09383b0&amp;gt;] ? ptlrpcd_check+0x5e0/0x5e0 [ptlrpc]
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810a5b8f&amp;gt;] kthread+0xcf/0xe0
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810a5ac0&amp;gt;] ? kthread_create_on_node+0x140/0x140
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff81646958&amp;gt;] ret_from_fork+0x58/0x90
11:49:29:[ 1460.070003]  [&amp;lt;ffffffff810a5ac0&amp;gt;] ? kthread_create_on_node+0x140/0x140
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="168839" author="niu" created="Sun, 9 Oct 2016 05:48:05 +0000"  >&lt;p&gt;Xiong, is there an open issue for it?&lt;/p&gt;</comment>
                            <comment id="168881" author="jay" created="Mon, 10 Oct 2016 05:56:35 +0000"  >&lt;p&gt;not to my knowledge.&lt;/p&gt;</comment>
                            <comment id="169250" author="niu" created="Wed, 12 Oct 2016 08:43:46 +0000"  >&lt;p&gt;I searched maloo, and seems it appeared since around Sep 24 (not sure why some results don&apos;t have console logs, so it&apos;s hard to determine the exact date when it appeared first time), and happens quite often.&lt;/p&gt;</comment>
                            <comment id="170047" author="niu" created="Tue, 18 Oct 2016 03:12:10 +0000"  >&lt;p&gt;Looks all these failures happened in interop testing, Sarah, did you ever observe such failure during interop testing?&lt;/p&gt;</comment>
                            <comment id="170329" author="jgmitter" created="Wed, 19 Oct 2016 17:37:37 +0000"  >&lt;p&gt;Hi Bobijam,&lt;/p&gt;

&lt;p&gt;Could you please look into this issue?&lt;/p&gt;

&lt;p&gt;Thanks.&lt;br/&gt;
Joe&lt;/p&gt;</comment>
                            <comment id="170552" author="niu" created="Fri, 21 Oct 2016 02:05:45 +0000"  >&lt;p&gt;This could be a regression caused by &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8135&quot; title=&quot;sanity test_101g fails with &amp;#39;not all RPCs are 16 MiB BRW rpcs&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8135&quot;&gt;&lt;del&gt;LU-8135&lt;/del&gt;&lt;/a&gt;, that patch limited chunk number in a write RPC, so for a extent with large number of chunk, osc_check_rpcs() will run into a loop and never break.&lt;/p&gt;

&lt;p&gt;see osc_check_rpcs()&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;                &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
                        rc = osc_send_write_rpc(env, cli, osc);
                        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (rc &amp;lt; 0) {
                                CERROR(&lt;span class=&quot;code-quote&quot;&gt;&quot;Write request failed with %d\n&quot;&lt;/span&gt;, rc);

                                /* osc_send_write_rpc failed, mostly because of
                                 * memory pressure.
                                 *
                                 * It can&apos;t &lt;span class=&quot;code-keyword&quot;&gt;break&lt;/span&gt; here, because &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt;:
                                 *  - a page was submitted by osc_io_submit, so
                                 *    page locked;
                                 *  - no request in flight
                                 *  - no subsequent request
                                 * The system will be in live-lock state,
                                 * because there is no chance to call
                                 * osc_io_unplug() and osc_check_rpcs() any
                                 * more. pdflush can&apos;t help in &lt;span class=&quot;code-keyword&quot;&gt;this&lt;/span&gt; &lt;span class=&quot;code-keyword&quot;&gt;case&lt;/span&gt;,
                                 * because it might be blocked at grabbing
                                 * the page lock as we mentioned.
                                 *
                                 * Anyway, &lt;span class=&quot;code-keyword&quot;&gt;continue&lt;/span&gt; to drain pages. */
                                &lt;span class=&quot;code-comment&quot;&gt;/* &lt;span class=&quot;code-keyword&quot;&gt;break&lt;/span&gt;; */&lt;/span&gt;
                        }
                }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;With the fix of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8135&quot; title=&quot;sanity test_101g fails with &amp;#39;not all RPCs are 16 MiB BRW rpcs&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8135&quot;&gt;&lt;del&gt;LU-8135&lt;/del&gt;&lt;/a&gt;, osc_send_write_rpc() will do nothing when the extent is too large, and osc_check_rpcs() won&apos;t break loop but continue to try on the same object.&lt;/p&gt;</comment>
                            <comment id="170555" author="niu" created="Fri, 21 Oct 2016 02:16:53 +0000"  >&lt;p&gt;One thing I can&apos;t see from the patch of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8135&quot; title=&quot;sanity test_101g fails with &amp;#39;not all RPCs are 16 MiB BRW rpcs&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8135&quot;&gt;&lt;del&gt;LU-8135&lt;/del&gt;&lt;/a&gt; is that: when will the large extent (exceeding chunk limitation) being flushed back?&lt;/p&gt;</comment>
                            <comment id="170685" author="jay" created="Sun, 23 Oct 2016 12:49:42 +0000"  >&lt;p&gt;Indeed. It occurred to me why I would like to have&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;osc_max_write_chunks()
{
        &lt;span class=&quot;code-keyword&quot;&gt;return&lt;/span&gt; PTLRPC_MAX_BRW_SIZE &amp;gt;&amp;gt; cli-&amp;gt;cl_chunkbits;
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;instead of the current implementation.&lt;/p&gt;
</comment>
                            <comment id="170701" author="gerrit" created="Mon, 24 Oct 2016 05:24:01 +0000"  >&lt;p&gt;Bobi Jam (bobijam@hotmail.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/23326&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/23326&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8680&quot; title=&quot;replay-single test_20b: BUG: soft lockup - osc_makes_rpc()&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8680&quot;&gt;&lt;del&gt;LU-8680&lt;/del&gt;&lt;/a&gt; osc: soft lock - osc_makes_rpc()&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 4b891b4752273c0852bec39e188a6aecfec800de&lt;/p&gt;</comment>
                            <comment id="171668" author="gerrit" created="Fri, 28 Oct 2016 23:49:35 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/23326/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/23326/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8680&quot; title=&quot;replay-single test_20b: BUG: soft lockup - osc_makes_rpc()&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8680&quot;&gt;&lt;del&gt;LU-8680&lt;/del&gt;&lt;/a&gt; osc: soft lock - osc_makes_rpc()&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: a687000d2400fee88f122526444700502cb57fe4&lt;/p&gt;</comment>
                            <comment id="171684" author="pjones" created="Fri, 28 Oct 2016 23:54:49 +0000"  >&lt;p&gt;Landed for 2.9&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzyqwn:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>