<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:43:13 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4495] client evicted on parallel append write to the shared file.</title>
                <link>https://jira.whamcloud.com/browse/LU-4495</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;The client is sometimes evicted with a simple workload.&lt;/p&gt;</description>
                <environment>system with 8 sot&amp;#39;s.</environment>
        <key id="22758">LU-4495</key>
            <summary>client evicted on parallel append write to the shared file.</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="jay">Jinshan Xiong</assignee>
                                    <reporter username="shadow">Alexey Lyashkov</reporter>
                        <labels>
                            <label>patch</label>
                    </labels>
                <created>Thu, 16 Jan 2014 07:54:03 +0000</created>
                <updated>Tue, 3 Jun 2014 15:12:06 +0000</updated>
                            <resolved>Tue, 3 Jun 2014 15:12:06 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                    <version>Lustre 2.5.0</version>
                    <version>Lustre 2.6.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>14</watches>
                                                                            <comments>
                            <comment id="75068" author="shadow" created="Thu, 16 Jan 2014 07:55:24 +0000"  >&lt;p&gt;logs.&lt;/p&gt;</comment>
                            <comment id="75069" author="shadow" created="Thu, 16 Jan 2014 07:56:30 +0000"  >&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;0x68020d373efde35c - remote handle
ldlm lock ffff88007aa25b40

cl_lock - ffff88007b403ed0 ??

2487 + 2485 both rw to single file.
2487 - finished fine at 

00000080:00000001:2.0:1389187621.776726:0:2485:0:(file.c:1352:ll_file_write()) Process entered

00000020:00000010:1.0:1389187621.776955:0:2487:0:(cl_lock.c:381:cl_lock_alloc()) slab-alloced &apos;lock&apos;: 216 at ffff88007b403ed0.
00000080:00200000:0.0:1389187621.795589:0:2487:0:(file.c:1216:ll_file_io_generic()) iot: 1, result: 133

00000020:00000010:2.0:1389187621.823858:0:2485:0:(cl_lock.c:381:cl_lock_alloc()) slab-alloced &apos;lock&apos;: 216 at ffff88007a95a228.

00000008:00010000:2.0:1389187621.823865:0:2485:0:(osc_lock.c:1743:osc_lock_init()) ### lock ffff88007a95a228, osc lock ffff88007000dd98, flags 0

00010000:00000010:2.0:1389187621.823949:0:2485:0:(ldlm_lock.c:477:ldlm_lock_new()) slab-alloced &apos;lock&apos;: 504 at ffff88007aa25b40.


00010000:00010000:1.0:1389187621.825396:0:1673:0:(ldlm_lockd.c:1696:ldlm_handle_cp_callback()) ### client completion callback handler START ns: centfs01-OST0003-osc-ffff880037b29400 lock: ffff88007aa25b40/0x68020d373efde35c lrc: 4/0,1 mode: --/PW res: [0xa2:0x0:0x0].0 rrc: 1 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;18446744073709551615) flags: 0x20000 nid: local remote: 0xb66b896f3d8df895 expref: -99 pid: 2485 timeout: 0 lvb_type: 1
1389187621.825396 - cp for all parents (ffff88007a95a228 + ffff88007b403ed0)

00000020:00010000:1.0:1389187621.825466:0:1673:0:(cl_lock.c:1608:cl_lock_modify()) lock@ffff88007a95a228[2 2 0 1 1 00000000] W(2):[0, 18446744073709551615]@[0x100030000:0xa2:0x0] {
00000020:00010000:1.0:1389187621.825469:0:1673:0:(cl_lock.c:1608:cl_lock_modify())     lovsub@ffff88007ac19ba0: [1 ffff88007ce93af0 W(2):[0, 18446744073709551615]@[0x380002340:0x7f:0x0]]
00000020:00010000:1.0:1389187621.825471:0:1673:0:(cl_lock.c:1608:cl_lock_modify())     osc@ffff88007000dd98: ffff88007aa25b40    0x20000020002 0x68020d373efde35c 3 (null) size: 0 mtime: 0 atime: 0 ctime: 0 blocks: 0
00000020:00010000:1.0:1389187621.825472:0:1673:0:(cl_lock.c:1608:cl_lock_modify()) } lock@ffff88007a95a228
00000020:00010000:1.0:1389187621.825473:0:1673:0:(cl_lock.c:1608:cl_lock_modify())  -&amp;gt; W(2):[0, 18446744073709551615]@[0x100030000:0xa2:0x0]


00010000:00010000:1.0:1389187621.838412:0:2485:0:(ldlm_lock.c:918:ldlm_lock_decref_internal()) ### add lock into lru list ns: centfs01-OST0003-osc-ffff880037b29400 lock: ffff88007aa25b40/0x68020d373efde35c lrc: 3/0,0 mode: PW/PW res: [0xa2:0x0:0x0].0 rrc: 1 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;18446744073709551615) flags: 0x10020000020000 nid: local remote: 0xb66b896f3d8df895 expref: -99 pid: 2485 timeout: 0 lvb_type: 1

00000020:00000010:1.0:1389187621.839272:0:2485:0:(cl_lock.c:286:cl_lock_free()) slab-freed &apos;(lock)&apos;: 216 at ffff88007b403ed0.

=====================


00000020:00000010:1.0:1389187621.839276:0:2485:0:(cl_lock.c:381:cl_lock_alloc()) slab-alloced &apos;lock&apos;: 216 at ffff88007b403ed0

00000020:00000001:1.0:1389187621.839519:0:2485:0:(cl_lock.c:1288:cl_enqueue_locked()) Process entered

connection from ldlm &amp;lt;&amp;gt; osc &amp;lt;&amp;gt; cl ?

00010000:00010000:1.0:1389187621.840735:0:1884:0:(ldlm_lockd.c:1654:ldlm_handle_bl_callback()) ### client blocking AST callback handler ns: centfs01-OST0003-osc-ffff880037b29400 lock: ffff88007aa25b40/0x68020d373efde35c lrc: 3/0,0 mode: PW/PW res: [0xa2:0x0:0x0].0 rrc: 1 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;18446744073709551615) flags: 0x420000020000 nid: local remote: 0xb66b896f3d8df895 expref: -99 pid: 2485 timeout: 0 lvb_type: 1

1389187621.840734 - bl ast for ffff88007a95a228 (?!!) in enclose with ffff88007b403ed0
....
00000020:00000001:1.0:1389187621.841322:0:2485:0:(cl_lock.c:953:cl_lock_state_wait()) Process entered
00000020:00010000:1.0:1389187621.841322:0:2485:0:(cl_lock.c:151:cl_lock_trace0()) state wait lock: ffff88007b403ed0@(9 ffff88007ba56ae0 1 1 0 1 1 0)(ffff88007d7c4140/0/1) at cl_lock_state_wait():959
00000020:00000001:1.0:1389187621.841324:0:2485:0:(cl_lock.c:151:cl_lock_trace0()) put mutex: ffff88007b403ed0@(9 ffff88007ba56ae0 1 1 0 1 1 0)(ffff88007d7c4140/0/1) at cl_lock_mutex_put():754


00000020:00000001:1.0:1389187772.690095:0:2485:0:(cl_lock.c:151:cl_lock_trace0()) got mutex: ffff88007b403ed0@(9 ffff88007ba56ae0 1 1 0 1 1 0)(ffff88007d7c4140/0/1) at cl_lock_mutex_tail():662
00000020:00000001:1.0:1389187772.690097:0:2485:0:(cl_lock.c:990:cl_lock_state_wait()) Process leaving (rc=0 : 0 : 0)


--- 
00000020:00000001:1.0:1389187772.690198:0:2485:0:(cl_lock.c:2072:cl_lock_unhold()) Process entered
00000020:00000001:1.0:1389187772.690198:0:2485:0:(cl_lock.c:900:cl_lock_hold_release()) Process entered
00000020:00010000:1.0:1389187772.690198:0:2485:0:(cl_lock.c:151:cl_lock_trace0()) hold release lock: ffff88007a95a228@(2 ffff88007ba56ae0 1 5 0 1 0 6)(ffff88007d7c4140/1/1) at cl_lock_hold_release():901
00000020:00000001:1.0:1389187772.690200:0:2485:0:(cl_lock.c:798:cl_lock_cancel0()) Process entered

00010000:00010000:1.0:1389187772.690370:0:2485:0:(ldlm_request.c:1186:ldlm_cancel_pack()) ### packing ns: centfs01-OST0003-osc-ffff880037b29400 lock: ffff88007aa25b40/0x68020d373efde35c lrc: 1/0,0 mode: --/PW res: [0xa2:0x0:0x0].0 rrc: 1 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;18446744073709551615) flags: 0x4c69400020000 nid: local remote: 0xb66b896f3d8df895 expref: -99 pid: 2485 timeout: 0 lvb_type: 1

00000020:00000010:1.0:1389187772.690421:0:2485:0:(cl_lock.c:286:cl_lock_free()) slab-freed &apos;(lock)&apos;: 216 at ffff88007a95a228.

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="75080" author="shadow" created="Thu, 16 Jan 2014 13:22:10 +0000"  >&lt;p&gt;per discussion with Nikita, he point me - sub level lock in hold state it&apos;s prevent to send an bl ast notification to top level lock and lock don&apos;t wakeup. but it&apos;s looks a bug as hold state should be set only for short time when top level lock don&apos;t able to receive any notification from sub locks.&lt;br/&gt;
So we should be don&apos;t have any locks in hold state over cl_lock_state_wait() as it&apos;s prevent an have any notification from sub locks.&lt;/p&gt;

&lt;p&gt;Jay what you think about it ?&lt;/p&gt;</comment>
                            <comment id="75240" author="shadow" created="Sat, 18 Jan 2014 15:10:29 +0000"  >&lt;p&gt;with Nikita and Jay assistance - i found a root cause of bug.&lt;br/&gt;
it&apos;s deadlock between two nodes.&lt;br/&gt;
file have a some stripes, cli1 have locks for full stripes set, but second client want to write to same file and they tried to enqueue a locks.&lt;br/&gt;
enquiring locks produce a cancel of two first locks on client1. ...&lt;br/&gt;
client1 take a lock1, and HOLD a lock5.&lt;br/&gt;
but client 2 have a lock1 referenced and need take lock5, which hold on client1.&lt;br/&gt;
client2 don&apos;t able to release a lock1 as it&apos;s referenced in that time, but client1 ignore a bl sat request as lock in hold state.&lt;/p&gt;

&lt;p&gt;that all. client&apos;s deadlocked. Looks bug exist on any 2.x code.&lt;/p&gt;</comment>
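<!--
    The two-node deadlock described in the comment above follows the classic AB-BA
    pattern. What follows is a minimal, self-contained C sketch that models it with two
    threads and two pthread mutexes standing in for the per-stripe DLM locks; the names
    (client1/client2, lock1/lock5) and the reduction to plain mutexes are illustrative
    assumptions, not actual Lustre code.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* lock1 and lock5 stand in for the extent locks on two different stripes. */
    static pthread_mutex_t lock1 = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t lock5 = PTHREAD_MUTEX_INITIALIZER;

    /* "client1": already holds lock5 (the cached sub-lock taken at top-lock init)
     * and then tries to enqueue lock1. */
    static void *client1(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock5);   /* hold flag set: a BL AST would be ignored */
        sleep(1);                     /* widen the race window for the demo       */
        pthread_mutex_lock(&lock1);   /* blocks: client2 will not release lock1   */
        printf("client1 finished\n");
        pthread_mutex_unlock(&lock1);
        pthread_mutex_unlock(&lock5);
        return NULL;
    }

    /* "client2": references lock1 for its own IO and then needs lock5. */
    static void *client2(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock1);   /* referenced: cannot be cancelled          */
        sleep(1);
        pthread_mutex_lock(&lock5);   /* blocks: client1 keeps lock5 held         */
        printf("client2 finished\n");
        pthread_mutex_unlock(&lock5);
        pthread_mutex_unlock(&lock1);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;

        pthread_create(&t1, NULL, client1, NULL);
        pthread_create(&t2, NULL, client2, NULL);
        pthread_join(t1, NULL);       /* never returns: both threads are stuck    */
        pthread_join(t2, NULL);
        return 0;
    }

    Compiled with "cc -pthread deadlock.c", both threads block forever on their second
    lock, mirroring how client1 ignores the BL AST for its held lock while client2
    cannot release its referenced one.
-->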
                            <comment id="75242" author="jforgan" created="Sat, 18 Jan 2014 17:32:14 +0000"  >&lt;p&gt;Answers to questions posed by Peter Jones by email:&lt;/p&gt;

&lt;p&gt;  2.  Which neo branch on &lt;a href=&quot;https://github.com/Xyratex/lustre-stable&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/Xyratex/lustre-stable&lt;/a&gt; contains the specific code in use?&lt;/p&gt;

&lt;p&gt;The server code is based on Lustre 2.1 and is here: &lt;a href=&quot;https://github.com/Xyratex/lustre-stable/tree/b_neo_stable_1.3&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/Xyratex/lustre-stable/tree/b_neo_stable_1.3&lt;/a&gt;&lt;/p&gt;


&lt;p&gt;  3.  Which exact version of 2.4.x is running on the clients, and are any patches applied?&lt;/p&gt;

&lt;p&gt;The issue was reported on a customer client based on 2.4, although I believe they have also recreated it on vanilla 2.4 and master. I&apos;ve pinged them to answer more fully. Note Alexey&apos;s comment above on his belief that it is present in any 2.x client.&lt;/p&gt;</comment>
                            <comment id="75244" author="paf" created="Sat, 18 Jan 2014 18:05:56 +0000"  >&lt;p&gt;I can confirm that we&apos;ve reproduced (Cray is the customer referenced above) it on master clients running to master servers.  Our original customer report was on a 2.4 derived client, but as I said, it&apos;s present in master.&lt;/p&gt;

&lt;p&gt;The logs and reproducer Alexey attached to this ticket are from our master/master test system.&lt;/p&gt;

&lt;p&gt;I wasn&apos;t able to replicate it with our reproducer on 2.2 clients, but was able to with 2.3 and master.  Alexey says the bug exists in any 2.x version, so it&apos;s likely the timings are different and the reproducer misses it on 2.2.&lt;/p&gt;</comment>
                            <comment id="75245" author="shadow" created="Sat, 18 Jan 2014 18:21:08 +0000"  >&lt;p&gt;I confirm &lt;br/&gt;
cl_lock_alloc &lt;del&gt;&amp;gt; lov_lock_sub_init -&amp;gt; lov_sublock_alloc&lt;/del&gt;&amp;gt; .. -&amp;gt;cl_lock_hold exist on &lt;br/&gt;
/lov_lock.c/1.19/Wed Nov 25 02:10:38 2009//&lt;br/&gt;
so it&apos;s very close to the original clio code.&lt;/p&gt;

&lt;p&gt;but it&apos;s needs situation when second process start IO while one or more locks for a multi stripe file still in cache. so that bug will be never replicated for lustre default striping (1 stripe per file).&lt;/p&gt;
</comment>
                            <comment id="75371" author="paf" created="Tue, 21 Jan 2014 19:39:00 +0000"  >&lt;p&gt;First patch from Alexey L.&lt;/p&gt;</comment>
                            <comment id="75372" author="paf" created="Tue, 21 Jan 2014 19:47:44 +0000"  >&lt;p&gt;A number of updates here.  Alexey L. provided a patch for Cray to test, and we confirmed it fixes the deadlock.  (We&apos;ve tested on Cray&apos;s version of 2.5, and on current master.)&lt;/p&gt;

&lt;p&gt;I&apos;ve attached that patch.&lt;/p&gt;

&lt;p&gt;Essentially, in lov_lock_sub_init, it removes a for loop which has this comment on it:&lt;/p&gt;
&lt;pre&gt;
        /*
         * Then, create sub-locks. Once at least one sub-lock was created,
         * top-lock can be reached by other threads.
         */
&lt;/pre&gt;


&lt;p&gt;Here&apos;s Alexey&apos;s explanation of the deadlock with a slight edit by me:&lt;br/&gt;
&amp;#8212;&lt;br/&gt;
1) client1 has a full set of locks for a file { stripe1, stripe2, stripe3 }, client2 has no locks.&lt;/p&gt;

&lt;p&gt;2) client1 finished an IO and put its locks into an inactive state,&lt;br/&gt;
so client2 is able to take 2 of the 3 locks from client1 and reference them.&lt;/p&gt;

&lt;p&gt;3) client1 enters a new IO loop (a different process)&lt;br/&gt;
and starts to take its lock quorum; at top-lock allocation time&lt;br/&gt;
it finds the stripe3 lock in cache and holds it.&lt;/p&gt;

&lt;p&gt;4) client2 tries to take the stripe3 lock, but&lt;br/&gt;
it is now protected by the hold flag and the BL AST is ignored&lt;br/&gt;
(the lock is just marked callback-pending).&lt;br/&gt;
client2 enters the wait-for-grant loop.&lt;/p&gt;

&lt;p&gt;5) client1 enters the lock enqueue loop to take the stripe1 lock,&lt;br/&gt;
but client2 can&apos;t return that lock because it is referenced.&lt;br/&gt;
... so deadlock.&lt;br/&gt;
&amp;#8212;&lt;br/&gt;
This patch removes sub-lock creation from lov_lock_sub_init.&lt;/p&gt;

&lt;p&gt;If sub locks are not created in lov_lock_sub_init,&lt;br/&gt;
they are created in the enqueue process. This avoids the&lt;br/&gt;
deadlock described above by avoiding step 3.&lt;br/&gt;
&amp;#8212;&lt;/p&gt;

&lt;p&gt;We&apos;ve run our general Lustre IO performance test suite (multiple clients, various IO patterns, various stripe counts) and weren&apos;t able to measure any difference between runs with and without the patch.&lt;/p&gt;

&lt;p&gt;We&apos;re currently running it through sanity and some other stress testing.&lt;/p&gt;

&lt;p&gt;Going forward, we&apos;re wondering if this is expected to be the final version of the patch.  Essentially, this fixes the problem with this performance optimization by removing the optimization.  As far as I can tell, the optimization seems to be basically allowing better re-use of cl_locks, and as I understand it, caching &amp;amp; re-use of cl_locks is slated for removal in the upcoming CLIO simplification anyway.&lt;/p&gt;

&lt;p&gt;So it seems like it may be OK to leave this optimization out.&lt;/p&gt;

&lt;p&gt;Is this the plan, or is the hope for a patch that actually fixes the performance optimization rather than removes it?&lt;/p&gt;</comment>
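<!--
    For reference on the question above: the effect of deferring sub-lock creation to the
    enqueue pass (skipping step 3 in the scenario) can be pictured with the same
    simplification as the earlier sketch. If neither "client" holds a cached stripe lock
    before enqueueing, and locks are taken in one consistent order, the AB-BA cycle cannot
    form. This is only an analogy for the patch's behaviour, assuming plain mutexes; it is
    not the actual lov/osc enqueue code.

    #include <pthread.h>
    #include <stdio.h>

    #define NSTRIPES 3

    /* One mutex per stripe stands in for the per-stripe DLM locks. */
    static pthread_mutex_t stripe_lock[NSTRIPES] = {
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER
    };

    /* Each "client" acquires its stripe locks only inside the enqueue pass, in
     * ascending stripe order, and never holds a cached lock beforehand, so no
     * hold-and-wait cycle can arise between the two of them. */
    static void *do_io(void *name)
    {
        int i;

        for (i = 0; i < NSTRIPES; i++)
            pthread_mutex_lock(&stripe_lock[i]);

        printf("%s wrote its stripes\n", (const char *)name);

        for (i = 0; i < NSTRIPES; i++)
            pthread_mutex_unlock(&stripe_lock[i]);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;

        pthread_create(&t1, NULL, do_io, (void *)"client1");
        pthread_create(&t2, NULL, do_io, (void *)"client2");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
    }
-->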
                            <comment id="75496" author="shadow" created="Thu, 23 Jan 2014 08:08:40 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/8971&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/8971&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Xyratex-bug-id: CLSTR-2003, MRP-1603. &lt;/p&gt;</comment>
                            <comment id="80524" author="spitzcor" created="Fri, 28 Mar 2014 21:18:02 +0000"  >&lt;p&gt;The patch is still pending review.  Can it land?&lt;/p&gt;</comment>
                            <comment id="80870" author="jay" created="Wed, 2 Apr 2014 16:46:57 +0000"  >&lt;p&gt;Hi Cory, Patrick confirmed that the patch couldn&apos;t fix the problem so this patch can&apos;t be landed. Did I miss anything here?&lt;/p&gt;

&lt;p&gt;Jinshan&lt;/p&gt;</comment>
                            <comment id="80883" author="paf" created="Wed, 2 Apr 2014 17:56:16 +0000"  >&lt;p&gt;Jinshan - This is my fault, actually.  I said to Cory today that it seemed like getting something landed at Intel for this was dragging on and on.  I wasn&apos;t specific about it - It&apos;s on Alexey to update the patch, rather than anyone at Intel to do something.&lt;/p&gt;

&lt;p&gt;Sorry about that!&lt;/p&gt;

&lt;p&gt;I am curious: How do you feel about the approach found in patch set 1, rather than patch set 2/3?  IE, disable the code, rather than try to fix it?  That approach is working fine for Cray, Alexey was just hoping to do better.&lt;/p&gt;</comment>
                            <comment id="81002" author="jay" created="Thu, 3 Apr 2014 22:29:58 +0000"  >&lt;p&gt;Take it easy, Patrick. To be honest, I didn&apos;t look into the patch v2 and v3.&lt;/p&gt;

&lt;p&gt;Anyway, can you please create a patch to comment the code out, as I mentioned in patch v1?&lt;/p&gt;</comment>
                            <comment id="81158" author="pichong" created="Tue, 8 Apr 2014 07:51:46 +0000"  >&lt;p&gt;Hello,&lt;/p&gt;

&lt;p&gt;I would like to mention that, on a Lustre 2.4.2 version, we were seeing client evictions when running a test case in which tasks append-write to a single shared file.&lt;/p&gt;

&lt;p&gt;After applying patch &lt;a href=&quot;http://review.whamcloud.com/9876&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/9876&lt;/a&gt; &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4558&quot; title=&quot;Crash in cl_lock_put on racer&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4558&quot;&gt;&lt;del&gt;LU-4558&lt;/del&gt;&lt;/a&gt; &quot;clio: Solve a race in cl_lock_put&quot;, the client evictions did not occur anymore.&lt;/p&gt;</comment>
                            <comment id="81164" author="paf" created="Tue, 8 Apr 2014 13:33:20 +0000"  >&lt;p&gt;Jinshan:&lt;/p&gt;

&lt;p&gt;Actually, Alexey&apos;s original patch (commenting out code) removes exactly the same code as your patch:&lt;/p&gt;

&lt;p&gt;    &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4381&quot; title=&quot;clio deadlock from truncate&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4381&quot;&gt;&lt;del&gt;LU-4381&lt;/del&gt;&lt;/a&gt; lov: to not hold sub locks at initialization&lt;/p&gt;

&lt;p&gt;    Otherwise, it will cause deadlock because it essentially holds&lt;br/&gt;
    some sub locks and then to request others in an arbitrary order.&lt;/p&gt;

&lt;p&gt;    Signed-off-by: Jinshan Xiong &amp;lt;jinshan.xiong@intel.com&amp;gt;&lt;br/&gt;
    Change-Id: I00d51677aa94ec41856402fd8a027e09355786ee&lt;br/&gt;
    Reviewed-on: &lt;a href=&quot;http://review.whamcloud.com/9152&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/9152&lt;/a&gt;&lt;br/&gt;
    Tested-by: Jenkins&lt;br/&gt;
    Tested-by: Maloo &amp;lt;hpdd-maloo@intel.com&amp;gt;&lt;br/&gt;
    Reviewed-by: Lai Siyao &amp;lt;lai.siyao@intel.com&amp;gt;&lt;br/&gt;
    Reviewed-by: Bobi Jam &amp;lt;bobijam@gmail.com&amp;gt;&lt;br/&gt;
    Reviewed-by: Oleg Drokin &amp;lt;oleg.drokin@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;Which has been landed to master.&lt;br/&gt;
So, nevermind!  This bug can be closed.&lt;/p&gt;</comment>
                            <comment id="81173" author="paf" created="Tue, 8 Apr 2014 14:33:00 +0000"  >&lt;p&gt;This is a duplicate of:&lt;br/&gt;
&lt;a href=&quot;https://jira.hpdd.intel.com/browse/LU-4381&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://jira.hpdd.intel.com/browse/LU-4381&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="81322" author="jay" created="Wed, 9 Apr 2014 20:37:02 +0000"  >&lt;p&gt;duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4381&quot; title=&quot;clio deadlock from truncate&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4381&quot;&gt;&lt;del&gt;LU-4381&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="85596" author="jlevi" created="Tue, 3 Jun 2014 15:11:55 +0000"  >&lt;p&gt;Reopening to close as duplicate as indicated in the comments.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="22449">LU-4381</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="13983" name="1590-ptlrpcd" size="2021022" author="shadow" created="Thu, 16 Jan 2014 07:55:24 +0000"/>
                            <attachment id="13984" name="1673-cp_ast" size="17976" author="shadow" created="Thu, 16 Jan 2014 07:55:24 +0000"/>
                            <attachment id="13985" name="1884-bl_ast" size="3897" author="shadow" created="Thu, 16 Jan 2014 07:55:24 +0000"/>
                            <attachment id="13986" name="2485-main" size="1285384" author="shadow" created="Thu, 16 Jan 2014 07:55:24 +0000"/>
                            <attachment id="13987" name="2487-main" size="1736308" author="shadow" created="Thu, 16 Jan 2014 07:55:24 +0000"/>
                            <attachment id="13988" name="mpi_log.c" size="842" author="shadow" created="Thu, 16 Jan 2014 07:55:24 +0000"/>
                            <attachment id="14001" name="patch" size="1555" author="paf" created="Tue, 21 Jan 2014 19:39:00 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwcxr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>12299</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>