<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:16:38 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-8334] OSS lockup</title>
                <link>https://jira.whamcloud.com/browse/LU-8334</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;OSS deadlocked; unable to ping the ethernet or IB interfaces. The console showed no errors.&lt;br/&gt;
Attaching a full trace of all threads. Most notable are the kiblnd threads:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;ID: 8711   TASK: ffff882020b12ab0  CPU: 4   COMMAND: &lt;span class=&quot;code-quote&quot;&gt;&quot;kiblnd_sd_01_01&quot;&lt;/span&gt;
 #0 [ffff880060c86e90] crash_nmi_callback at ffffffff81032256
 #1 [ffff880060c86ea0] notifier_call_chain at ffffffff81568515
 #2 [ffff880060c86ee0] atomic_notifier_call_chain at ffffffff8156857a
 #3 [ffff880060c86ef0] notify_die at ffffffff810a44fe
 #4 [ffff880060c86f20] do_nmi at ffffffff8156618f
 #5 [ffff880060c86f50] nmi at ffffffff815659f0
    [exception RIP: _spin_lock+33]
    RIP: ffffffff81565261  RSP: ffff882021b75b70  RFLAGS: 00000293
    RAX: 0000000000002b8e  RBX: ffff880ffe7dd240  RCX: 0000000000000000
    RDX: 0000000000002b8b  RSI: 0000000000000003  RDI: ffff88201ee3f140
    RBP: ffff882021b75b70   R8: 6950000000000000   R9: 4a80000000000000
    R10: 0000000000000001  R11: 0000000000000001  R12: 0000000000000018
    R13: ffff881013262e40  R14: ffff8820268ecac0  R15: 0000000000000004
    ORIG_RAX: ffffffffffffffff  CS: 0010  SS: 0018
--- &amp;lt;NMI exception stack&amp;gt; ---
 #6 [ffff882021b75b70] _spin_lock at ffffffff81565261
 #7 [ffff882021b75b78] cfs_percpt_lock at ffffffffa049edab [libcfs]
 #8 [ffff882021b75bb8] lnet_ptl_match_md at ffffffffa0529605 [lnet]
 #9 [ffff882021b75c38] lnet_parse_local at ffffffffa05306e7 [lnet]
#10 [ffff882021b75cd8] lnet_parse at ffffffffa05316da [lnet]
#11 [ffff882021b75d68] kiblnd_handle_rx at ffffffffa0a16f3b [ko2iblnd]
#12 [ffff882021b75db8] kiblnd_scheduler at ffffffffa0a182be [ko2iblnd]
#13 [ffff882021b75ee8] kthread at ffffffff8109dc8e
#14 [ffff882021b75f48] kernel_thread at ffffffff8100c28a
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>2.7.1-fe</environment>
        <key id="37846">LU-8334</key>
            <summary>OSS lockup</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="bfaccini">Bruno Faccini</assignee>
                                    <reporter username="mhanafi">Mahmoud Hanafi</reporter>
                        <labels>
                    </labels>
                <created>Mon, 27 Jun 2016 22:09:12 +0000</created>
                <updated>Thu, 14 Jun 2018 21:41:20 +0000</updated>
                            <resolved>Thu, 22 Sep 2016 21:42:45 +0000</resolved>
                                    <version>Lustre 2.7.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="157070" author="mhanafi" created="Mon, 27 Jun 2016 22:12:12 +0000"  >&lt;p&gt;One more stack trace. Looks like it could be a dup of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8249&quot; title=&quot;Potential deadlock in lnet&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8249&quot;&gt;&lt;del&gt;LU-8249&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;PID: 18374  TASK: ffff880ffe79a040  CPU: 6   COMMAND: &lt;span class=&quot;code-quote&quot;&gt;&quot;ptlrpcd_01_00&quot;&lt;/span&gt;
 #0 [ffff880060cc6e90] crash_nmi_callback at ffffffff81032256
 #1 [ffff880060cc6ea0] notifier_call_chain at ffffffff81568515
 #2 [ffff880060cc6ee0] atomic_notifier_call_chain at ffffffff8156857a
 #3 [ffff880060cc6ef0] notify_die at ffffffff810a44fe
 #4 [ffff880060cc6f20] do_nmi at ffffffff8156618f
 #5 [ffff880060cc6f50] nmi at ffffffff815659f0
    [exception RIP: _spin_lock+33]
    RIP: ffffffff81565261  RSP: ffff880f2be33ad0  RFLAGS: 00000287
    RAX: 0000000000002b92  RBX: ffff880ffe7dd240  RCX: 0000000000000000
    RDX: 0000000000002b8b  RSI: 0000000000000003  RDI: ffff88201ee3f140
    RBP: ffff880f2be33ad0   R8: ffff880bda1ea340   R9: 0000000000000000
    R10: 0000000000000047  R11: 00000000000000aa  R12: 0000000000000018
    R13: 0000000000000001  R14: 00057407d4831d60  R15: ffff882021aebb80
    ORIG_RAX: ffffffffffffffff  CS: 0010  SS: 0018
--- &amp;lt;NMI exception stack&amp;gt; ---
 #6 [ffff880f2be33ad0] _spin_lock at ffffffff81565261
 #7 [ffff880f2be33ad8] cfs_percpt_lock at ffffffffa049edab [libcfs]
 #8 [ffff880f2be33b18] LNetMEAttach at ffffffffa0523466 [lnet]
 #9 [ffff880f2be33b78] ptl_send_rpc at ffffffffa07ffa65 [ptlrpc]
#10 [ffff880f2be33c48] ptlrpc_send_new_req at ffffffffa07f39e3 [ptlrpc]
#11 [ffff880f2be33cb8] ptlrpc_check_set at ffffffffa07f7cb0 [ptlrpc]
#12 [ffff880f2be33d78] ptlrpcd_check at ffffffffa0826ad3 [ptlrpc]
#13 [ffff880f2be33dd8] ptlrpcd at ffffffffa0826f82 [ptlrpc]
#14 [ffff880f2be33ee8] kthread at ffffffff8109dc8e
#15 [ffff880f2be33f48] kernel_thread at ffffffff8100c28a
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="157105" author="bfaccini" created="Tue, 28 Jun 2016 11:24:29 +0000"  >&lt;p&gt;Hello Mahmoud,&lt;br/&gt;
I wonder if you had the chance to get a crash-dump from one of these occurrences ??&lt;br/&gt;
If not, do you have more details about the thread with the following stack ?? :&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;PID: 61350  TASK: ffff88145f9d1520  CPU: 14  COMMAND: &quot;python&quot;
 #0 [ffff8810788c6e90] crash_nmi_callback at ffffffff81032256
 #1 [ffff8810788c6ea0] notifier_call_chain at ffffffff81568515
 #2 [ffff8810788c6ee0] atomic_notifier_call_chain at ffffffff8156857a
 #3 [ffff8810788c6ef0] notify_die at ffffffff810a44fe
 #4 [ffff8810788c6f20] do_nmi at ffffffff8156618f
 #5 [ffff8810788c6f50] nmi at ffffffff815659f0
    [exception RIP: s_show+231]
    RIP: ffffffff8116ea67  RSP: ffff8817f2c83d98  RFLAGS: 00000086
    RAX: ffff88203fc004c0  RBX: 0000001b547b089e  RCX: ffff88203fc00500
    RDX: 0000000000000001  RSI: ffff880b7de4b800  RDI: ffff88203fc004d0
    RBP: ffff8817f2c83e18   R8: 0000000000000008   R9: 0000000000000008
    R10: ffffffff817dd61e  R11: 0000000000000004  R12: 000000daa3d7c8d1
    R13: ffffffff817dd61e  R14: 0000000000000801  R15: 0000000000000000
    ORIG_RAX: ffffffffffffffff  CS: 0010  SS: 0018
--- &amp;lt;NMI exception stack&amp;gt; ---
 #6 [ffff8817f2c83d98] s_show at ffffffff8116ea67
 #7 [ffff8817f2c83e20] seq_read at ffffffff811ae5b9
 #8 [ffff8817f2c83ea0] proc_reg_read at ffffffff811f4dbe
 #9 [ffff8817f2c83ef0] vfs_read at ffffffff81188fe5
#10 [ffff8817f2c83f30] sys_read at ffffffff81189121
#11 [ffff8817f2c83f80] system_call_fastpath at ffffffff8100b0d2
    RIP: 00007fffec9de500  RSP: 00007fffffffe220  RFLAGS: 00010202
    RAX: 0000000000000000  RBX: ffffffff8100b0d2  RCX: 0000000000008050
    RDX: 0000000000002c00  RSI: 00000000007989f9  RDI: 0000000000000004
    RBP: 0000000000000000   R8: 00000000007979e4   R9: 00007fffedaee700
    R10: 3834202020202030  R11: 0000000000000246  R12: 00000000007989f9
    R13: 0000000000002feb  R14: 0000000000793780  R15: 0000000000002feb
    ORIG_RAX: 0000000000000000  CS: 0033  SS: 002b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Could it be doing something similar to &quot;slabinfo&quot;?&lt;/p&gt;

&lt;p&gt;Regarding your parallel with &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8249&quot; title=&quot;Potential deadlock in lnet&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8249&quot;&gt;&lt;del&gt;LU-8249&lt;/del&gt;&lt;/a&gt;, you may be right, but for that ticket we are also missing information about the exact state and possible cause of the deadlock, so it is difficult to be sure that the patch provided there would be the solution.&lt;/p&gt;</comment>
                            <comment id="157174" author="mhanafi" created="Tue, 28 Jun 2016 19:27:02 +0000"  >&lt;p&gt;We do have a crash dump. I can upload it but it can only view by a US citizen. We have done this before.&lt;/p&gt;

&lt;p&gt;The python script runs every minute and save several files from /proc/. one of which is /proc/slabinfo. We have been running this for sometime on all our lustre servers.&lt;/p&gt;


</comment>
                            <comment id="157180" author="pjones" created="Tue, 28 Jun 2016 20:13:28 +0000"  >&lt;p&gt;Mahmoud&lt;/p&gt;

&lt;p&gt;Bruno is not a US citizen but if you are able to provide access to the crash dump we can have it examined by someone who is&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="157190" author="mhanafi" created="Tue, 28 Jun 2016 21:30:38 +0000"  >&lt;p&gt;I can upload the encrypted crash dump. Who should I send the password to?&lt;/p&gt;
</comment>
                            <comment id="157192" author="pjones" created="Tue, 28 Jun 2016 21:33:08 +0000"  >&lt;p&gt;Oleg&lt;/p&gt;</comment>
                            <comment id="157292" author="mhanafi" created="Wed, 29 Jun 2016 17:11:39 +0000"  >&lt;p&gt;core file uploaded and email was sent to Oleg. &lt;/p&gt;</comment>
                            <comment id="157523" author="bfaccini" created="Fri, 1 Jul 2016 15:03:42 +0000"  >&lt;p&gt;Oleg, just in case it may help, I have better analyzed the all/whole stacks listing originally provided, and I am definitely convinced now that, as I already strongly suspected before, this &quot;python&quot; thread parsing the /proc/slabinfo stuff is the one blocking all the others.&lt;/p&gt;

&lt;p&gt;As per its stack content, that I have already pointed before, it should have taken/spin_lock()&apos;ed a kmem_cache-&amp;gt;nodelists[]-&amp;gt;list_lock (located @0xffff88203fc00500), that&apos;s blocking several others threads, and particularly 2 kiblnd threads (with PIDs 8713/8715) that I have found with the following similar stack :&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;PID: 8713   TASK: ffff882027d55520  CPU: 9   COMMAND: &quot;kiblnd_sd_02_01&quot;
 #0 [ffff881078826e90] crash_nmi_callback at ffffffff81032256
 #1 [ffff881078826ea0] notifier_call_chain at ffffffff81568515
 #2 [ffff881078826ee0] atomic_notifier_call_chain at ffffffff8156857a
 #3 [ffff881078826ef0] notify_die at ffffffff810a44fe
 #4 [ffff881078826f20] do_nmi at ffffffff8156618f
 #5 [ffff881078826f50] nmi at ffffffff815659f0
    [exception RIP: _spin_lock+33]
    RIP: ffffffff81565261  RSP: ffff88201e8c5ba0  RFLAGS: 00000097
    RAX: 00000000000056fb  RBX: ffff881376245200  RCX: 0000000000000001
    RDX: 00000000000056f7  RSI: 000000000000005a  RDI: ffff88203fc00500
    RBP: ffff88201e8c5ba0   R8: 0000000000000000   R9: ffff881376245200
    R10: 0000000000000001  R11: 0000000000000000  R12: ffff8820294f8c00
    R13: 0000000000000286  R14: ffff88103fcb02c0  R15: ffff88203fc004c0
    ORIG_RAX: ffffffffffffffff  CS: 0010  SS: 0018
--- &amp;lt;NMI exception stack&amp;gt; ---
 #6 [ffff88201e8c5ba0] _spin_lock at ffffffff81565261
 #7 [ffff88201e8c5ba8] kfree at ffffffff811704b9
 #8 [ffff88201e8c5c18] null_free_rs at ffffffffa08412b0 [ptlrpc]
 #9 [ffff88201e8c5c38] sptlrpc_svc_free_rs at ffffffffa082fded [ptlrpc]
#10 [ffff88201e8c5c58] lustre_free_reply_state at ffffffffa0802f64 [ptlrpc]
#11 [ffff88201e8c5c68] reply_out_callback at ffffffffa0809030 [ptlrpc]
#12 [ffff88201e8c5c98] ptlrpc_master_callback at ffffffffa08085ca [ptlrpc]
#13 [ffff88201e8c5ca8] lnet_eq_enqueue_event at ffffffffa0525b55 [lnet]
#14 [ffff88201e8c5cc8] lnet_msg_detach_md at ffffffffa0523cef [lnet]
#15 [ffff88201e8c5cf8] lnet_finalize at ffffffffa0524dd6 [lnet]
#16 [ffff88201e8c5d68] kiblnd_tx_done at ffffffffa0a11fd7 [ko2iblnd]
#17 [ffff88201e8c5db8] kiblnd_scheduler at ffffffffa0a18170 [ko2iblnd]
#18 [ffff88201e8c5ee8] kthread at ffffffff8109dc8e
#19 [ffff88201e8c5f48] kernel_thread at ffffffff8100c28a
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;and according to this stack&apos;s content, these 2 threads should have spin_lock()&apos;ed (via lnet_res_lock()/cfs_percpt_lock(), before calling lnet_msg_detach_md()) the lock(s) that all the other threads spinning in cfs_percpt_lock() are waiting for.&lt;/p&gt;
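&lt;p&gt;For context, below is a minimal userspace sketch of that per-CPU-partition locking pattern (loosely modeled on libcfs&apos; cfs_percpt_lock(); the names, types and partition count are illustrative, not the actual Lustre implementation). It shows why one stuck holder of a single partition makes the other callers pile up:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;/* Hedged sketch of a per-CPU-partition (&quot;percpt&quot;) lock; illustrative
 * names only, not the libcfs code. Lock initialization is omitted. */
#include &amp;lt;pthread.h&amp;gt;

#define PCL_NCPT     4   /* number of CPU partitions (made up here) */
#define PCL_LOCK_EX (-1) /* exclusive mode: take all partitions */

struct pcl_lock {
        pthread_spinlock_t locks[PCL_NCPT];
};

/* Take one partition, or, in exclusive mode, all of them in order. */
static void pcl_lock(struct pcl_lock *pcl, int index)
{
        int i;

        if (index != PCL_LOCK_EX) {
                pthread_spin_lock(&amp;amp;pcl-&amp;gt;locks[index]);
                return;
        }
        /* Exclusive mode spins on every partition lock in order, so if
         * any one partition is held by a thread that is itself stuck
         * (here, in kfree() waiting on the slab list_lock), callers
         * pile up spinning in these loops, matching the many
         * _spin_lock/cfs_percpt_lock stacks in this ticket. */
        for (i = 0; i &amp;lt; PCL_NCPT; i++)
                pthread_spin_lock(&amp;amp;pcl-&amp;gt;locks[i]);
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;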

&lt;p&gt;Now the question is, why does s_show() take so long to release its kmem_cache-&amp;gt;nodelists[]-&amp;gt;list_lock? Could there be a loop in any of the associated slabs_full/slabs_partial lists?&lt;/p&gt;
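&lt;p&gt;To illustrate that suspicion (a hedged sketch, not the kernel&apos;s slabinfo code): the slab lists are walked under list_lock with the usual &quot;stop when back at the list head&quot; idiom, so a corrupted next pointer that splices the list into a cycle bypassing the head would make the walk, and therefore the lock hold time, endless:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;/* Sketch of a circular-list walk like the one done while holding
 * list_lock; simplified, not the actual s_show() code. */
struct list_head {
        struct list_head *next, *prev;
};

static unsigned long count_slabs(struct list_head *head)
{
        unsigned long n = 0;
        struct list_head *p;

        /* Terminates only by returning to the head. If corruption makes
         * some node&apos;s next pointer enter a cycle that never reaches the
         * head again (e.g. two nodes pointing at each other), this loop
         * spins forever while list_lock stays held. */
        for (p = head-&amp;gt;next; p != head; p = p-&amp;gt;next)
                n++;
        return n;
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>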
                            <comment id="157713" author="mhanafi" created="Tue, 5 Jul 2016 20:18:44 +0000"  >&lt;p&gt;Could I get an update on the progress of this issue.&lt;/p&gt;

&lt;p&gt;Thanks,&lt;/p&gt;</comment>
                            <comment id="157716" author="green" created="Tue, 5 Jul 2016 21:42:26 +0000"  >&lt;p&gt;Bruno: it looks like you are on to something.&lt;/p&gt;

&lt;p&gt;When I do &quot;kmem -s&quot; in the crash dump, I get this error:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;kmem: size-512: full list: slab: ffff8813e0ab36c0  bad prev pointer: ffff881ae71d4540
kmem: size-512: full list: slab: ffff8813e0ab36c0  bad inuse counter: 0
kmem: size-512: full list: slab: ffff8813e0ab36c0  bad s_mem pointer: ffff881ae71d4000
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Indeed, ffff881ae71d4540 is an invalid kernel address that I cannot access. The same goes for ffff881ae71d4000.&lt;/p&gt;</comment>
                            <comment id="157796" author="bfaccini" created="Wed, 6 Jul 2016 13:19:44 +0000"  >&lt;p&gt;Well slab at ffff8813e0ab36c0 may be corrupted and cause s_show() to loop ... Also, for which kmem_cache has it been allocated ?&lt;/p&gt;</comment>
                            <comment id="157820" author="green" created="Wed, 6 Jul 2016 16:12:11 +0000"  >&lt;p&gt;it&apos;s &quot;size-512&quot;&lt;/p&gt;</comment>
                            <comment id="158104" author="bfaccini" created="Fri, 8 Jul 2016 09:27:47 +0000"  >&lt;p&gt;Right, &quot;crash/kmem -s&quot; had already indicated this!&lt;br/&gt;
But then, what about the full content of this slab and even &quot;kmem -S size-512&quot; output ?&lt;/p&gt;</comment>
                            <comment id="158125" author="green" created="Fri, 8 Jul 2016 15:35:20 +0000"  >&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;crash&amp;gt; p *(struct slab *)0xffff8813e0ab36c0
$2 = {
  list = {
    next = 0xffff8817213f0000, 
    prev = 0xffff881ae71d4540
  }, 
  colouroff = 0, 
  s_mem = 0xffff881ae71d4000, 
  inuse = 0, 
  free = 0, 
  nodeid = 1
}

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&quot;kmem -S size-512&quot; does not list the content of this particular slab because it is corrupted.&lt;br/&gt;
The s_mem location is also invalid, so I cannot peek inside.&lt;/p&gt;
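&lt;p&gt;As a reading aid for these dumps: the header being decoded is the RHEL6/2.6.32-era SLAB descriptor from mm/slab.c, reproduced here from memory, so treat the field notes as a sketch rather than authoritative:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;typedef unsigned int kmem_bufctl_t;

struct list_head {
        struct list_head *next, *prev;
};

struct slab {
        struct list_head list;   /* slabs_full/slabs_partial/slabs_free
                                  * linkage; $2&apos;s prev is unmapped */
        unsigned long colouroff; /* cache-colouring offset */
        void *s_mem;             /* first object (also unmapped in $2) */
        unsigned int inuse;      /* objects allocated from this slab;
                                  * 0 on the full list is contradictory */
        kmem_bufctl_t free;      /* index of the first free object, or
                                  * BUFCTL_END (0xffffffff) when none */
        unsigned short nodeid;   /* NUMA node of the backing page(s) */
};
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>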
                            <comment id="158557" author="bfaccini" created="Tue, 12 Jul 2016 22:18:34 +0000"  >&lt;p&gt;And what about the next = 0xffff8817213f0000 pointer, is it pointing to a valid area ? And if yes, of which type/family ? &lt;br/&gt;
Also, does it leads to a loop that may be the cause of the looping/pseudo-hung execution of s_show()? &lt;br/&gt;
Last, does the corruption of the slab at 0xffff8813e0ab36c0 seem to come from an overrun from previous locations ??&lt;/p&gt;</comment>
                            <comment id="158567" author="green" created="Tue, 12 Jul 2016 23:22:23 +0000"  >&lt;p&gt;Yes, the next pointer does point to a valid area:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;crash&amp;gt; p *(struct slab *)0xffff8817213f0000
$4 = {
  list = {
    next = 0xffff880b7de4b800, 
    prev = 0xffff880b01a70800
  }, 
  colouroff = 1214182228249458060, 
  s_mem = 0x0, 
  inuse = 2690861296, 
  free = 4294967295, 
  nodeid = 0
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;I suspect it&apos;s the same type, but I am not sure how to check that easily.&lt;br/&gt;
With some light probing I do not see a real loop in there, but I do see that there is an alternative valid path around this next node:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;crash&amp;gt; p *(struct slab *)0xffff880b7de4b800
$5 = {
  list = {
    next = 0xffff880c60693800, 
    prev = 0xffff8817213f0000
  }, 
  colouroff = 1214182228245329292, 
  s_mem = 0x0, 
  inuse = 2690860912, 
  free = 4294967295, 
  nodeid = 0
}
crash&amp;gt; p *(struct slab *)0xffff880b01a70800
$6 = {
  list = {
    next = 0xffff8817213f0000, 
    prev = 0xffff88180be3e340
  }, 
  colouroff = 1214182229118793100, 
  s_mem = 0x0, 
  inuse = 2690860912, 
  free = 4294967295, 
  nodeid = 0
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="160436" author="bfaccini" created="Mon, 1 Aug 2016 15:42:39 +0000"  >&lt;p&gt;After working with Oleg&apos;s help on the crash-dump for this ticket, it seems like there is strong evidence to suggest (similar Slab corruption signature seen during &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7980&quot; title=&quot;Overrun in generic &amp;lt;size-128&amp;gt; kmem_cache Slabs causing OSS to crash&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7980&quot;&gt;&lt;del&gt;LU-7980&lt;/del&gt;&lt;/a&gt; tracking) that this is a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7980&quot; title=&quot;Overrun in generic &amp;lt;size-128&amp;gt; kmem_cache Slabs causing OSS to crash&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7980&quot;&gt;&lt;del&gt;LU-7980&lt;/del&gt;&lt;/a&gt; and it would make sense to use the fix from that and see if that is sufficient.&lt;/p&gt;</comment>
                            <comment id="166924" author="mhanafi" created="Thu, 22 Sep 2016 16:10:26 +0000"  >&lt;p&gt;Close this case we will track &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7980&quot; title=&quot;Overrun in generic &amp;lt;size-128&amp;gt; kmem_cache Slabs causing OSS to crash&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7980&quot;&gt;&lt;del&gt;LU-7980&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="35796">LU-7980</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="22031" name="oss.bt.all" size="782418" author="mhanafi" created="Mon, 27 Jun 2016 22:09:12 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzyfxb:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>