<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:18:02 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-1596] mds crash after recovery finished</title>
                <link>https://jira.whamcloud.com/browse/LU-1596</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;See attached console logs. The MDS went through recovery but didn&apos;t finish it and restarted recovery. At the end of the second recovery cycle it crashed.&lt;/p&gt;

&lt;p&gt;The crash is at line 1252 of the console log:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;BUG: scheduling while atomic: mdt_33/5450/0xffff8800
BUG: unable to handle kernel paging request at 0000000000015fc0
IP: [&amp;lt;ffffffff81523c5e&amp;gt;] _spin_lock+0xe/0x30
PGD 5cc604067 PUD 5cc608067 PMD 0
Oops: 0002 [#1] SMP
last sysfs file: /sys/devices/pci0000:00/0000:00:1c.0/0000:03:00.0/host1/rport-1:0-0/target1:0:0/1:0:0:1/state

Entering kdb (current=0xffffffff81a2d020, pid 0) on processor 0 Oops: (null)
due to oops @ 0xffffffff81523c5e
     r15 = 0x0000000000000003      r14 = 0x0000000000015fc0
     r13 = 0xffff880040e03998      r12 = 0xffff8804432c9500
      bp = 0xffff880040e03930       bx = 0x0000000000015fc0
     r11 = 0x0000000000000028      r10 = 0x0000000000000000
      r9 = 0xfbad0278679d3602       r8 = 0x0000000000000000
      ax = 0x0000000000010000       cx = 0xffff880040e03ab0
      dx = 0x0000000000000082       si = 0xffff880040e03998
      di = 0x0000000000015fc0  orig_ax = 0xffffffffffffffff
      ip = 0xffffffff81523c5e       cs = 0x0000000000000010
   flags = 0x0000000000010006       sp = 0xffff880040e03920
      ss = 0x0000000000000018 &amp;amp;regs = 0xffff880040e03888
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>2.1.2 server&lt;br/&gt;
2.1.1 and 2.1.2 clients</environment>
        <key id="15122">LU-1596</key>
            <summary>mds crash after recovery finished</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="laisiyao">Lai Siyao</assignee>
                                    <reporter username="mhanafi">Mahmoud Hanafi</reporter>
                        <labels>
                    </labels>
                <created>Tue, 3 Jul 2012 16:34:46 +0000</created>
                <updated>Thu, 20 Sep 2012 19:25:08 +0000</updated>
                            <resolved>Thu, 20 Sep 2012 19:25:08 +0000</resolved>
                                    <version>Lustre 2.1.2</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                                                                            <comments>
                            <comment id="41467" author="pjones" created="Thu, 5 Jul 2012 01:59:54 +0000"  >&lt;p&gt;Lai&lt;/p&gt;

&lt;p&gt;Could you please look into this one?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="41476" author="laisiyao" created="Thu, 5 Jul 2012 05:05:52 +0000"  >&lt;p&gt;The log shows it BUG on pid 5450:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;BUG: scheduling while atomic: mdt_33/5450/0xffff8800
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The backtrace for 5450 is as below:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
mdt_33        D 0000000000000246     0  5450      2 0x3005ff40
 ffff8804432ca750 0000000000000018 ffff88061feaa378 0000000000000246
 ffff8804432ca780 ffffffffa00023ac ffff880621185400 ffff88042f278690
 ffff8804432ca7e0 ffff88061ffe8f20 ffff8804432ca7b0 ffffffff81254f74
Call Trace:
 [&amp;lt;ffffffffa00023ac&amp;gt;] ? dm_dispatch_request+0x3c/0x70 [dm_mod]
 [&amp;lt;ffffffff81254f74&amp;gt;] ? blk_unplug+0x34/0x70
 [&amp;lt;ffffffff811a7c90&amp;gt;] ? sync_buffer+0x0/0x50
 [&amp;lt;ffffffff81520d83&amp;gt;] ? printk+0x41/0x46
 [&amp;lt;ffffffff81056681&amp;gt;] ? __schedule_bug+0x41/0x70
 [&amp;lt;ffffffff81521978&amp;gt;] ? thread_return+0x638/0x760
 [&amp;lt;ffffffffa000420c&amp;gt;] ? dm_table_unplug_all+0x5c/0x100 [dm_mod]
 [&amp;lt;ffffffff8109ac19&amp;gt;] ? ktime_get_ts+0xa9/0xe0
 [&amp;lt;ffffffff811a7c90&amp;gt;] ? sync_buffer+0x0/0x50
 [&amp;lt;ffffffff81521b13&amp;gt;] ? io_schedule+0x73/0xc0
 [&amp;lt;ffffffff811a7cd0&amp;gt;] ? sync_buffer+0x40/0x50
 [&amp;lt;ffffffff815224cf&amp;gt;] ? __wait_on_bit+0x5f/0x90
 [&amp;lt;ffffffff811a7c90&amp;gt;] ? sync_buffer+0x0/0x50
 [&amp;lt;ffffffff81522578&amp;gt;] ? out_of_line_wait_on_bit+0x78/0x90
 [&amp;lt;ffffffff81090030&amp;gt;] ? wake_bit_function+0x0/0x50
 [&amp;lt;ffffffff811a7c86&amp;gt;] ? __wait_on_buffer+0x26/0x30
 [&amp;lt;ffffffffa0abb63c&amp;gt;] ? ldiskfs_mb_init_cache+0x24c/0xa30 [ldiskfs]
 [&amp;lt;ffffffffa0abbf3e&amp;gt;] ? ldiskfs_mb_init_group+0x11e/0x210 [ldiskfs]
 [&amp;lt;ffffffffa0abc0fd&amp;gt;] ? ldiskfs_mb_good_group+0xcd/0x110 [ldiskfs]
 [&amp;lt;ffffffffa0abf0bb&amp;gt;] ? ldiskfs_mb_regular_allocator+0x19b/0x410 [ldiskfs]
 [&amp;lt;ffffffff8152286e&amp;gt;] ? mutex_lock+0x1e/0x50
 [&amp;lt;ffffffffa0abf762&amp;gt;] ? ldiskfs_mb_new_blocks+0x432/0x660 [ldiskfs]
 [&amp;lt;ffffffffa0aa61fe&amp;gt;] ? ldiskfs_ext_find_extent+0x2ce/0x330 [ldiskfs]
 [&amp;lt;ffffffffa0aa93b4&amp;gt;] ? ldiskfs_ext_get_blocks+0x1114/0x1a10 [ldiskfs]
 [&amp;lt;ffffffffa0a7f541&amp;gt;] ? __jbd2_journal_file_buffer+0xd1/0x220 [jbd2]
 [&amp;lt;ffffffffa0a8075f&amp;gt;] ? jbd2_journal_dirty_metadata+0xff/0x150 [jbd2]
 [&amp;lt;ffffffffa0aa3f3b&amp;gt;] ? __ldiskfs_handle_dirty_metadata+0x7b/0x100 [ldiskfs]
 [&amp;lt;ffffffffa0ab1665&amp;gt;] ? ldiskfs_get_blocks+0xf5/0x2a0 [ldiskfs]
 [&amp;lt;ffffffffa0ad8048&amp;gt;] ? __ldiskfs_journal_stop+0x68/0xa0 [ldiskfs]
 [&amp;lt;ffffffffa0ab62b9&amp;gt;] ? ldiskfs_getblk+0x79/0x1f0 [ldiskfs]
 [&amp;lt;ffffffffa0ab6448&amp;gt;] ? ldiskfs_bread+0x18/0x80 [ldiskfs]
 [&amp;lt;ffffffffa0b5e2a7&amp;gt;] ? fsfilt_ldiskfs_write_handle+0x147/0x340 [fsfilt_ldiskfs]
 [&amp;lt;ffffffffa0b5e55c&amp;gt;] ? fsfilt_ldiskfs_write_record+0xbc/0x1d0 [fsfilt_ldiskfs]
 [&amp;lt;ffffffffa06382ec&amp;gt;] ? llog_lvfs_write_blob+0x2bc/0x460 [obdclass]
 [&amp;lt;ffffffffa06338b8&amp;gt;] ? llog_init_handle+0xa18/0xa70 [obdclass]
 [&amp;lt;ffffffffa0639b5a&amp;gt;] ? llog_lvfs_write_rec+0x40a/0xf00 [obdclass]
 [&amp;lt;ffffffffa0636a65&amp;gt;] ? llog_cat_add_rec+0xf5/0x840 [obdclass]
 [&amp;lt;ffffffffa063d306&amp;gt;] ? llog_obd_origin_add+0x56/0x190 [obdclass]
 [&amp;lt;ffffffffa063d4e1&amp;gt;] ? llog_add+0xa1/0x3c0 [obdclass]
 [&amp;lt;ffffffff8115c66a&amp;gt;] ? kmem_getpages+0xba/0x170
 [&amp;lt;ffffffffa09610cc&amp;gt;] ? lov_llog_origin_add+0xcc/0x5d0 [lov]
 [&amp;lt;ffffffffa063d4e1&amp;gt;] ? llog_add+0xa1/0x3c0 [obdclass]
 [&amp;lt;ffffffffa0b7449e&amp;gt;] ? mds_llog_origin_add+0xae/0x2e0 [mds]
 [&amp;lt;ffffffffa0ab0651&amp;gt;] ? __ldiskfs_get_inode_loc+0xf1/0x3b0 [ldiskfs]
 [&amp;lt;ffffffffa0995d0b&amp;gt;] ? lov_tgt_maxbytes+0x5b/0xb0 [lov]
 [&amp;lt;ffffffffa063d4e1&amp;gt;] ? llog_add+0xa1/0x3c0 [obdclass]
 [&amp;lt;ffffffffa0b74b12&amp;gt;] ? mds_llog_add_unlink+0x162/0x520 [mds]
 [&amp;lt;ffffffffa0b75206&amp;gt;] ? mds_log_op_unlink+0x196/0x9a0 [mds]
 [&amp;lt;ffffffffa0b9d7de&amp;gt;] ? mdd_unlink_log+0x4e/0x100 [mdd]
 [&amp;lt;ffffffffa0b973ab&amp;gt;] ? mdd_attr_get_internal+0x7ab/0xb10 [mdd]
 [&amp;lt;ffffffffa0b91a8e&amp;gt;] ? mdd_object_kill+0x14e/0x1b0 [mdd]
 [&amp;lt;ffffffffa0bab8ce&amp;gt;] ? mdd_finish_unlink+0x20e/0x2c0 [mdd]
 [&amp;lt;ffffffffa0baa780&amp;gt;] ? __mdd_ref_del+0x40/0xc0 [mdd]
 [&amp;lt;ffffffffa0bb704c&amp;gt;] ? mdd_rename+0x1ffc/0x2240 [mdd]
 [&amp;lt;ffffffffa0b96dcb&amp;gt;] ? mdd_attr_get_internal+0x1cb/0xb10 [mdd]
 [&amp;lt;ffffffffa0589caf&amp;gt;] ? cfs_hash_bd_from_key+0x3f/0xc0 [libcfs]
 [&amp;lt;ffffffffa0c7f219&amp;gt;] ? cmm_mode_get+0x109/0x320 [cmm]
 [&amp;lt;ffffffffa0c7fd3a&amp;gt;] ? cml_rename+0x33a/0xbb0 [cmm]
 [&amp;lt;ffffffffa058a337&amp;gt;] ? cfs_hash_bd_get+0x37/0x90 [libcfs]
 [&amp;lt;ffffffffa0c7f49d&amp;gt;] ? cmm_is_subdir+0x6d/0x2f0 [cmm]
 [&amp;lt;ffffffffa067d8e6&amp;gt;] ? lu_object_put+0x86/0x210 [obdclass]
 [&amp;lt;ffffffffa0c09426&amp;gt;] ? mdt_reint_rename+0x1fa6/0x2400 [mdt]
 [&amp;lt;ffffffffa05909db&amp;gt;] ? upcall_cache_get_entry+0x28b/0xa14 [libcfs]
 [&amp;lt;ffffffffa0c0197f&amp;gt;] ? mdt_rename_unpack+0x46f/0x6c0 [mdt]
 [&amp;lt;ffffffffa0bb96c6&amp;gt;] ? md_ucred+0x26/0x60 [mdd]
 [&amp;lt;ffffffffa0c01c0f&amp;gt;] ? mdt_reint_rec+0x3f/0x100 [mdt]
 [&amp;lt;ffffffffa07726e4&amp;gt;] ? lustre_msg_get_flags+0x34/0xa0 [ptlrpc]
 [&amp;lt;ffffffffa0bfa004&amp;gt;] ? mdt_reint_internal+0x6d4/0x9f0 [mdt]
 [&amp;lt;ffffffffa0bef9f6&amp;gt;] ? mdt_reint_opcode+0x96/0x160 [mdt]
 [&amp;lt;ffffffffa0bfa36c&amp;gt;] ? mdt_reint+0x4c/0x120 [mdt]
 [&amp;lt;ffffffffa07721b8&amp;gt;] ? lustre_msg_check_version+0xc8/0xe0 [ptlrpc]
 [&amp;lt;ffffffffa0bedc65&amp;gt;] ? mdt_handle_common+0x8d5/0x1810 [mdt]
 [&amp;lt;ffffffffa076fe44&amp;gt;] ? lustre_msg_get_opc+0x94/0x100 [ptlrpc]
 [&amp;lt;ffffffffa0beec75&amp;gt;] ? mdt_regular_handle+0x15/0x20 [mdt]
 [&amp;lt;ffffffffa0780b89&amp;gt;] ? ptlrpc_main+0xbb9/0x1990 [ptlrpc]
 [&amp;lt;ffffffffa077ffd0&amp;gt;] ? ptlrpc_main+0x0/0x1990 [ptlrpc]
 [&amp;lt;ffffffff8100c14a&amp;gt;] ? child_rip+0xa/0x20
 [&amp;lt;ffffffffa077ffd0&amp;gt;] ? ptlrpc_main+0x0/0x1990 [ptlrpc]
 [&amp;lt;ffffffffa077ffd0&amp;gt;] ? ptlrpc_main+0x0/0x1990 [ptlrpc]
 [&amp;lt;ffffffff8100c140&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The backtrace looks normal (though the stack is a bit deep), but the kernel reported scheduling while atomic. Alex, could you give a hint?&lt;/p&gt;</comment>
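<!--
Background on the "scheduling while atomic" message (an illustrative sketch, not part
of the original ticket): on the 2.6.32-era x86_64 kernels involved here, schedule()
complains when the per-task preempt count is nonzero, and that count lives in struct
thread_info at the low end of the same 8 KiB allocation as the kernel stack itself:

    /* simplified from include/linux/sched.h of that era */
    union thread_union {
        struct thread_info thread_info;   /* holds flags, preempt_count, ... */
        unsigned long stack[THREAD_SIZE/sizeof(long)];  /* grows down toward it */
    };

    /* simplified check from kernel/sched.c:schedule_debug() */
    if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
        __schedule_bug(prev);   /* prints "BUG: scheduling while atomic" */

A deep enough stack can therefore overwrite preempt_count and make the message fire
on a path that never held a spinlock. Note that the count printed in the message
above, 0xffff8800, looks like the upper half of a kernel address rather than a
plausible nesting count.
-->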
                            <comment id="42901" author="jaylan" created="Wed, 8 Aug 2012 20:30:40 +0000"  >&lt;p&gt;We hit this bug again last night. &lt;br/&gt;
This time it was not during reboot as was described when we open this ticket.&lt;/p&gt;

&lt;p&gt;Here is what was shown on the console:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: MGS: haven&apos;t heard from client af2ee3e4-cf7b-2e45-4650-a3f788979f14 (at 10.151.6.3@o2ib) in 227 seconds. I think it&apos;s dead, and I am evicting it. exp ffff8811f27af000, cur 1344393811 expire 1344393661 last 1344393584
Lustre: Skipped 277 previous similar messages
Lustre: MGS: haven&apos;t heard from client 587217b0-8285-b0d5-1696-6665442381c8 (at 10.151.5.129@o2ib) in 227 seconds. I think it&apos;s dead, and I am evicting it. exp ffff8811f57aa800, cur 1344393811 expire 1344393661 last 1344393584
LustreError: 9602:0:(llog_cat.c:298:llog_cat_add_rec()) llog_write_rec -28: lh=ffff881032b9d8c0
BUG: scheduling while atomic: mdt_268/9115/0xffff8809
BUG: unable to handle kernel paging request at 0000000181c1fa20
IP: [&amp;lt;ffffffff81051ddd&amp;gt;] task_rq_lock+0x4d/0xa0
PGD 0
Oops: 0000 [#1] SMP
last sysfs file: /sys/devices/pci0000:00/0000:00:1c.0/0000:03:00.0/0000:04:04.1/irq

Entering kdb (current=0xffff881227f13500, pid 0) on processor 10 Oops: (null)
due to oops @ 0xffffffff81051ddd
     r15 = 0x0000000000000003      r14 = 0x0000000000015fc0
     r13 = 0xffff88095fc83978      r12 = 0xffff8811da902a80
      bp = 0xffff88095fc83940       bx = 0x0000000000015fc0
     r11 = 0x0000000000000028      r10 = 0x0000000000000000
      r9 = 0xf9ffdd6e00faa202       r8 = 0x0000000000000000
      ax = 0x0000000040010dc0       cx = 0xffff88095fc83a90
      dx = 0x0000000000000082       si = 0xffff88095fc83978
      di = 0xffff8811da902a80  orig_ax = 0xffffffffffffffff
      ip = 0xffffffff81051ddd       cs = 0x0000000000000010
   flags = 0x0000000000010082       sp = 0xffff88095fc83910
      ss = 0x0000000000000018 &amp;amp;regs = 0xffff88095fc83878
[10]kdb&amp;gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The crash analysis showed the stack trace of the task that caused the crash:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;--- &amp;lt;NMI exception stack&amp;gt; ---
 #6 [ffff88095fc63ae8] oops_begin at ffffffff81524f1e
 #7 [ffff88095fc63b00] no_context at ffffffff8104230c
 #8 [ffff88095fc63b50] __bad_area_nosemaphore at ffffffff81042615
 #9 [ffff88095fc63ba0] bad_area_nosemaphore at ffffffff810426e3
#10 [ffff88095fc63bb0] __do_page_fault at ffffffff81042d9d
#11 [ffff88095fc63cd0] do_page_fault at ffffffff81526e5e
#12 [ffff88095fc63d00] page_fault at ffffffff81524175
    [exception RIP: update_curr+324]
    RIP: ffffffff81051974  RSP: ffff88095fc63db8  RFLAGS: 00010086
    RAX: ffff8811da902a80  RBX: 0000000040010dc0  RCX: ffff881227d103c0
    RDX: 0000000000018b48  RSI: 0000000000000000  RDI: 0000000000000001
    RBP: ffff88095fc63de8   R8: 0000000000000000   R9: 00000000001208e0
    R10: 0000000000000010  R11: 00000000001208e0  R12: ffff88095fc76028
    R13: 000001131ede6d18  R14: 00014b731e02665b  R15: ffff8811da902a80
    ORIG_RAX: ffffffffffffffff  CS: 0010  SS: 0018
#13 [ffff88095fc63df0] task_tick_fair at ffffffff81052bab
#14 [ffff88095fc63e20] scheduler_tick at ffffffff810568d1
#15 [ffff88095fc63e60] update_process_times at ffffffff8107b5e2
#16 [ffff88095fc63e90] tick_sched_timer at ffffffff8109ffe6
#17 [ffff88095fc63ec0] __run_hrtimer at ffffffff8109476e
#18 [ffff88095fc63f10] hrtimer_interrupt at ffffffff81094b16
#19 [ffff88095fc63f90] smp_apic_timer_interrupt at ffffffff815297cb
#20 [ffff88095fc63fb0] apic_timer_interrupt at ffffffff8100bc13
--- &amp;lt;IRQ stack&amp;gt; ---
#21 [ffff8811da8aa6a8] apic_timer_interrupt at ffffffff8100bc13
    [exception RIP: vprintk+465]
    RIP: ffffffff81069c61  RSP: ffff8811da8aa750  RFLAGS: 00000246
    RAX: 0000000000010e28  RBX: ffff8811da8aa7e0  RCX: 0000000000001b45
    RDX: ffff88095fc60000  RSI: 0000000000000046  RDI: 0000000000000246
    RBP: ffffffff8100bc0e   R8: ffffffff81ba2580   R9: 0000000000000000
    R10: 0000000000000007  R11: 000000000000000f  R12: 00000000000b29d3
    R13: ffffffff81068fa5  R14: ffff8811da8aa6e0  R15: 0000000000000046
    ORIG_RAX: ffffffffffffff10  CS: 0010  SS: 0018
#22 [ffff8811da8aa7e8] printk at ffffffff81520d83
#23 [ffff8811da8aa848] __schedule_bug at ffffffff81056681
#24 [ffff8811da8aa868] thread_return at ffffffff81521978
#25 [ffff8811da8aa928] io_schedule at ffffffff81521b13
#26 [ffff8811da8aa948] sync_buffer at ffffffff811a7cd0
#27 [ffff8811da8aa958] __wait_on_bit at ffffffff815224cf
#28 [ffff8811da8aa9a8] out_of_line_wait_on_bit at ffffffff81522578
#29 [ffff8811da8aaa18] __wait_on_buffer at ffffffff811a7c86
#30 [ffff8811da8aaa28] ldiskfs_mb_init_cache at ffffffffa0b4f63c [ldiskfs]
#31 [ffff8811da8aab08] ldiskfs_mb_init_group at ffffffffa0b4ff3e [ldiskfs]
#32 [ffff8811da8aab58] ldiskfs_mb_good_group at ffffffffa0b500fd [ldiskfs]
#33 [ffff8811da8aab98] ldiskfs_mb_regular_allocator at ffffffffa0b530bb [ldiskfs]
#34 [ffff8811da8aac48] ldiskfs_mb_new_blocks at ffffffffa0b53762 [ldiskfs]
#35 [ffff8811da8aace8] ldiskfs_ext_get_blocks at ffffffffa0b3d3b4 [ldiskfs]
#36 [ffff8811da8aae58] ldiskfs_get_blocks at ffffffffa0b45665 [ldiskfs]
#37 [ffff8811da8aaed8] ldiskfs_getblk at ffffffffa0b4a2b9 [ldiskfs]
#38 [ffff8811da8aaf98] ldiskfs_bread at ffffffffa0b4a448 [ldiskfs]
#39 [ffff8811da8aafc8] fsfilt_ldiskfs_write_handle at ffffffffa0bf72a7 [fsfilt_ldiskfs]
#40 [ffff8811da8ab078] fsfilt_ldiskfs_write_record at ffffffffa0bf755c [fsfilt_ldiskfs]
#41 [ffff8811da8ab0f8] llog_lvfs_write_blob at ffffffffa06c32ec [obdclass]
#42 [ffff8811da8ab1a8] llog_lvfs_write_rec at ffffffffa06c502e [obdclass]
#43 [ffff8811da8ab288] llog_cat_add_rec at ffffffffa06c1a65 [obdclass]
#44 [ffff8811da8ab308] llog_obd_origin_add at ffffffffa06c8306 [obdclass]
#45 [ffff8811da8ab368] llog_add at ffffffffa06c84e1 [obdclass]
#46 [ffff8811da8ab3d8] lov_llog_origin_add at ffffffffa09f60cc [lov]
#47 [ffff8811da8ab488] llog_add at ffffffffa06c84e1 [obdclass]
#48 [ffff8811da8ab4f8] mds_llog_origin_add at ffffffffa0c1349e [mds]
#49 [ffff8811da8ab578] llog_add at ffffffffa06c84e1 [obdclass]
#50 [ffff8811da8ab5e8] mds_llog_add_unlink at ffffffffa0c13b12 [mds]
#51 [ffff8811da8ab668] mds_log_op_unlink at ffffffffa0c14206 [mds]
#52 [ffff8811da8ab6f8] mdd_unlink_log at ffffffffa0c3c7de [mdd]
#53 [ffff8811da8ab758] mdd_object_kill at ffffffffa0c30a8e [mdd]
#54 [ffff8811da8ab7b8] mdd_finish_unlink at ffffffffa0c4a8ce [mdd]
#55 [ffff8811da8ab838] mdd_rename at ffffffffa0c5604c [mdd]
#56 [ffff8811da8ab9b8] cml_rename at ffffffffa0d1ed3a [cmm]
#57 [ffff8811da8aba68] mdt_reint_rename at ffffffffa0ca8426 [mdt]
#58 [ffff8811da8abbc8] mdt_reint_rec at ffffffffa0ca0c0f [mdt]
#59 [ffff8811da8abc18] mdt_reint_internal at ffffffffa0c99004 [mdt]
#60 [ffff8811da8abca8] mdt_reint at ffffffffa0c9936c [mdt]
#61 [ffff8811da8abcf8] mdt_handle_common at ffffffffa0c8cc65 [mdt]
#62 [ffff8811da8abd78] mdt_regular_handle at ffffffffa0c8dc75 [mdt]
#63 [ffff8811da8abd88] ptlrpc_main at ffffffffa080bd49 [ptlrpc]
#64 [ffff8811da8abf48] kernel_thread at ffffffff8100c14a
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="42902" author="jaylan" created="Wed, 8 Aug 2012 20:35:30 +0000"  >&lt;p&gt;Our lustre git source is at &lt;br/&gt;
&lt;a href=&quot;https://github.com/jlan/lustre-nas/commits/nas-2.1.2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/jlan/lustre-nas/commits/nas-2.1.2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The original report was built on tag 2.1.2-1nasS. The crash last night was built on tag 2.1.2-2nasS.&lt;/p&gt;</comment>
                            <comment id="42903" author="jaylan" created="Wed, 8 Aug 2012 20:45:03 +0000"  >&lt;p&gt;Can we modify the summary to &quot;BUG: scheduling while atomic&quot;? The current summary does not describe the crash of last night.&lt;/p&gt;</comment>
                            <comment id="42925" author="mhanafi" created="Thu, 9 Aug 2012 04:39:55 +0000"  >&lt;p&gt;We hit this again. Can we increase this to critical&lt;/p&gt;</comment>
                            <comment id="42927" author="laisiyao" created="Thu, 9 Aug 2012 04:53:30 +0000"  >&lt;p&gt;There is a log message reporting -ENOSPC:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LustreError: 9602:0:(llog_cat.c:298:llog_cat_add_rec()) llog_write_rec -28: lh=ffff881032b9d8c0^M
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Could you verify whether that&apos;s true on the MDS?&lt;/p&gt;</comment>
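<!--
A quick way to answer Lai's question on the MDS (a sketch; the mount point is an
example, not taken from this ticket). Error -28 is ENOSPC, which for an llog write
can mean the MDT backing filesystem is out of blocks or out of inodes, so it is
worth checking both:

    # on the MDS, against the mounted MDT backing filesystem
    df -h /mnt/mds      # block usage
    df -i /mnt/mds      # inode usage; llog creation also fails without free inodes

From a client, "lfs df" and "lfs df -i" report the same per-target numbers.
-->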
                            <comment id="42949" author="jaylan" created="Thu, 9 Aug 2012 13:31:29 +0000"  >&lt;p&gt;I saw the -ENOSPC LustreError immediately before the crash. However, when I analyzed the vmcore with crash, &quot;kmem -i&quot; showed MDS had plenty of memory for the system:&lt;/p&gt;

&lt;p&gt;              PAGES        TOTAL      PERCENTAGE&lt;br/&gt;
 TOTAL MEM  18494890      70.6 GB         ----&lt;br/&gt;
      FREE  11790336        45 GB   63% of TOTAL MEM&lt;br/&gt;
      USED  6704554      25.6 GB   36% of TOTAL MEM&lt;br/&gt;
    SHARED  2663959      10.2 GB   14% of TOTAL MEM&lt;br/&gt;
   BUFFERS  2603900       9.9 GB   14% of TOTAL MEM&lt;br/&gt;
    CACHED   119807       468 MB    0% of TOTAL MEM&lt;br/&gt;
      SLAB  2185165       8.3 GB   11% of TOTAL MEM&lt;/p&gt;

&lt;p&gt;TOTAL SWAP   500013       1.9 GB         ----&lt;br/&gt;
 SWAP USED        0            0    0% of TOTAL SWAP&lt;br/&gt;
 SWAP FREE   500013       1.9 GB  100% of TOTAL SWAP&lt;/p&gt;

&lt;p&gt;Did we run out of some preallocated memory? Hmmm...&lt;/p&gt;</comment>
                            <comment id="42951" author="jaylan" created="Thu, 9 Aug 2012 13:37:37 +0000"  >&lt;p&gt;On the other hand, -ENOSPC (or -28) was not reported in the crash last night (the 2nd crash in two consecutive nights.) So, it should not be the reason.&lt;/p&gt;</comment>
                            <comment id="42959" author="green" created="Thu, 9 Aug 2012 15:54:17 +0000"  >&lt;p&gt;The problem at hand seems to be stack overflow, so patches from lu-969 should help&lt;/p&gt;</comment>
                            <comment id="42965" author="green" created="Thu, 9 Aug 2012 16:22:03 +0000"  >&lt;p&gt;so the three patches you need are:&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/#change,2668&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,2668&lt;/a&gt;&lt;br/&gt;
then &lt;a href=&quot;http://review.whamcloud.com/3034&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/3034&lt;/a&gt;&lt;br/&gt;
and then &lt;a href=&quot;http://review.whamcloud.com/3072&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/3072&lt;/a&gt;&lt;/p&gt;</comment>
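<!--
For reference, the review.whamcloud.com links are Gerrit changes, so each one can be
fetched and cherry-picked in the order Oleg gives (a sketch; the repository URL and
patch-set numbers are assumptions, not from this ticket):

    git fetch http://review.whamcloud.com/lustre refs/changes/68/2668/1 \
        && git cherry-pick FETCH_HEAD
    git fetch http://review.whamcloud.com/lustre refs/changes/34/3034/1 \
        && git cherry-pick FETCH_HEAD
    git fetch http://review.whamcloud.com/lustre refs/changes/72/3072/1 \
        && git cherry-pick FETCH_HEAD

The refs/changes/XX/NNNN/P layout is standard Gerrit: XX is the last two digits of
change NNNN and P is the patch-set number.
-->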
                            <comment id="42972" author="pjones" created="Thu, 9 Aug 2012 17:42:52 +0000"  >&lt;p&gt;Following up on the action in the meeting earlier. I spoke to Oleg about whether it was possible to predict when stack overflows would occur and he confirmed that it was a combination of independent factors that resulted in a stack overflow so it was not possible to anticipate ahead of time.&lt;/p&gt;</comment>
                            <comment id="42978" author="jaylan" created="Thu, 9 Aug 2012 19:12:03 +0000"  >&lt;p&gt;Oleg, how do I decide it was a stack overflow?&lt;/p&gt;

&lt;p&gt;PID: 9115   TASK: ffff8811da902a80  CPU: 9   COMMAND: &quot;mdt_268&quot;&lt;/p&gt;

&lt;p&gt;The &quot;bt&quot; command showed that the stack started at&lt;br/&gt;
#64 [ffff8811da8abf48] kernel_thread at ffffffff8100c14a&lt;/p&gt;

&lt;p&gt;and,&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;struct task_struct {
  state = 2,
  stack = 0xffff8811da8aa000,
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The RSP was ffff8811da8aa750 when it was interrupted, so I assumed it had not yet&lt;br/&gt;
overflowed the per-CPU stack?&lt;/p&gt;

&lt;p&gt;And when the page-fault exception hit the IRQ stack,&lt;br/&gt;
the RSP of ffff88095fc63db8 looks to be still within the IRQ stack?&lt;/p&gt;

&lt;p&gt;How do I determine whether I have had a stack overflow? Is there a command in &quot;crash&quot;&lt;br/&gt;
to display the stack sizes being used? Thanks!&lt;/p&gt;</comment>
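<!--
One way to bound the stack usage with numbers already in this thread (a sketch of a
crash session; only the addresses are taken from the ticket):

    crash> bt -f 9115                        # per-frame stack data for mdt_268
    crash> struct task_struct.stack ffff8811da902a80
      stack = 0xffff8811da8aa000             # base of the 8 KiB task stack
    crash> eval 0xffff8811da8aa6a8 - 0xffff8811da8aa000
    hexadecimal: 6a8                         # headroom below the deepest frame

Frame #21 sits only 0x6a8 bytes above the stack base, and struct thread_info occupies
the first bytes of that base, so any deeper frames during the earlier I/O submission
had less than 0x6a8 bytes of room before scribbling on thread_info.
-->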
                            <comment id="42983" author="green" created="Thu, 9 Aug 2012 20:32:54 +0000"  >&lt;p&gt;The stacktrace is pretty long, as you can see, and that&apos;s not all of it, there was more as we dipped into device driver, and that&apos;s what stepped on the struct thread_info enough to just overwrite the flags but not spill over too much into other spaces.&lt;br/&gt;
And those drivers might be mighty fat in their stack usage too. (heck, even sync_buffer to schedule bug shaved 0x100 bytes off the stack and that&apos;s mere 3 functions in between).&lt;br/&gt;
there are a bit less than 0x6a8 bytes left (the interrupt stack is in a different place so does not play a role).&lt;/p&gt;

&lt;p&gt;also note the crash happened in task_rq_lock in the interrupt after the warnign was already printed by the so happened interrupt. the task rq lock happened to be garbage already which further reinforces this theory.&lt;br/&gt;
We have seen many of cashes like this before.&lt;/p&gt;

&lt;p&gt;There is no command in crash to catch a stack overflow that managed not to crash while in overflow and return back inside of the stack but merely corrupted thread info before retracting back, but you can dump struct_thread_info content and see a lot of it is garbage, I guess.&lt;/p&gt;</comment>
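<!--
Following Oleg's suggestion, the corruption can be inspected in the same crash
session (a sketch; the address is the stack base found above):

    crash> struct thread_info 0xffff8811da8aa000   # thread_info lives at the base
    crash> rd 0xffff8811da8aa000 16                # raw view of the same words

On an intact thread the first fields hold the task pointer, flags and a small
preempt_count; after an overflow they show up as stack junk, matching the bogus
count 0xffff8809 in the "scheduling while atomic" message above.
-->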
                            <comment id="43132" author="jaylan" created="Mon, 13 Aug 2012 13:36:59 +0000"  >&lt;p&gt;Since the 3 patches Oleg suggested on Aug 9 were quite extensive, would it be OK if we just apply the patches to MDS only? Would the patches require OSS&apos;es also run the same set of patches?&lt;/p&gt;</comment>
                            <comment id="43357" author="green" created="Thu, 16 Aug 2012 14:32:14 +0000"  >&lt;p&gt;You can apply them to MDS only if that&apos;s where you experience the problem, but they would be beneficial everywhere, we have seen stack overflows on clients too, usually when reexporting nfs.&lt;/p&gt;

&lt;p&gt;BTW, I now have combined patch instead of the three I referenced, if that makes your life easier: &lt;a href=&quot;http://review.whamcloud.com/3623&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/3623&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="45309" author="jaylan" created="Thu, 20 Sep 2012 19:23:36 +0000"  >&lt;p&gt;We have run with patch of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-969&quot; title=&quot;2.1 client stack overruns&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-969&quot;&gt;&lt;del&gt;LU-969&lt;/del&gt;&lt;/a&gt; (commit 2d77b00).&lt;br/&gt;
Peter indicated 2.1.3 release contains a complete solution. In that case,&lt;br/&gt;
this ticket can be closed for duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-969&quot; title=&quot;2.1 client stack overruns&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-969&quot;&gt;&lt;del&gt;LU-969&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="45310" author="pjones" created="Thu, 20 Sep 2012 19:25:08 +0000"  >&lt;p&gt;duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-969&quot; title=&quot;2.1 client stack overruns&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-969&quot;&gt;&lt;del&gt;LU-969&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="11670" name="s160-crash-7.3.2012" size="1161714" author="mhanafi" created="Tue, 3 Jul 2012 16:34:46 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvgs7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>6368</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>