<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:31:43 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-3188] IOR fails due to client stack overrun</title>
                <link>https://jira.whamcloud.com/browse/LU-3188</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This is currently killing all IOR runs on Hyperion:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;2013-04-17 15:55:20 BUG: scheduling &lt;span class=&quot;code-keyword&quot;&gt;while&lt;/span&gt; atomic: ior/44672/0x10000002
2013-04-17 15:55:20 BUG: unable to handle kernel paging request at fffffffceb9ee000
2013-04-17 15:55:20 IP: [&amp;lt;ffffffff810568e4&amp;gt;] update_curr+0x144/0x1f0
2013-04-17 15:55:20 PGD 1a87067 PUD 0
2013-04-17 15:55:20 &lt;span class=&quot;code-object&quot;&gt;Thread&lt;/span&gt; overran stack, or stack corrupted
2013-04-17 15:55:20 Oops: 0000 [#1] SMP
2013-04-17 15:55:20 last sysfs file: /sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0/infiniband/mlx4_0/ports/1/pkeys/127
2013-04-17 15:55:20 CPU 25
2013-04-17 15:55:20 Modules linked in: lmv(U) mgc(U) lustre(U) lov(U) osc(U) mdc(U) fid(U) fld(U) ko2iblnd(U) ptlrpc(U) obdclass(U) lnet(U) lvfs(U) libcfs(U) sha512_generic sha256_generic ipmi_devintf acpi_cpufreq freq_table mperf ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_addr mlx4_ib ib_sa ib_mad iw_cxgb4 iw_cxgb3 ib_core dm_mirror dm_region_hash dm_log dm_mod vhost_net macvtap macvlan tun kvm sg sd_mod crc_t10dif wmi dcdbas sb_edac edac_core i2c_i801 i2c_core iTCO_wdt iTCO_vendor_support ahci shpchp ioatdma nfs lockd fscache auth_rpcgss nfs_acl sunrpc mlx4_en mlx4_core igb dca ptp pps_core be2iscsi bnx2i cnic uio ipv6 cxgb4i cxgb4 cxgb3i libcxgbi cxgb3 mdio libiscsi_tcp qla4xxx iscsi_boot_sysfs libiscsi scsi_transport_iscsi [last unloaded: cpufreq_ondemand]
2013-04-17 15:55:20
2013-04-17 15:55:20 Pid: 44672, comm: ior Not tainted 2.6.32-358.2.1.el6.x86_64 #1 Dell Inc. PowerEdge C6220/0HYFFG
2013-04-17 15:55:20 RIP: 0010:[&amp;lt;ffffffff810568e4&amp;gt;]  [&amp;lt;ffffffff810568e4&amp;gt;] update_curr+0x144/0x1f0
2013-04-17 15:55:20 RSP: 0018:ffff88089c523db8  EFLAGS: 00010086
2013-04-17 15:55:20 RAX: ffff88086f748080 RBX: ffffffffad3be048 RCX: ffff880877f101c0
2013-04-17 15:55:20 RDX: 00000000000192d8 RSI: 0000000000000000 RDI: ffff88086f7480b8
2013-04-17 15:55:20 RBP: ffff88089c523de8 R08: ffffffff8160bb65 R09: 0000000000000007
2013-04-17 15:55:20 R10: 0000000000000010 R11: 0000000000000007 R12: ffff88089c536768
2013-04-17 15:55:20 R13: 000000000080f9df R14: 0000a8ac18cddce3 R15: ffff88086f748080
2013-04-17 15:55:20 FS:  00002aaaafebf8c0(0000) GS:ffff88089c520000(0000) knlGS:0000000000000000
2013-04-17 15:55:20 CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
2013-04-17 15:55:20 CR2: fffffffceb9ee000 CR3: 000000105cb6c000 CR4: 00000000000407e0
2013-04-17 15:55:20 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
2013-04-17 15:55:20 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
2013-04-17 15:55:20 &lt;span class=&quot;code-object&quot;&gt;Process&lt;/span&gt; ior (pid: 44672, threadinfo ffff8807ad3be000, task ffff88086f748080)
2013-04-17 15:55:20 Stack:
2013-04-17 15:55:20  ffff88089c523dc8 ffffffff81013783 ffff88086f7480b8 ffff88089c536768
2013-04-17 15:55:20 &amp;lt;d&amp;gt; 0000000000000000 0000000000000000 ffff88089c523e18 ffffffff81056e9b
2013-04-17 15:55:20 &amp;lt;d&amp;gt; ffff88089c536700 0000000000000019 0000000000016700 0000000000000019
2013-04-17 15:55:20 Call Trace:

2013-04-17 15:55:20  &amp;lt;IRQ&amp;gt;
2013-04-17 15:55:20  [&amp;lt;ffffffff81013783&amp;gt;] ? native_sched_clock+0x13/0x80
2013-04-17 15:55:20 BUG: unable to handle kernel paging request at 000000000001400d
2013-04-17 15:55:20 IP: [&amp;lt;ffffffff8100f4dd&amp;gt;] print_context_stack+0xad/0x140
2013-04-17 15:55:20 PGD 1067f2c067 PUD 105b956067 PMD 0
2013-04-17 15:55:20 &lt;span class=&quot;code-object&quot;&gt;Thread&lt;/span&gt; overran stack, or stack corrupted
2013-04-17 15:55:20 Oops: 0000 [#2] SMP
2013-04-17 15:55:20 last sysfs file: /sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0/infiniband/mlx4_0/ports/1/pkeys/127
2013-04-17 15:55:20 CPU 25
2013-04-17 15:55:20 Modules linked in: lmv(U) mgc(U) lustre(U) lov(U) osc(U) mdc(U) fid(U) fld(U) ko2iblnd(U) ptlrpc(U) obdclass(U) lnet(U) lvfs(U) libcfs(U) sha512_generic sha256_generic ipmi_devintf acpi_cpufreq freq_table mperf ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_addr mlx4_ib ib_sa ib_mad iw_cxgb4 iw_cxgb3 ib_core dm_mirror dm_region_hash dm_log dm_mod vhost_net macvtap macvlan tun kvm sg sd_mod crc_t10dif wmi dcdbas sb_edac edac_core i2c_i801 i2c_core iTCO_wdt iTCO_vendor_support ahci shpchp ioatdma nfs lockd fscache auth_rpcgss nfs_acl sunrpc mlx4_en mlx4_core igb dca ptp pps_core be2iscsi bnx2i cnic uio ipv6 cxgb4i cxgb4 cxgb3i libcxgbi cxgb3 mdio libiscsi_tcp qla4xxx iscsi_boot_sysfs libiscsi scsi_transport_iscsi [last unloaded: cpufreq_ondemand]
2013-04-17 15:55:21
2013-04-17 15:55:21 Pid: 44672, comm: ior Not tainted 2.6.32-358.2.1.el6.x86_64 #1 Dell Inc. PowerEdge C6220/0HYFFG
2013-04-17 15:55:21 RIP: 0010:[&amp;lt;ffffffff8100f4dd&amp;gt;]  [&amp;lt;ffffffff8100f4dd&amp;gt;] print_context_stack+0xad/0x140
2013-04-17 15:55:21 RSP: 0018:ffff88089c5238c8  EFLAGS: 00010006
2013-04-17 15:55:21 RAX: 0000000000013625 RBX: ffff88089c523dc0 RCX: 00000000000016f5
2013-04-17 15:55:21 RDX: 0000000000000000 RSI: 0000000000000046 RDI: 0000000000000046
2013-04-17 15:55:21 RBP: ffff88089c523928 R08: 0000000000000000 R09: ffffffff8163fde0
2013-04-17 15:55:21 R10: 0000000000000001 R11: 0000000000000000 R12: ffff88089c523de8
2013-04-17 15:55:21 R13: ffff8807ad3be000 R14: ffffffff81600460 R15: ffff88089c523fc0
2013-04-17 15:55:21 FS:  00002aaaafebf8c0(0000) GS:ffff88089c520000(0000) knlGS:0000000000000000
2013-04-17 15:55:21 CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
2013-04-17 15:55:21 CR2: 000000000001400d CR3: 000000105cb6c000 CR4: 00000000000407e0
2013-04-17 15:55:21 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
2013-04-17 15:55:21 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
2013-04-17 15:55:21 &lt;span class=&quot;code-object&quot;&gt;Process&lt;/span&gt; ior (pid: 44672, threadinfo ffff8807ad3be000, task ffff88086f748080)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;It never completes dumping the stack, instead it hits this BUG in a loop until the kernel stack is corrupt, then the node reboots. &lt;br/&gt;
Will retest with SWL and see if a change in parameters helps&lt;/p&gt;</description>
                <environment>Hyperion/LLNL</environment>
        <key id="18448">LU-3188</key>
            <summary>IOR fails due to client stack overrun</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bobijam">Zhenyu Xu</assignee>
                                    <reporter username="cliffw">Cliff White</reporter>
                        <labels>
                            <label>HB</label>
                    </labels>
                <created>Wed, 17 Apr 2013 23:08:42 +0000</created>
                <updated>Sat, 11 Oct 2014 13:50:58 +0000</updated>
                            <resolved>Mon, 23 Jun 2014 21:36:09 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                                    <fixVersion>Lustre 2.6.0</fixVersion>
                    <fixVersion>Lustre 2.5.4</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>10</watches>
                                                                            <comments>
                            <comment id="56521" author="cliffw" created="Wed, 17 Apr 2013 23:41:59 +0000"  >&lt;p&gt;Chris from LLNL indicates the stack overflow might be indicating a potential stack overflow issue with Lustre, or our kernel.&lt;/p&gt;</comment>
                            <comment id="56527" author="pjones" created="Thu, 18 Apr 2013 01:02:44 +0000"  >&lt;p&gt;Bobijam&lt;/p&gt;

&lt;p&gt;Could you please look into this one?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="56532" author="bobijam" created="Thu, 18 Apr 2013 06:38:44 +0000"  >&lt;p&gt;is there any more information? better get another hit and collect more information.&lt;/p&gt;</comment>
                            <comment id="56544" author="cliffw" created="Thu, 18 Apr 2013 15:49:01 +0000"  >&lt;p&gt;I will see what I can do. It never dumped a full stack, due to repeatedly hitting the BUG&lt;/p&gt;</comment>
                            <comment id="56549" author="keith" created="Thu, 18 Apr 2013 17:09:39 +0000"  >&lt;p&gt;Cliff what IOR configuration were you running?&lt;/p&gt;

&lt;p&gt;Was that seen on the Client, MDS or OSS?  It does look like stack corruption.  &lt;/p&gt;

&lt;p&gt;Do you have more kernel message context before the scheduling while atomic? &lt;/p&gt;</comment>
                            <comment id="56597" author="keith" created="Thu, 18 Apr 2013 23:48:16 +0000"  >&lt;p&gt;I have kernel panics seen on similar clients while running. It is a big mess, but they might be related to the stack overflow. Not sure. &lt;/p&gt;

&lt;p&gt;The client (iwc16) has been up and running for maybe a day so doing tests. &lt;/p&gt;

&lt;p&gt;Then it blew up. &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;2013-04-17 08:10:47 WARNING: at kernel/sched_fair.c:1846 hrtick_start_fair+0x18b/0x190() (Not tainted)
2013-04-17 08:10:47 Hardware name: PowerEdge C6220
2013-04-17 08:10:47 Modules linked in: lmv(U) mgc(U) lustre(U) lov(U) osc(U) mdc(U) fid(U) fld(U) ptlrpc(U) obdclass(U) lvfs(U) ko2iblnd(U) lnet(U) sha512_generic sha256_generic libcfs(U) ipmi_devintf acpi_cpufreq freq_table mperf ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_addr mlx4_ib ib_sa ib_mad iw_cxgb4 iw_cxgb3 ib_core dm_mirror dm_region_hash dm_log dm_mod vhost_net macvtap macvlan tun kvm sg sd_mod crc_t10dif wmi dcdbas sb_edac edac_core i2c_i801 i2c_core ahci iTCO_wdt iTCO_vendor_support shpchp ioatdma nfs lockd fscache auth_rpcgss nfs_acl sunrpc mlx4_en mlx4_core igb dca ptp pps_core be2iscsi bnx2i cnic uio ipv6 cxgb4i cxgb4 cxgb3i libcxgbi cxgb3 mdio libiscsi_tcp qla4xxx iscsi_boot_sysfs libiscsi scsi_transport_iscsi [last unloaded: cpufreq_ondemand]
2013-04-17 08:10:48 Pid: 31339, comm: ior Not tainted 2.6.32-358.2.1.el6.x86_64 #1
2013-04-17 08:10:48 Call Trace:
2013-04-17 08:10:48  &amp;lt;IRQ&amp;gt;  [&amp;lt;ffffffff8106e2e7&amp;gt;] ? warn_slowpath_common+0x87/0xc0
2013-04-17 08:10:48  [&amp;lt;ffffffff8106e33a&amp;gt;] ? warn_slowpath_null+0x1a/0x20
2013-04-17 08:10:48  [&amp;lt;ffffffff810575eb&amp;gt;] ? hrtick_start_fair+0x18b/0x190
2013-04-17 08:10:48  [&amp;lt;ffffffff81065ceb&amp;gt;] ? enqueue_task_fair+0xfb/0x100
2013-04-17 08:10:48  [&amp;lt;ffffffff81055f76&amp;gt;] ? enqueue_task+0x66/0x80
2013-04-17 08:10:48  [&amp;lt;ffffffff81055fb3&amp;gt;] ? activate_task+0x23/0x30
2013-04-17 08:10:48  [&amp;lt;ffffffff81063120&amp;gt;] ? try_to_wake_up+0x1f0/0x3e0
2013-04-17 08:10:48  [&amp;lt;ffffffff81063322&amp;gt;] ? default_wake_function+0x12/0x20
2013-04-17 08:10:48  [&amp;lt;ffffffff81096cb6&amp;gt;] ? autoremove_wake_function+0x16/0x40
2013-04-17 08:10:48  [&amp;lt;ffffffff81051419&amp;gt;] ? __wake_up_common+0x59/0x90
2013-04-17 08:10:48  [&amp;lt;ffffffff81055aa8&amp;gt;] ? __wake_up+0x48/0x70
2013-04-17 08:10:48  [&amp;lt;ffffffff810912e0&amp;gt;] ? delayed_work_timer_fn+0x0/0x50
2013-04-17 08:10:48  [&amp;lt;ffffffff81090d8d&amp;gt;] ? insert_work+0x6d/0xb0
2013-04-17 08:10:48  [&amp;lt;ffffffff810912c6&amp;gt;] ? __queue_work+0x36/0x50
2013-04-17 08:10:48  [&amp;lt;ffffffff81091319&amp;gt;] ? delayed_work_timer_fn+0x39/0x50
2013-04-17 08:10:48  [&amp;lt;ffffffff81081837&amp;gt;] ? run_timer_softirq+0x197/0x340
2013-04-17 08:10:48  [&amp;lt;ffffffff810a7ff0&amp;gt;] ? tick_sched_timer+0x0/0xc0
2013-04-17 08:10:48  [&amp;lt;ffffffff8102e94d&amp;gt;] ? lapic_next_event+0x1d/0x30
2013-04-17 08:10:48  [&amp;lt;ffffffff81076fb1&amp;gt;] ? __do_softirq+0xc1/0x1e0
2013-04-17 08:10:48  [&amp;lt;ffffffff8109b77b&amp;gt;] ? hrtimer_interrupt+0x14b/0x260
2013-04-17 08:10:48  [&amp;lt;ffffffff8100c1cc&amp;gt;] ? call_softirq+0x1c/0x30
2013-04-17 08:10:48  [&amp;lt;ffffffff8100de05&amp;gt;] ? do_softirq+0x65/0xa0
2013-04-17 08:10:48  [&amp;lt;ffffffff81076d95&amp;gt;] ? irq_exit+0x85/0x90
2013-04-17 08:10:48  [&amp;lt;ffffffff81517000&amp;gt;] ? smp_apic_timer_interrupt+0x70/0x9b
2013-04-17 08:10:48  [&amp;lt;ffffffff8100bb93&amp;gt;] ? apic_timer_interrupt+0x13/0x20
2013-04-17 08:10:48  &amp;lt;EOI&amp;gt;  [&amp;lt;ffffffff8100b2f2&amp;gt;] ? int_check_syscall_exit_work+0x0/0x3d
2013-04-17 08:10:48 ---[ end trace c5d166a5a1430d97 ]---
2013-04-17 08:10:48 ------------[ cut here ]------------
2013-04-17 08:10:48 kernel BUG at kernel/sched.c:1412!
2013-04-17 08:10:48 invalid opcode: 0000 [#1] SMP
2013-04-17 08:10:48 last sysfs file: /sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0/infiniband/mlx4_0/ports/1/pkeys/127
2013-04-17 08:10:48 CPU 31
2013-04-17 08:10:48 Modules linked in: lmv(U) mgc(U) lustre(U) lov(U) osc(U) mdc(U) fid(U) fld(U) ptlrpc(U) obdclass(U) lvfs(U) ko2iblnd(U) lnet(U) sha512_generic sha256_generic libcfs(U) ipmi_devintf acpi_cpufreq freq_table mperf ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_addr mlx4_ib ib_sa ib_mad iw_cxgb4 iw_cxgb3 ib_core dm_mirror dm_region_hash dm_log dm_mod vhost_net macvtap macvlan tun kvm sg sd_mod crc_t10dif wmi dcdbas sb_edac edac_core i2c_i801 i2c_core ahci iTCO_wdt iTCO_vendor_support shpchp ioatdma nfs lockd fscache auth_rpcgss nfs_acl sunrpc mlx4_en mlx4_core igb dca ptp pps_core be2iscsi bnx2i cnic uio ipv6 cxgb4i cxgb4 cxgb3i libcxgbi cxgb3 mdio libiscsi_tcp qla4xxx iscsi_boot_sysfs libiscsi scsi_transport_iscsi [last unloaded: cpufreq_ondemand]
2013-04-17 08:10:48
2013-04-17 08:10:48 Pid: 31339, comm: ior Tainted: G        W  ---------------    2.6.32-358.2.1.el6.x86_64 #1 Dell Inc. PowerEdge C6220/0W6W6G
2013-04-17 08:10:48 RIP: 0010:[&amp;lt;ffffffff8105225a&amp;gt;]  [&amp;lt;ffffffff8105225a&amp;gt;] resched_task+0x6a/0x80
2013-04-17 08:10:48 RSP: 0018:ffff88089c5e3c10  EFLAGS: 00010046
2013-04-17 08:10:48 RAX: 0000000035ee35ee RBX: ffff88089c5f6700 RCX: 00000000000035ee
2013-04-17 08:10:48 RDX: ffff8808625fa000 RSI: ffff880873d18b18 RDI: ffff880577359540
2013-04-17 08:10:48 RBP: ffff88089c5e3c10 R08: 0000000000000000 R09: ffffffff8163fde0
2013-04-17 08:10:48 R10: 0000000000000001 R11: 0000000000000000 R12: ffff880577359578
2013-04-17 08:10:49 R13: ffff880873d18b18 R14: ffff880577359540 R15: 0000000000000000
2013-04-17 08:10:49 FS:  00002aaaafebf8c0(0000) GS:ffff88089c5e0000(0000) knlGS:0000000000000000
2013-04-17 08:10:49 CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
2013-04-17 08:10:49 CR2: 0000000000899000 CR3: 0000001055f74000 CR4: 00000000000407e0
2013-04-17 08:10:49 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
2013-04-17 08:10:49 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
2013-04-17 08:10:49 Process ior (pid: 31339, threadinfo ffff8808625fa000, task ffff880577359540)
2013-04-17 08:10:49 Stack:
2013-04-17 08:10:49  ffff88089c5e3c70 ffffffff810570e0 ffff88089c5e3c50 ffffffff81065ceb
2013-04-17 08:10:49 &amp;lt;d&amp;gt; ffff88089c5e3c50 0000000073d18ae0 ffff88089c5f6700 ffff88089c5f6700
2013-04-17 08:10:49 &amp;lt;d&amp;gt; ffff88089c5f6700 0000000000000000 000000000000001f 0000000000000000
2013-04-17 08:10:49 Call Trace:
2013-04-17 08:10:49  &amp;lt;IRQ&amp;gt;
2013-04-17 08:10:49  [&amp;lt;ffffffff810570e0&amp;gt;] check_preempt_wakeup+0x1c0/0x260
2013-04-17 08:10:49  [&amp;lt;ffffffff81065ceb&amp;gt;] ? enqueue_task_fair+0xfb/0x100
2013-04-17 08:10:49  [&amp;lt;ffffffff8105230c&amp;gt;] check_preempt_curr+0x7c/0x90
2013-04-17 08:10:49  [&amp;lt;ffffffff81063145&amp;gt;] try_to_wake_up+0x215/0x3e0
2013-04-17 08:10:49  [&amp;lt;ffffffff81063322&amp;gt;] default_wake_function+0x12/0x20
2013-04-17 08:10:49  [&amp;lt;ffffffff81096cb6&amp;gt;] autoremove_wake_function+0x16/0x40
2013-04-17 08:10:49  [&amp;lt;ffffffff81051419&amp;gt;] __wake_up_common+0x59/0x90
2013-04-17 08:10:49  [&amp;lt;ffffffff81055aa8&amp;gt;] __wake_up+0x48/0x70
2013-04-17 08:10:49  [&amp;lt;ffffffff810912e0&amp;gt;] ? delayed_work_timer_fn+0x0/0x50
2013-04-17 08:10:49  [&amp;lt;ffffffff81090d8d&amp;gt;] insert_work+0x6d/0xb0
2013-04-17 08:10:49  [&amp;lt;ffffffff810912c6&amp;gt;] __queue_work+0x36/0x50
2013-04-17 08:10:49  [&amp;lt;ffffffff81091319&amp;gt;] delayed_work_timer_fn+0x39/0x50
2013-04-17 08:10:49  [&amp;lt;ffffffff81081837&amp;gt;] run_timer_softirq+0x197/0x340
2013-04-17 08:10:49  [&amp;lt;ffffffff810a7ff0&amp;gt;] ? tick_sched_timer+0x0/0xc0
2013-04-17 08:10:49  [&amp;lt;ffffffff8102e94d&amp;gt;] ? lapic_next_event+0x1d/0x30
2013-04-17 08:10:49  [&amp;lt;ffffffff81076fb1&amp;gt;] __do_softirq+0xc1/0x1e0
2013-04-17 08:10:49  [&amp;lt;ffffffff8109b77b&amp;gt;] ? hrtimer_interrupt+0x14b/0x260
2013-04-17 08:10:49  [&amp;lt;ffffffff8100c1cc&amp;gt;] call_softirq+0x1c/0x30
2013-04-17 08:10:49  [&amp;lt;ffffffff8100de05&amp;gt;] do_softirq+0x65/0xa0
2013-04-17 08:10:49  [&amp;lt;ffffffff81076d95&amp;gt;] irq_exit+0x85/0x90
2013-04-17 08:10:49  [&amp;lt;ffffffff81517000&amp;gt;] smp_apic_timer_interrupt+0x70/0x9b
2013-04-17 08:10:49  [&amp;lt;ffffffff8100bb93&amp;gt;] apic_timer_interrupt+0x13/0x20
2013-04-17 08:10:49  &amp;lt;EOI&amp;gt;
2013-04-17 08:10:49  [&amp;lt;ffffffff8100b2f2&amp;gt;] ? int_check_syscall_exit_work+0x0/0x3d
2013-04-17 08:10:49 Code: 25 b8 e0 00 00 8b 40 18 39 d0 74 0d 0f ae f0 48 8b 57 08 f6 42 14 04 74 08 c9 c3 66 0f 1f 44 00 00 89 c7 ff 15 28 43 a4 00 c9 c3 &amp;lt;0f&amp;gt; 0b 0f 1f 40 00 eb fa 66 66 66 66 66 2e 0f 1f 84 00 00 00 00
2013-04-17 08:10:49 RIP  [&amp;lt;ffffffff8105225a&amp;gt;] resched_task+0x6a/0x80
2013-04-17 08:10:49  RSP &amp;lt;ffff88089c5e3c10&amp;gt;
2013-04-17 08:10:50 Initializing cgroup subsys cpuset
2013-04-17 08:10:50 Initializing cgroup subsys cpu
2013-04-17 08:10:50 Linux version 2.6.32-358.2.1.el6.x86_64 
....  
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Kernel reboots. &lt;/p&gt;</comment>
                            <comment id="56598" author="keith" created="Thu, 18 Apr 2013 23:56:33 +0000"  >&lt;p&gt;Conman output from a client that got BUG: scheduling while atomic: ior/19445/0x10010100&lt;br/&gt;
and other related errors. &lt;/p&gt;</comment>
                            <comment id="56599" author="keith" created="Thu, 18 Apr 2013 23:57:05 +0000"  >&lt;p&gt;Another schedule while atomic with some real debug. &lt;br/&gt;
The whole console output is full of readable back traces.  There are some very deep stacks in it. &lt;/p&gt;
</comment>
                            <comment id="56728" author="adilger" created="Mon, 22 Apr 2013 19:40:37 +0000"  >&lt;p&gt;There is a stack overflow in the attached console logs:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;2013-04-15 04:52:31 WARNING: at kernel/sched_fair.c:1846 hrtick_start_fair+0x18b/0x190() (Tainted: G        W  
2013-04-15 04:52:32 Pid: 19445, comm: ior Tainted: G        W  ---------------    2.6.32-358.2.1.el6.x86_64 #1
2013-04-15 04:52:32 Call Trace:
2013-04-15 04:52:32  &amp;lt;IRQ&amp;gt;  [&amp;lt;ffffffff8106e2e7&amp;gt;] ? warn_slowpath_common+0x87/0xc0
2013-04-15 04:52:32  [&amp;lt;ffffffff8106e33a&amp;gt;] ? warn_slowpath_null+0x1a/0x20
2013-04-15 04:52:32  [&amp;lt;ffffffff810575eb&amp;gt;] ? hrtick_start_fair+0x18b/0x190
2013-04-15 04:52:32  [&amp;lt;ffffffff81065ceb&amp;gt;] ? enqueue_task_fair+0xfb/0x100
2013-04-15 04:52:32  [&amp;lt;ffffffff81055f76&amp;gt;] ? enqueue_task+0x66/0x80
2013-04-15 04:52:32  [&amp;lt;ffffffff81055fb3&amp;gt;] ? activate_task+0x23/0x30
2013-04-15 04:52:32  [&amp;lt;ffffffff81063120&amp;gt;] ? try_to_wake_up+0x1f0/0x3e0
2013-04-15 04:52:32  [&amp;lt;ffffffff81063322&amp;gt;] ? default_wake_function+0x12/0x20
2013-04-15 04:52:32  [&amp;lt;ffffffff81096cb6&amp;gt;] ? autoremove_wake_function+0x16/0x40
2013-04-15 04:52:32  [&amp;lt;ffffffff81051419&amp;gt;] ? __wake_up_common+0x59/0x90
2013-04-15 04:52:32  [&amp;lt;ffffffff81055aa8&amp;gt;] ? __wake_up+0x48/0x70
2013-04-15 04:52:32  [&amp;lt;ffffffff810912e0&amp;gt;] ? delayed_work_timer_fn+0x0/0x50
2013-04-15 04:52:32  [&amp;lt;ffffffff81090d8d&amp;gt;] ? insert_work+0x6d/0xb0
2013-04-15 04:52:32  [&amp;lt;ffffffff810912c6&amp;gt;] ? __queue_work+0x36/0x50
2013-04-15 04:52:32  [&amp;lt;ffffffff81091319&amp;gt;] ? delayed_work_timer_fn+0x39/0x50
2013-04-15 04:52:32  [&amp;lt;ffffffff81081837&amp;gt;] ? run_timer_softirq+0x197/0x340
2013-04-15 04:52:32  [&amp;lt;ffffffff81076fb1&amp;gt;] ? __do_softirq+0xc1/0x1e0
2013-04-15 04:52:32  [&amp;lt;ffffffff8109b77b&amp;gt;] ? hrtimer_interrupt+0x14b/0x260
2013-04-15 04:52:32  [&amp;lt;ffffffff8100c1cc&amp;gt;] ? call_softirq+0x1c/0x30
2013-04-15 04:52:32  [&amp;lt;ffffffff8100de05&amp;gt;] ? do_softirq+0x65/0xa0
2013-04-15 04:52:32  [&amp;lt;ffffffff81076d95&amp;gt;] ? irq_exit+0x85/0x90
2013-04-15 04:52:32  [&amp;lt;ffffffff81517000&amp;gt;] ? smp_apic_timer_interrupt+0x70/0x9b
2013-04-15 04:52:32  [&amp;lt;ffffffff8100bb93&amp;gt;] ? apic_timer_interrupt+0x13/0x20
2013-04-15 04:52:32  &amp;lt;EOI&amp;gt;  [&amp;lt;ffffffff81510297&amp;gt;] ? _spin_unlock_irqrestore+0x17/0x20
2013-04-15 04:52:32  [&amp;lt;ffffffff81055ab3&amp;gt;] ? __wake_up+0x53/0x70
2013-04-15 04:52:32  [&amp;lt;ffffffffa04cf75a&amp;gt;] ? cfs_waitq_signal+0x1a/0x20 [libcfs]
2013-04-15 04:52:32  [&amp;lt;ffffffffa07a18ca&amp;gt;] ? ptlrpc_set_add_new_req+0x9a/0x150 [ptlrpc]
2013-04-15 04:52:32  [&amp;lt;ffffffffa07d703f&amp;gt;] ? ptlrpcd_add_req+0x18f/0x2d0 [ptlrpc]
2013-04-15 04:52:32  [&amp;lt;ffffffffa0677c51&amp;gt;] ? cl_req_attr_set+0xd1/0x230 [obdclass]
2013-04-15 04:52:32  [&amp;lt;ffffffffa07b423c&amp;gt;] ? lustre_msg_get_opc+0x9c/0x110 [ptlrpc]
2013-04-15 04:52:32  [&amp;lt;ffffffffa0971e2e&amp;gt;] ? osc_build_rpc+0xd7e/0x1730 [osc]
2013-04-15 04:52:32  [&amp;lt;ffffffffa0988ff3&amp;gt;] ? osc_send_read_rpc+0x6a3/0x880 [osc]
2013-04-15 04:52:32  [&amp;lt;ffffffffa04e4522&amp;gt;] ? cfs_hash_bd_from_key+0x42/0xd0 [libcfs]
2013-04-15 04:52:32  [&amp;lt;ffffffffa0661c6c&amp;gt;] ? lu_object_put+0x12c/0x330 [obdclass]
2013-04-15 04:52:32  [&amp;lt;ffffffffa098cb66&amp;gt;] ? osc_io_unplug0+0xb46/0x12c0 [osc]
2013-04-15 04:52:32  [&amp;lt;ffffffff81167330&amp;gt;] ? cache_alloc_refill+0x1c0/0x240
2013-04-15 04:52:32  [&amp;lt;ffffffffa098f081&amp;gt;] ? osc_io_unplug+0x11/0x20 [osc]
2013-04-15 04:52:32  [&amp;lt;ffffffffa0995c70&amp;gt;] ? osc_queue_sync_pages+0x1d0/0x360 [osc]
2013-04-15 04:52:32  [&amp;lt;ffffffffa09807d0&amp;gt;] ? osc_io_submit+0x330/0x4a0 [osc]
2013-04-15 04:52:32  [&amp;lt;ffffffffa06775bc&amp;gt;] ? cl_io_submit_rw+0x6c/0x160 [obdclass]
2013-04-15 04:52:32  [&amp;lt;ffffffffa0a1604a&amp;gt;] ? lov_io_submit+0x4ea/0xbc0 [lov]
2013-04-15 04:52:32  [&amp;lt;ffffffffa06775bc&amp;gt;] ? cl_io_submit_rw+0x6c/0x160 [obdclass]
2013-04-15 04:52:32  [&amp;lt;ffffffffa0679bde&amp;gt;] ? cl_io_read_page+0xae/0x170 [obdclass]
2013-04-15 04:52:32  [&amp;lt;ffffffffa066d9a7&amp;gt;] ? cl_page_assume+0xf7/0x220 [obdclass]
2013-04-15 04:52:33  [&amp;lt;ffffffffa0aba5d6&amp;gt;] ? ll_readpage+0x96/0x1a0 [lustre]
2013-04-15 04:52:33  [&amp;lt;ffffffff81096ce0&amp;gt;] ? wake_bit_function+0x0/0x50
2013-04-15 04:52:33  [&amp;lt;ffffffff8111b7ac&amp;gt;] ? generic_file_aio_read+0x1fc/0x700
2013-04-15 04:52:33  [&amp;lt;ffffffffa0ae7707&amp;gt;] ? vvp_io_read_start+0x257/0x470 [lustre]
2013-04-15 04:52:33  [&amp;lt;ffffffffa067771a&amp;gt;] ? cl_io_start+0x6a/0x140 [obdclass]
2013-04-15 04:52:33  [&amp;lt;ffffffffa067be54&amp;gt;] ? cl_io_loop+0xb4/0x1b0 [obdclass]
2013-04-15 04:52:33  [&amp;lt;ffffffffa0a8e6df&amp;gt;] ? ll_file_io_generic+0x33f/0x600 [lustre]
2013-04-15 04:52:33  [&amp;lt;ffffffffa0a8eadf&amp;gt;] ? ll_file_aio_read+0x13f/0x2c0 [lustre]
2013-04-15 04:52:33  [&amp;lt;ffffffffa0a8f68c&amp;gt;] ? ll_file_read+0x16c/0x2a0 [lustre]
2013-04-15 04:52:33  [&amp;lt;ffffffff811817a5&amp;gt;] ? vfs_read+0xb5/0x1a0
2013-04-15 04:52:33  [&amp;lt;ffffffff811818e1&amp;gt;] ? sys_read+0x51/0x90
2013-04-15 04:52:33  [&amp;lt;ffffffff8100b072&amp;gt;] ? system_call_fastpath+0x16/0x1b
2013-04-15 04:52:33 ---[ end trace 47344f6fbb7a59e2 ]---
2013-04-15 04:52:33 BUG: scheduling while atomic: ior/19445/0x10010100
2013-04-15 04:52:33 Pid: 19445, comm: ior Tainted: G        W  ---------------    2.6.32-358.2.1.el6.x86_64 #1 Dell Inc. PowerEdge C6220/0HYFFG
2013-04-15 04:52:33 RIP: 0010:[&amp;lt;ffffffff81510297&amp;gt;]  [&amp;lt;ffffffff81510297&amp;gt;] _spin_unlock_irqrestore
2013-04-15 04:52:34 Call Trace:
2013-04-15 04:52:34  &amp;lt;IRQ&amp;gt; 
2013-04-15 04:52:34  [&amp;lt;ffffffff810912d1&amp;gt;] ? __queue_work+0x41/0x50
2013-04-15 04:52:34  [&amp;lt;ffffffff81091319&amp;gt;] ? delayed_work_timer_fn+0x39/0x50
2013-04-15 04:52:34  [&amp;lt;ffffffff81081837&amp;gt;] ? run_timer_softirq+0x197/0x340
2013-04-15 04:52:34  [&amp;lt;ffffffff81076fb1&amp;gt;] ? __do_softirq+0xc1/0x1e0
2013-04-15 04:52:34  [&amp;lt;ffffffff8109b77b&amp;gt;] ? hrtimer_interrupt+0x14b/0x260
2013-04-15 04:52:34  [&amp;lt;ffffffff8100c1cc&amp;gt;] ? call_softirq+0x1c/0x30
2013-04-15 04:52:34  [&amp;lt;ffffffff8100de05&amp;gt;] ? do_softirq+0x65/0xa0
2013-04-15 04:52:34  [&amp;lt;ffffffff81076d95&amp;gt;] ? irq_exit+0x85/0x90
2013-04-15 04:52:34  [&amp;lt;ffffffff81517000&amp;gt;] ? smp_apic_timer_interrupt+0x70/0x9b
2013-04-15 04:52:34  [&amp;lt;ffffffff8100bb93&amp;gt;] ? apic_timer_interrupt+0x13/0x20
2013-04-15 04:52:34  &amp;lt;EOI&amp;gt; 
2013-04-15 04:52:34  [&amp;lt;ffffffff81510297&amp;gt;] ? _spin_unlock_irqrestore+0x17/0x20
2013-04-15 04:52:34  [&amp;lt;ffffffff81055ab3&amp;gt;] ? __wake_up+0x53/0x70
2013-04-15 04:52:34  [&amp;lt;ffffffffa04cf75a&amp;gt;] ? cfs_waitq_signal+0x1a/0x20 [libcfs]
2013-04-15 04:52:34  [&amp;lt;ffffffffa07a18ca&amp;gt;] ? ptlrpc_set_add_new_req+0x9a/0x150 [ptlrpc]
2013-04-15 04:52:34  [&amp;lt;ffffffffa07d703f&amp;gt;] ? ptlrpcd_add_req+0x18f/0x2d0 [ptlrpc]
2013-04-15 04:52:34  [&amp;lt;ffffffffa0677c51&amp;gt;] ? cl_req_attr_set+0xd1/0x230 [obdclass]
2013-04-15 04:52:34  [&amp;lt;ffffffffa07b423c&amp;gt;] ? lustre_msg_get_opc+0x9c/0x110 [ptlrpc]
2013-04-15 04:52:34  [&amp;lt;ffffffffa0971e2e&amp;gt;] ? osc_build_rpc+0xd7e/0x1730 [osc]
2013-04-15 04:52:34  [&amp;lt;ffffffffa0988ff3&amp;gt;] ? osc_send_read_rpc+0x6a3/0x880 [osc]
2013-04-15 04:52:34  [&amp;lt;ffffffffa04e4522&amp;gt;] ? cfs_hash_bd_from_key+0x42/0xd0 [libcfs]
2013-04-15 04:52:34  [&amp;lt;ffffffffa0661c6c&amp;gt;] ? lu_object_put+0x12c/0x330 [obdclass]
2013-04-15 04:52:34  [&amp;lt;ffffffffa098cb66&amp;gt;] ? osc_io_unplug0+0xb46/0x12c0 [osc]
2013-04-15 04:52:34  [&amp;lt;ffffffff81167330&amp;gt;] ? cache_alloc_refill+0x1c0/0x240
2013-04-15 04:52:34  [&amp;lt;ffffffffa098f081&amp;gt;] ? osc_io_unplug+0x11/0x20 [osc]
2013-04-15 04:52:34  [&amp;lt;ffffffffa0995c70&amp;gt;] ? osc_queue_sync_pages+0x1d0/0x360 [osc]
2013-04-15 04:52:34  [&amp;lt;ffffffffa09807d0&amp;gt;] ? osc_io_submit+0x330/0x4a0 [osc]
2013-04-15 04:52:34  [&amp;lt;ffffffffa06775bc&amp;gt;] ? cl_io_submit_rw+0x6c/0x160 [obdclass]
2013-04-15 04:52:34  [&amp;lt;ffffffffa0a1604a&amp;gt;] ? lov_io_submit+0x4ea/0xbc0 [lov]
2013-04-15 04:52:34  [&amp;lt;ffffffffa06775bc&amp;gt;] ? cl_io_submit_rw+0x6c/0x160 [obdclass]
2013-04-15 04:52:34  [&amp;lt;ffffffffa0679bde&amp;gt;] ? cl_io_read_page+0xae/0x170 [obdclass]
2013-04-15 04:52:34  [&amp;lt;ffffffffa066d9a7&amp;gt;] ? cl_page_assume+0xf7/0x220 [obdclass]
2013-04-15 04:52:34  [&amp;lt;ffffffffa0aba5d6&amp;gt;] ? ll_readpage+0x96/0x1a0 [lustre]
2013-04-15 04:52:34  [&amp;lt;ffffffff81096ce0&amp;gt;] ? wake_bit_function+0x0/0x50
2013-04-15 04:52:34  [&amp;lt;ffffffff8111b7ac&amp;gt;] ? generic_file_aio_read+0x1fc/0x700
2013-04-15 04:52:34  [&amp;lt;ffffffffa0ae7707&amp;gt;] ? vvp_io_read_start+0x257/0x470 [lustre]
2013-04-15 04:52:34  [&amp;lt;ffffffffa067771a&amp;gt;] ? cl_io_start+0x6a/0x140 [obdclass]
2013-04-15 04:52:34  [&amp;lt;ffffffffa067be54&amp;gt;] ? cl_io_loop+0xb4/0x1b0 [obdclass]
2013-04-15 04:52:34  [&amp;lt;ffffffffa0a8e6df&amp;gt;] ? ll_file_io_generic+0x33f/0x600 [lustre]
2013-04-15 04:52:34  [&amp;lt;ffffffffa0a8eadf&amp;gt;] ? ll_file_aio_read+0x13f/0x2c0 [lustre]
2013-04-15 04:52:34  [&amp;lt;ffffffffa0a8f68c&amp;gt;] ? ll_file_read+0x16c/0x2a0 [lustre]
2013-04-15 04:52:34  [&amp;lt;ffffffff811817a5&amp;gt;] ? vfs_read+0xb5/0x1a0
2013-04-15 04:52:34  [&amp;lt;ffffffff811818e1&amp;gt;] ? sys_read+0x51/0x90
2013-04-15 04:52:34  [&amp;lt;ffffffff8100b072&amp;gt;] ? system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="56920" author="bobijam" created="Wed, 24 Apr 2013 10:36:13 +0000"  >&lt;p&gt;Jinshan,&lt;/p&gt;

&lt;p&gt;Can osc_queue_sync_pages() call osc_io_unplug_async() instead of osc_io_unplug()? My understanding of their difference is that whether we build the RPC request on this thread context or on ptlrpcd context.&lt;/p&gt;</comment>
                            <comment id="56939" author="jay" created="Wed, 24 Apr 2013 16:42:31 +0000"  >&lt;p&gt;Yes, we can use osc_io_unplug_async() here. However, I don&apos;t think the calling stack is super long so I&apos;m not sure if there exists a large local variable somewhere.&lt;/p&gt;</comment>
                            <comment id="57206" author="bobijam" created="Sun, 28 Apr 2013 09:22:22 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/6191&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6191&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="57600" author="bobijam" created="Fri, 3 May 2013 01:52:39 +0000"  >&lt;p&gt;Keith, &lt;/p&gt;

&lt;p&gt;Can you try &lt;a href=&quot;http://review.whamcloud.com/6191&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6191&lt;/a&gt; as you&apos;ve reproduced the issue?&lt;/p&gt;</comment>
                            <comment id="57640" author="keith" created="Fri, 3 May 2013 15:24:02 +0000"  >&lt;p&gt;All those stack traces came from Hyperion.  Cliff, can we re-run this on the cluster with the fix patch?&lt;/p&gt;</comment>
                            <comment id="57673" author="keith" created="Fri, 3 May 2013 19:17:29 +0000"  >&lt;p&gt;Ok after chatting with Peter it is clear we want to be sure we don&apos;t regress on lu-2909.  I am setting up to test for that issue with this fix patch.  I will update this LU and the patch when I pass at least 24 hours of testing or if I encounter the fsx crash again. &lt;/p&gt;</comment>
                            <comment id="57695" author="keith" created="Sun, 5 May 2013 22:36:09 +0000"  >&lt;p&gt;I have been testing for close to 48 hours without encountering any issues with the fsx test. I will let the tests continue to run and update if I do encounter anything.  I acked the patch. &lt;/p&gt;</comment>
                            <comment id="57701" author="pjones" created="Mon, 6 May 2013 02:08:15 +0000"  >&lt;p&gt;Proposed fix landed for 2.4. Will open a new ticket if that change still proves insufficient for Hyperion test runs.&lt;/p&gt;</comment>
                            <comment id="57728" author="keith" created="Mon, 6 May 2013 15:18:57 +0000"  >&lt;p&gt;Just for clarity Hyperion got worse:  &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3281&quot; title=&quot;IO Fails - client stack overrun&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3281&quot;&gt;&lt;del&gt;LU-3281&lt;/del&gt;&lt;/a&gt; IO Fails - client stack overrun &lt;/p&gt;</comment>
                            <comment id="58296" author="green" created="Mon, 13 May 2013 17:44:46 +0000"  >&lt;p&gt;patch 6191 reverted from master&lt;/p&gt;</comment>
                            <comment id="58503" author="spitzcor" created="Tue, 14 May 2013 20:31:08 +0000"  >&lt;p&gt;Was 6191 reverted due to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3281&quot; title=&quot;IO Fails - client stack overrun&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3281&quot;&gt;&lt;del&gt;LU-3281&lt;/del&gt;&lt;/a&gt; (which is marked resolved-fixed) ?&lt;/p&gt;</comment>
                            <comment id="58508" author="pjones" created="Tue, 14 May 2013 21:00:02 +0000"  >&lt;p&gt;Hi Cory&lt;/p&gt;

&lt;p&gt;Basically, &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3188&quot; title=&quot;IOR fails due to client stack overrun&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3188&quot;&gt;&lt;del&gt;LU-3188&lt;/del&gt;&lt;/a&gt; was the initial attempt to fix the issue ultimately fixed in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3281&quot; title=&quot;IO Fails - client stack overrun&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3281&quot;&gt;&lt;del&gt;LU-3281&lt;/del&gt;&lt;/a&gt; but it was found to have unfortunate side-effects under some circumstances.&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="83129" author="bobijam" created="Sun, 4 May 2014 02:09:29 +0000"  >&lt;p&gt;per &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4982&quot; title=&quot;Stack overrun in client under ll_fault&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4982&quot;&gt;&lt;del&gt;LU-4982&lt;/del&gt;&lt;/a&gt; comment, if the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4509&quot; title=&quot;clio can be stuck in osc_extent_wait&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4509&quot;&gt;&lt;del&gt;LU-4509&lt;/del&gt;&lt;/a&gt; patch could cure the side effect of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3188&quot; title=&quot;IOR fails due to client stack overrun&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3188&quot;&gt;&lt;del&gt;LU-3188&lt;/del&gt;&lt;/a&gt;, we&apos;d better revive the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3188&quot; title=&quot;IOR fails due to client stack overrun&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3188&quot;&gt;&lt;del&gt;LU-3188&lt;/del&gt;&lt;/a&gt; patch&lt;/p&gt;</comment>
                            <comment id="87320" author="pjones" created="Mon, 23 Jun 2014 21:36:09 +0000"  >&lt;p&gt;Landed for 2.6&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="17690">LU-2859</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="18717">LU-3281</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="12552" name="console.iwc108" size="1256054" author="keith" created="Thu, 18 Apr 2013 23:56:33 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvofz:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>7781</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>