<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:22:55 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
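
Concretely, assuming this document was fetched via JIRA's usual issue-XML view (the
'/si/jira.issueviews:issue-xml/...' path below is an assumption, not taken from this feed),
such a restricted request might look like:

    curl 'https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-2165/LU-2165.xml?field=key&field=summary'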
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2165] sanity test 64b hang with zfs</title>
                <link>https://jira.whamcloud.com/browse/LU-2165</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;single node FSTYPE=zfs sanity.sh is hanging for me:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 4020.591245] Lustre: DEBUG MARKER: == sanity test 64b: check out-of-space detection on client ============= 18:00:03 (1350079203)
[ 4301.008200] LNet: Service thread pid 29286 was inactive for 200.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[ 4301.018042] Pid: 29286, comm: ll_ost_io01_005
[ 4301.018946]
[ 4301.018947] Call Trace:
[ 4301.019836]  [&amp;lt;ffffffff814fb5d1&amp;gt;] schedule_timeout+0x191/0x2e0
[ 4301.021092]  [&amp;lt;ffffffff8107bcd0&amp;gt;] ? process_timeout+0x0/0x10
[ 4301.022300]  [&amp;lt;ffffffffa09f56e0&amp;gt;] __cv_timedwait_common+0xc0/0x280 [spl]
[ 4301.023736]  [&amp;lt;ffffffff814fc20c&amp;gt;] ? __mutex_lock_slowpath+0x21c/0x2c0
[ 4301.025058]  [&amp;lt;ffffffff8108fd60&amp;gt;] ? autoremove_wake_function+0x0/0x40
[ 4301.026319]  [&amp;lt;ffffffffa09f58d3&amp;gt;] __cv_timedwait+0x13/0x20 [spl]
[ 4301.027593]  [&amp;lt;ffffffffa0a956b7&amp;gt;] txg_delay+0xf7/0x140 [zfs]
[ 4301.028851]  [&amp;lt;ffffffffa0a6d5fe&amp;gt;] dsl_dir_tempreserve_space+0x1de/0x1f0 [zfs]
[ 4301.030237]  [&amp;lt;ffffffffa0a5732d&amp;gt;] ? dmu_tx_wait+0x11d/0x120 [zfs]
[ 4301.031719]  [&amp;lt;ffffffffa0a5740e&amp;gt;] ? dmu_tx_assign+0x8e/0x4b0 [zfs]
[ 4301.033237]  [&amp;lt;ffffffffa08b955c&amp;gt;] ? osd_trans_start+0x9c/0x430 [osd_zfs]
[ 4301.034868]  [&amp;lt;ffffffffa0cebe1d&amp;gt;] ? ofd_trans_start+0x22d/0x450 [ofd]
[ 4301.036448]  [&amp;lt;ffffffffa0cf0772&amp;gt;] ? ofd_commitrw_write+0x652/0x12a0 [ofd]
[ 4301.037807]  [&amp;lt;ffffffffa0cf1775&amp;gt;] ? ofd_commitrw+0x3b5/0x9c0 [ofd]
[ 4301.039079]  [&amp;lt;ffffffffa0c2dbd8&amp;gt;] ? obd_commitrw+0x128/0x3d0 [ost]
[ 4301.040326]  [&amp;lt;ffffffffa0c34e34&amp;gt;] ? ost_brw_write+0xd04/0x15d0 [ost]
[ 4301.041592]  [&amp;lt;ffffffffa05042f0&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
[ 4301.042939]  [&amp;lt;ffffffffa0c3a250&amp;gt;] ? ost_handle+0x3120/0x4550 [ost]
[ 4301.044167]  [&amp;lt;ffffffffa0e9b464&amp;gt;] ? libcfs_id2str+0x74/0xb0 [libcfs]
[ 4301.045471]  [&amp;lt;ffffffffa0551483&amp;gt;] ? ptlrpc_server_handle_request+0x463/0xe70 [ptlrpc]
[ 4301.047047]  [&amp;lt;ffffffffa0e8f66e&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
[ 4301.048460]  [&amp;lt;ffffffffa054a171&amp;gt;] ? ptlrpc_wait_event+0xb1/0x2a0 [ptlrpc]
[ 4301.049727]  [&amp;lt;ffffffffa0e9f6d1&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
[ 4301.051178]  [&amp;lt;ffffffff81051f73&amp;gt;] ? __wake_up+0x53/0x70
[ 4301.052242]  [&amp;lt;ffffffffa055401a&amp;gt;] ? ptlrpc_main+0xb9a/0x1960 [ptlrpc]
[ 4301.053519]  [&amp;lt;ffffffffa0553480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 4301.054678]  [&amp;lt;ffffffff8100c14a&amp;gt;] ? child_rip+0xa/0x20
[ 4301.055860]  [&amp;lt;ffffffffa0553480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 4301.057251]  [&amp;lt;ffffffffa0553480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 4301.058523]  [&amp;lt;ffffffff8100c140&amp;gt;] ? child_rip+0x0/0x20
[ 4301.059582]
[ 4301.059918] LustreError: dumping log to /tmp/lustre-log.1350079484.29286
[ 4307.044564] LNet: Service thread pid 29646 was inactive for 200.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[ 4307.049281] Pid: 29646, comm: ll_ost_io00_007
[ 4307.050021]
[ 4307.050022] Call Trace:
[ 4307.050711]  [&amp;lt;ffffffff8100e530&amp;gt;] ? dump_trace+0x190/0x3b0
[ 4307.051628]  [&amp;lt;ffffffff81174d23&amp;gt;] ? create_object+0x33/0x2c0
[ 4307.052579]  [&amp;lt;ffffffff814fdb54&amp;gt;] ? _spin_lock_irqsave+0x24/0x30
[ 4307.053624]  [&amp;lt;ffffffff81061580&amp;gt;] ? pick_next_task_fair+0xd0/0x130
[ 4307.054684]  [&amp;lt;ffffffff814fb5d1&amp;gt;] schedule_timeout+0x191/0x2e0
[ 4307.055661]  [&amp;lt;ffffffff8107bcd0&amp;gt;] ? process_timeout+0x0/0x10
[ 4307.056646]  [&amp;lt;ffffffffa09f56e0&amp;gt;] __cv_timedwait_common+0xc0/0x280 [spl]
[ 4307.057801]  [&amp;lt;ffffffff814fc20c&amp;gt;] ? __mutex_lock_slowpath+0x21c/0x2c0
[ 4307.058899]  [&amp;lt;ffffffffa09ed0ca&amp;gt;] ? kmem_alloc_debug+0x13a/0x4c0 [spl]
[ 4307.060010]  [&amp;lt;ffffffffa09f58d3&amp;gt;] __cv_timedwait+0x13/0x20 [spl]
[ 4307.061059]  [&amp;lt;ffffffffa0a956b7&amp;gt;] txg_delay+0xf7/0x140 [zfs]
[ 4307.062048]  [&amp;lt;ffffffffa0a6d5fe&amp;gt;] dsl_dir_tempreserve_space+0x1de/0x1f0 [zfs]
[ 4307.063270]  [&amp;lt;ffffffffa0a577fb&amp;gt;] dmu_tx_assign+0x47b/0x4b0 [zfs]
[ 4307.064295]  [&amp;lt;ffffffffa08b955c&amp;gt;] osd_trans_start+0x9c/0x430 [osd_zfs]
[ 4307.065440]  [&amp;lt;ffffffffa0cebe1d&amp;gt;] ofd_trans_start+0x22d/0x450 [ofd]
[ 4307.066507]  [&amp;lt;ffffffffa0cf0772&amp;gt;] ofd_commitrw_write+0x652/0x12a0 [ofd]
[ 4307.067656]  [&amp;lt;ffffffffa0cf1775&amp;gt;] ofd_commitrw+0x3b5/0x9c0 [ofd]
[ 4307.068690]  [&amp;lt;ffffffffa0c2dbd8&amp;gt;] obd_commitrw+0x128/0x3d0 [ost]
[ 4307.069707]  [&amp;lt;ffffffffa0c34e34&amp;gt;] ost_brw_write+0xd04/0x15d0 [ost]
[ 4307.070777]  [&amp;lt;ffffffffa05042f0&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
[ 4307.071911]  [&amp;lt;ffffffffa0c3a250&amp;gt;] ost_handle+0x3120/0x4550 [ost]
[ 4307.072975]  [&amp;lt;ffffffffa0e9b464&amp;gt;] ? libcfs_id2str+0x74/0xb0 [libcfs]
[ 4307.074082]  [&amp;lt;ffffffffa0551483&amp;gt;] ptlrpc_server_handle_request+0x463/0xe70 [ptlrpc]
[ 4307.075370]  [&amp;lt;ffffffffa0e8f66e&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
[ 4307.076445]  [&amp;lt;ffffffffa054a171&amp;gt;] ? ptlrpc_wait_event+0xb1/0x2a0 [ptlrpc]
[ 4307.077561]  [&amp;lt;ffffffffa0e9f6d1&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
[ 4307.078655]  [&amp;lt;ffffffff81051f73&amp;gt;] ? __wake_up+0x53/0x70
[ 4307.079601]  [&amp;lt;ffffffffa055401a&amp;gt;] ptlrpc_main+0xb9a/0x1960 [ptlrpc]
[ 4307.080673]  [&amp;lt;ffffffffa0553480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 4307.081885]  [&amp;lt;ffffffff8100c14a&amp;gt;] child_rip+0xa/0x20
[ 4307.082707]  [&amp;lt;ffffffffa0553480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 4307.083715]  [&amp;lt;ffffffffa0553480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 4307.084911]  [&amp;lt;ffffffff8100c140&amp;gt;] ? child_rip+0x0/0x20
[ 4307.085896]
...
[ 4914.044577] Lustre: 29335:0:(service.c:1266:ptlrpc_at_send_early_reply()) @@@ Couldn&apos;t add any time (5/-207), not sending early reply
[ 4914.044580]   req@ffff8801a51c9bf0 x1415657954602061/t0(0) o4-&amp;gt;5dc665da-eb3e-3cef-f2b8-d3318e082c88@0@lo:0/0 lens 488/448 e 5 to 0 dl 1350080102 ref 2 fl Interpret:/0/0 rc 0/0
[ 5024.072116] Lustre: 4182:0:(client.c:1909:ptlrpc_expire_one_request()) @@@ Request  sent has timed out for slow reply: [sent 1350079284/real 1350079284]  req@ffff88016fb9fbf0 x1415657954602058/t0(0) o4-&amp;gt;lustre-OST0001-osc-ffff88023539fbf0@0@lo:6/4 lens 488/448 e 5 to 1 dl 1350080207 ref 2 fl Rpc:X/0/ffffffff rc 0/-1
[ 5024.080010] Lustre: lustre-OST0001-osc-ffff88023539fbf0: Connection to lustre-OST0001 (at 0@lo) was lost; in progress operations using this service will wait for recovery to complete
[ 5024.083451] Lustre: lustre-OST0001: Client 5dc665da-eb3e-3cef-f2b8-d3318e082c88 (at 0@lo) reconnecting
[ 5024.087183] Lustre: lustre-OST0001: Client 5dc665da-eb3e-3cef-f2b8-d3318e082c88 (at 0@lo) refused reconnection, still busy with 1 active RPCs
[ 5024.090527] LustreError: 11-0: an error occurred while communicating with 0@lo. The ost_connect operation failed with -16
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;And it never completes.&lt;/p&gt;</description>
                <environment></environment>
                <key id="16346">LU-2165</key>
                <summary>sanity test 64b hang with zfs</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                <statusCategory id="3" key="done" colorName="success"/>
                <resolution id="5">Cannot Reproduce</resolution>
                <assignee username="utopiabound">Nathaniel Clark</assignee>
                <reporter username="green">Oleg Drokin</reporter>
                <labels>
                    <label>zfs</label>
                </labels>
                <created>Fri, 12 Oct 2012 18:33:13 +0000</created>
                <updated>Wed, 1 Jul 2015 17:30:23 +0000</updated>
                <resolved>Wed, 1 Jul 2015 17:30:23 +0000</resolved>
                <version>Lustre 2.4.0</version>
                <due></due>
                <votes>0</votes>
                <watches>5</watches>
                <comments>
                            <comment id="46500" author="green" created="Fri, 12 Oct 2012 22:53:09 +0000"  >&lt;p&gt;This one seems to be fully repeatable for me with:&lt;br/&gt;
SLOW=yes REFORMAT=yes FSTYPE=zfs sh sanity.sh&lt;/p&gt;</comment>
                            <comment id="46520" author="green" created="Sat, 13 Oct 2012 20:09:52 +0000"  >&lt;p&gt;Hm, addition, this only seems to be fully repeatable when I have kmemleak enabled and running (Which put some extra mem pressure on things,&lt;br/&gt;
with kmemleak disabled test 64b seems to pass for me.&lt;br/&gt;
Here&apos;s another trace from the last failure I had:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 2148.341196] Lustre: DEBUG MARKER: == sanity test 64b: check out-of-space detection on client ============= 19:51:42 (1350172302)
[ 2430.988224] LNet: Service thread pid 21584 was inactive for 200.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[ 2430.991114] Pid: 21584, comm: ll_ost_io01_008
[ 2430.991874]
[ 2430.991874] Call Trace:
[ 2430.992557]  [&amp;lt;ffffffff8100e530&amp;gt;] ? dump_trace+0x190/0x3b0
[ 2430.993560]  [&amp;lt;ffffffff81174d23&amp;gt;] ? create_object+0x33/0x2c0
[ 2430.994549]  [&amp;lt;ffffffff81174e9c&amp;gt;] ? create_object+0x1ac/0x2c0
[ 2430.996102]  [&amp;lt;ffffffffa09f5984&amp;gt;] ? cv_wait_common+0xa4/0x1b0 [spl]
[ 2430.997777]  [&amp;lt;ffffffffa09f58d3&amp;gt;] ? __cv_timedwait+0x13/0x20 [spl]
[ 2430.999375]  [&amp;lt;ffffffffa0a956b7&amp;gt;] ? txg_delay+0xf7/0x140 [zfs]
[ 2431.000944]  [&amp;lt;ffffffffa0a6d5fe&amp;gt;] ? dsl_dir_tempreserve_space+0x1de/0x1f0 [zfs]
[ 2431.002927]  [&amp;lt;ffffffffa0a575f8&amp;gt;] ? dmu_tx_assign+0x278/0x4b0 [zfs]
[ 2431.004618]  [&amp;lt;ffffffffa0b5655c&amp;gt;] ? osd_trans_start+0x9c/0x430 [osd_zfs]
[ 2431.006422]  [&amp;lt;ffffffffa0d49e1d&amp;gt;] ? ofd_trans_start+0x22d/0x450 [ofd]
[ 2431.008114]  [&amp;lt;ffffffffa0d4e772&amp;gt;] ? ofd_commitrw_write+0x652/0x12a0 [ofd]
[ 2431.009985]  [&amp;lt;ffffffffa0d4f775&amp;gt;] ? ofd_commitrw+0x3b5/0x9c0 [ofd]
[ 2431.011623]  [&amp;lt;ffffffffa0caabd8&amp;gt;] ? obd_commitrw+0x128/0x3d0 [ost]
[ 2431.013192]  [&amp;lt;ffffffffa0cb1e34&amp;gt;] ? ost_brw_write+0xd04/0x15d0 [ost]
[ 2431.014756]  [&amp;lt;ffffffffa05032f0&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
[ 2431.016372]  [&amp;lt;ffffffffa0cb7250&amp;gt;] ? ost_handle+0x3120/0x4550 [ost]
[ 2431.017889]  [&amp;lt;ffffffffa01f4464&amp;gt;] ? libcfs_id2str+0x74/0xb0 [libcfs]
[ 2431.019396]  [&amp;lt;ffffffffa0550483&amp;gt;] ? ptlrpc_server_handle_request+0x463/0xe70 [ptlrpc]
[ 2431.021231]  [&amp;lt;ffffffffa01e866e&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
[ 2431.022757]  [&amp;lt;ffffffffa0549171&amp;gt;] ? ptlrpc_wait_event+0xb1/0x2a0 [ptlrpc]
[ 2431.024408]  [&amp;lt;ffffffffa01f86d1&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
[ 2431.025997]  [&amp;lt;ffffffff81051f73&amp;gt;] ? __wake_up+0x53/0x70
[ 2431.027116]  [&amp;lt;ffffffffa055301a&amp;gt;] ? ptlrpc_main+0xb9a/0x1960 [ptlrpc]
[ 2431.028214]  [&amp;lt;ffffffffa0552480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 2431.029378]  [&amp;lt;ffffffff8100c14a&amp;gt;] ? child_rip+0xa/0x20
[ 2431.030288]  [&amp;lt;ffffffffa0552480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 2431.031413]  [&amp;lt;ffffffffa0552480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 2431.032518]  [&amp;lt;ffffffff8100c140&amp;gt;] ? child_rip+0x0/0x20
[ 2431.033434]
[ 2431.033718] LustreError: dumping log to /tmp/lustre-log.1350172585.21584
[ 2434.700279] LNet: Service thread pid 3314 was inactive for 200.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[ 2434.704227] Pid: 3314, comm: ll_ost_io00_001
[ 2434.705219]
[ 2434.705220] Call Trace:
[ 2434.706092]  [&amp;lt;ffffffff81164533&amp;gt;] ? __kmalloc+0x1c3/0x2f0
[ 2434.707182]  [&amp;lt;ffffffff814fc20c&amp;gt;] ? __mutex_lock_slowpath+0x21c/0x2c0
[ 2434.708537]  [&amp;lt;ffffffffa09ea85b&amp;gt;] kmem_free_debug+0x4b/0x150 [spl]
[ 2434.709854]  [&amp;lt;ffffffffa09f58d3&amp;gt;] ? __cv_timedwait+0x13/0x20 [spl]
[ 2434.711112]  [&amp;lt;ffffffffa0a956b7&amp;gt;] ? txg_delay+0xf7/0x140 [zfs]
[ 2434.712350]  [&amp;lt;ffffffffa0a952d5&amp;gt;] ? txg_wait_open+0x125/0x130 [zfs]
[ 2434.713676]  [&amp;lt;ffffffffa0a6d5fe&amp;gt;] ? dsl_dir_tempreserve_space+0x1de/0x1f0 [zfs]
[ 2434.715165]  [&amp;lt;ffffffffa0a577fb&amp;gt;] ? dmu_tx_assign+0x47b/0x4b0 [zfs]
[ 2434.716524]  [&amp;lt;ffffffffa0b5655c&amp;gt;] ? osd_trans_start+0x9c/0x430 [osd_zfs]
[ 2434.717950]  [&amp;lt;ffffffffa0d49e1d&amp;gt;] ? ofd_trans_start+0x22d/0x450 [ofd]
[ 2434.719345]  [&amp;lt;ffffffffa0d4e772&amp;gt;] ? ofd_commitrw_write+0x652/0x12a0 [ofd]
[ 2434.720870]  [&amp;lt;ffffffffa0d4f775&amp;gt;] ? ofd_commitrw+0x3b5/0x9c0 [ofd]
[ 2434.722277]  [&amp;lt;ffffffffa0caabd8&amp;gt;] ? obd_commitrw+0x128/0x3d0 [ost]
[ 2434.723621]  [&amp;lt;ffffffffa0cb1e34&amp;gt;] ? ost_brw_write+0xd04/0x15d0 [ost]
[ 2434.725081]  [&amp;lt;ffffffffa05032f0&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
[ 2434.726601]  [&amp;lt;ffffffffa0cb7250&amp;gt;] ? ost_handle+0x3120/0x4550 [ost]
[ 2434.727907]  [&amp;lt;ffffffffa01f4464&amp;gt;] ? libcfs_id2str+0x74/0xb0 [libcfs]
[ 2434.729364]  [&amp;lt;ffffffffa0550483&amp;gt;] ? ptlrpc_server_handle_request+0x463/0xe70 [ptlrpc]
[ 2434.731075]  [&amp;lt;ffffffffa01e866e&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
[ 2434.732348]  [&amp;lt;ffffffffa0549171&amp;gt;] ? ptlrpc_wait_event+0xb1/0x2a0 [ptlrpc]
[ 2434.733622]  [&amp;lt;ffffffffa01f86d1&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
[ 2434.734745]  [&amp;lt;ffffffff81051f73&amp;gt;] ? __wake_up+0x53/0x70
[ 2434.735682]  [&amp;lt;ffffffffa055301a&amp;gt;] ? ptlrpc_main+0xb9a/0x1960 [ptlrpc]
[ 2434.736831]  [&amp;lt;ffffffffa0552480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 2434.737889]  [&amp;lt;ffffffff8100c14a&amp;gt;] ? child_rip+0xa/0x20
[ 2434.738787]  [&amp;lt;ffffffffa0552480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 2434.739860]  [&amp;lt;ffffffffa0552480&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
[ 2434.741105]  [&amp;lt;ffffffff8100c140&amp;gt;] ? child_rip+0x0/0x20
[ 2434.742165]
[ 2434.742496] LustreError: dumping log to /tmp/lustre-log.1350172588.3314
[ 2519.305054] kmemleak: 998 new suspected memory leaks (see /sys/kernel/debug/kmemleak)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="48590" author="utopiabound" created="Fri, 30 Nov 2012 12:02:51 +0000"  >&lt;p&gt;This may not be an issue other than ZFS really wants lots of memory and is kind of a pig about it.&lt;/p&gt;</comment>
                            <comment id="49759" author="jlevi" created="Fri, 28 Dec 2012 13:59:57 +0000"  >&lt;p&gt;Reduced from blocker per Oleg.&lt;/p&gt;</comment>
                            <comment id="120084" author="utopiabound" created="Wed, 1 Jul 2015 17:30:23 +0000"  >&lt;p&gt;There are no instances of this bug in maloo.&lt;/p&gt;</comment>
                </comments>
                <attachments>
                </attachments>
                <subtasks>
                </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzva8v:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>5195</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>