<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:33:35 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
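For instance, a full request might look like the following (a hypothetical URL, assuming the standard JIRA XML issue view path):
https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-3400/LU-3400.xml?field=key&field=summary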
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-3400] 2.1.5&lt;-&gt;2.4.0 interop: performance-sanity test 7: rank 0: unlink(f530412) error: Interrupted system call</title>
                <link>https://jira.whamcloud.com/browse/LU-3400</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;performance-sanity test 7 failed as follows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;0: client-26vm5.lab.whamcloud.com starting at Sat May 25 23:13:16 2013
[client-26vm6.lab.whamcloud.com:27516] 1 more process has sent help message help-mpi-btl-base.txt / btl:no-nics
[client-26vm6.lab.whamcloud.com:27516] Set MCA parameter &quot;orte_base_help_aggregate&quot; to 0 to see all help / error messages
rank 0: unlink(f530412) error: Interrupted system call
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The MDS (client-26vm3) hit an OOM issue:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: == mdsrate-stat-small mdsrate-stat-small.sh: test complete, cleaning up == 23:13:15 (1369548795)
sendmail invoked oom-killer: gfp_mask=0x200da, order=0, oom_adj=0, oom_score_adj=0
sendmail cpuset=/ mems_allowed=0
Pid: 1639, comm: sendmail Not tainted 2.6.32-358.6.2.el6_lustre.g97b2c86.x86_64 #1
Call Trace:
 [&amp;lt;ffffffff810cb5f1&amp;gt;] ? cpuset_print_task_mems_allowed+0x91/0xb0
 [&amp;lt;ffffffff8111cdf0&amp;gt;] ? dump_header+0x90/0x1b0
 [&amp;lt;ffffffff810e92ce&amp;gt;] ? __delayacct_freepages_end+0x2e/0x30
 [&amp;lt;ffffffff8121d3bc&amp;gt;] ? security_real_capable_noaudit+0x3c/0x70
 [&amp;lt;ffffffff8111d272&amp;gt;] ? oom_kill_process+0x82/0x2a0
 [&amp;lt;ffffffff8111d16e&amp;gt;] ? select_bad_process+0x9e/0x120
 [&amp;lt;ffffffff8111d6b0&amp;gt;] ? out_of_memory+0x220/0x3c0
 [&amp;lt;ffffffff8112c35c&amp;gt;] ? __alloc_pages_nodemask+0x8ac/0x8d0
 [&amp;lt;ffffffff81160a5a&amp;gt;] ? alloc_pages_vma+0x9a/0x150
 [&amp;lt;ffffffff811547a2&amp;gt;] ? read_swap_cache_async+0xf2/0x150
 [&amp;lt;ffffffff811552b9&amp;gt;] ? valid_swaphandles+0x69/0x150
 [&amp;lt;ffffffff81154887&amp;gt;] ? swapin_readahead+0x87/0xc0
 [&amp;lt;ffffffff81143d7b&amp;gt;] ? handle_pte_fault+0x70b/0xb50
 [&amp;lt;ffffffff8105e203&amp;gt;] ? perf_event_task_sched_out+0x33/0x80
 [&amp;lt;ffffffff811443fa&amp;gt;] ? handle_mm_fault+0x23a/0x310
 [&amp;lt;ffffffff810474c9&amp;gt;] ? __do_page_fault+0x139/0x480
 [&amp;lt;ffffffff81171ed1&amp;gt;] ? __mem_cgroup_uncharge_common+0x81/0x300
 [&amp;lt;ffffffff811721db&amp;gt;] ? mem_cgroup_uncharge_swapcache+0x2b/0x60
 [&amp;lt;ffffffff81277731&amp;gt;] ? cpumask_any_but+0x31/0x50
 [&amp;lt;ffffffff8104c7e8&amp;gt;] ? flush_tlb_page+0x48/0xb0
 [&amp;lt;ffffffff8104b7cd&amp;gt;] ? ptep_set_access_flags+0x6d/0x70
 [&amp;lt;ffffffff811759e3&amp;gt;] ? swap_cgroup_record+0xa3/0xc0
 [&amp;lt;ffffffff815139ce&amp;gt;] ? do_page_fault+0x3e/0xa0
 [&amp;lt;ffffffff81510d85&amp;gt;] ? page_fault+0x25/0x30
 [&amp;lt;ffffffff812824d6&amp;gt;] ? copy_user_generic_unrolled+0x86/0xb0
 [&amp;lt;ffffffff81010bde&amp;gt;] ? copy_user_generic+0xe/0x20
 [&amp;lt;ffffffff811970b9&amp;gt;] ? set_fd_set+0x49/0x60
 [&amp;lt;ffffffff8119856c&amp;gt;] ? core_sys_select+0x1bc/0x2c0
 [&amp;lt;ffffffff8104757c&amp;gt;] ? __do_page_fault+0x1ec/0x480
 [&amp;lt;ffffffff8103c7b8&amp;gt;] ? pvclock_clocksource_read+0x58/0xd0
 [&amp;lt;ffffffff8103b8ac&amp;gt;] ? kvm_clock_read+0x1c/0x20
 [&amp;lt;ffffffff8103b8b9&amp;gt;] ? kvm_clock_get_cycles+0x9/0x10
 [&amp;lt;ffffffff810a1ac9&amp;gt;] ? ktime_get_ts+0xa9/0xe0
 [&amp;lt;ffffffff811988c7&amp;gt;] ? sys_select+0x47/0x110
 [&amp;lt;ffffffff8100b072&amp;gt;] ? system_call_fastpath+0x16/0x1b
Mem-Info:
Node 0 DMA per-cpu:
CPU    0: hi:    0, btch:   1 usd:   0
Node 0 DMA32 per-cpu:
CPU    0: hi:  186, btch:  31 usd:  30
active_anon:0 inactive_anon:3 isolated_anon:0
 active_file:208092 inactive_file:207909 isolated_file:480
 unevictable:0 dirty:459 writeback:1 unstable:0
 free:13241 slab_reclaimable:13176 slab_unreclaimable:10179
 mapped:1 shmem:0 pagetables:787 bounce:0
Node 0 DMA free:8344kB min:332kB low:412kB high:496kB active_anon:0kB inactive_anon:12kB active_file:3748kB inactive_file:3352kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:15324kB mlocked:0kB dirty:0kB writeback:0kB mapped:0kB shmem:0kB slab_reclaimable:244kB slab_unreclaimable:16kB kernel_stack:16kB pagetables:0kB unstable:0kB bounce:0kB writeback_tmp:0kB pages_scanned:10982 all_unreclaimable? yes
lowmem_reserve[]: 0 2003 2003 2003
Node 0 DMA32 free:44620kB min:44720kB low:55900kB high:67080kB active_anon:0kB inactive_anon:0kB active_file:828620kB inactive_file:828284kB unevictable:0kB isolated(anon):0kB isolated(file):1920kB present:2052064kB mlocked:0kB dirty:1836kB writeback:4kB mapped:4kB shmem:0kB slab_reclaimable:52460kB slab_unreclaimable:40700kB kernel_stack:1496kB pagetables:3148kB unstable:0kB bounce:0kB writeback_tmp:0kB pages_scanned:1506607 all_unreclaimable? no
lowmem_reserve[]: 0 0 0 0
Node 0 DMA: 50*4kB 32*8kB 25*16kB 20*32kB 15*64kB 6*128kB 2*256kB 1*512kB 2*1024kB 1*2048kB 0*4096kB = 8344kB
Node 0 DMA32: 9623*4kB 2*8kB 2*16kB 2*32kB 2*64kB 0*128kB 1*256kB 1*512kB 1*1024kB 0*2048kB 1*4096kB = 44620kB
69416 total pagecache pages
3 pages in swap cache
Swap cache stats: add 33445, delete 33442, find 17374/22783
Free swap  = 4105544kB
Total swap = 4128760kB
524284 pages RAM
43668 pages reserved
69933 pages shared
392724 pages non-shared
[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name
[  472]     0   472     2726        0   0     -17         -1000 udevd
[  787]     0   787     2660        0   0     -17         -1000 udevd
[ 1131]     0  1131    23299        1   0     -17         -1000 auditd
[ 1147]     0  1147    62367        1   0       0             0 rsyslogd
[ 1189]    32  1189     4743        1   0       0             0 rpcbind
[ 1201]     0  1201    49811        1   0       0             0 sssd
[ 1202]     0  1202    57001        1   0       0             0 sssd_be
[ 1203]     0  1203    50720        1   0       0             0 sssd_nss
[ 1204]     0  1204    48273        1   0       0             0 sssd_pam
[ 1221]    29  1221     6355        1   0       0             0 rpc.statd
[ 1347]    81  1347     5869        1   0       0             0 dbus-daemon
[ 1386]     0  1386     1019        0   0       0             0 acpid
[ 1395]    68  1395     6794        1   0       0             0 hald
[ 1396]     0  1396     4525        1   0       0             0 hald-runner
[ 1424]     0  1424     5054        1   0       0             0 hald-addon-inpu
[ 1434]    68  1434     4450        1   0       0             0 hald-addon-acpi
[ 1454]     0  1454   168269        1   0       0             0 automount
[ 1496]     0  1496    26826        0   0       0             0 rpc.rquotad
[ 1500]     0  1500     5413        0   0       0             0 rpc.mountd
[ 1549]     0  1549     6290        1   0       0             0 rpc.idmapd
[ 1592]   498  1592    58372        1   0       0             0 munged
[ 1607]     0  1607    16028        0   0     -17         -1000 sshd
[ 1615]     0  1615     5523        1   0       0             0 xinetd
[ 1623]    38  1623     7005        1   0       0             0 ntpd
[ 1639]     0  1639    22194        0   0       0             0 sendmail
[ 1647]    51  1647    19539        0   0       0             0 sendmail
[ 1669]     0  1669    29303        1   0       0             0 crond
[ 1680]     0  1680     5363        0   0       0             0 atd
[ 1705]     0  1705     1018        1   0       0             0 agetty
[ 1707]     0  1707     1015        1   0       0             0 mingetty
[ 1709]     0  1709     1015        1   0       0             0 mingetty
[ 1711]     0  1711     1015        1   0       0             0 mingetty
[ 1713]     0  1713     1015        1   0       0             0 mingetty
[ 1714]     0  1714     2725        0   0     -17         -1000 udevd
[ 1716]     0  1716     1015        1   0       0             0 mingetty
[ 1718]     0  1718     1015        1   0       0             0 mingetty
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/2581003a-c624-11e2-9bf1-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/2581003a-c624-11e2-9bf1-52540035b04c&lt;/a&gt;&lt;br/&gt;
The console log for client-26vm3 is attached.&lt;/p&gt;</description>
                <environment>
Lustre b2_1 client build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/204&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/204&lt;/a&gt;&lt;br/&gt;
Lustre master server build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-master/1508&quot;&gt;http://build.whamcloud.com/job/lustre-master/1508&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.4/x86_64&lt;br/&gt;
</environment>
        <key id="19160">LU-3400</key>
            <summary>2.1.5&lt;-&gt;2.4.0 interop: performance-sanity test 7: rank 0: unlink(f530412) error: Interrupted system call</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="ys">Yang Sheng</assignee>
                                    <reporter username="yujian">Jian Yu</reporter>
                        <labels>
                            <label>yuc2</label>
                    </labels>
                <created>Mon, 27 May 2013 07:26:17 +0000</created>
                <updated>Tue, 15 Apr 2014 03:53:31 +0000</updated>
                            <resolved>Tue, 15 Apr 2014 03:53:31 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                    <version>Lustre 2.1.5</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="59385" author="pjones" created="Mon, 27 May 2013 14:03:27 +0000"  >&lt;p&gt;Yang Sheng&lt;/p&gt;

&lt;p&gt;Could you please look into this issue?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="59505" author="ys" created="Wed, 29 May 2013 06:23:19 +0000"  >&lt;p&gt;Another instance: &lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/e28be6c2-95dc-11e2-9abb-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/e28be6c2-95dc-11e2-9abb-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="59521" author="yujian" created="Wed, 29 May 2013 12:38:30 +0000"  >&lt;p&gt;This is not a new issue. I found the oom issue occurred before:&lt;/p&gt;

&lt;p&gt;b2_1 client with b2_3 server:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/6bc8e158-937e-11e2-89cc-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/6bc8e158-937e-11e2-89cc-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;b2_1 client with b2_2 server:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/3eb99b7c-c7c6-11e2-9f90-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/3eb99b7c-c7c6-11e2-9f90-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Because the console logs on the MDS nodes were not gathered completely by autotest, we thought the failures were instances of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-798&quot; title=&quot;Test failure on test suite performance-sanity,subtest test_5&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-798&quot;&gt;&lt;del&gt;LU-798&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;
</comment>
                            <comment id="59855" author="yujian" created="Mon, 3 Jun 2013 09:50:08 +0000"  >&lt;p&gt;Lustre Client: 2.1.6 RC1&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/208&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/208&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Lustre Server: 2.3.0&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_3/41&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_3/41&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The OOM issue occurred again:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/7bcb8202-cc0b-11e2-9cc0-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/7bcb8202-cc0b-11e2-9cc0-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="59922" author="ys" created="Tue, 4 Jun 2013 00:49:39 +0000"  >&lt;p&gt;It&apos;s very useful got /proc/slabinfo while OOM present. &lt;/p&gt;</comment>
                            <comment id="59930" author="yujian" created="Tue, 4 Jun 2013 03:53:17 +0000"  >&lt;blockquote&gt;&lt;p&gt;It&apos;s very useful got /proc/slabinfo while OOM present. &lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;I tried to make autotest run only performance-sanity, twice, on Lustre b2_1 clients with master servers, but failed to hit the issue. I&apos;ll run more performance-sanity tests to see whether the issue can be reproduced manually.&lt;/p&gt;</comment>
                            <comment id="61282" author="yujian" created="Tue, 25 Jun 2013 03:44:59 +0000"  >&lt;p&gt;Unfortunately, I could not reproduce the issue manually.&lt;/p&gt;

&lt;p&gt;It occurred again in an autotest run:&lt;/p&gt;

&lt;p&gt;Lustre Client: 2.1.6 RC2&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/215&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/215&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Lustre Server: 2.3.0&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_3/41&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_3/41&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/04eee310-dd0c-11e2-8c97-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/04eee310-dd0c-11e2-8c97-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="61284" author="ys" created="Tue, 25 Jun 2013 03:51:29 +0000"  >&lt;p&gt;Ok, I&apos;ll commit debug patch to autotest then. Hope get slabinfo while OOM hit again. &lt;/p&gt;</comment>
                            <comment id="62897" author="ys" created="Wed, 24 Jul 2013 14:37:46 +0000"  >&lt;p&gt;Got a hit with my debug patch: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/cffedf6e-f3f6-11e2-bc23-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/cffedf6e-f3f6-11e2-bc23-52540035b04c&lt;/a&gt;&lt;br/&gt;
The results point out that buffer_head is consuming too much memory:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: buffer_head 85795 319458 104 37 1 : tunables 120 60 0 : slabdata 8634 8634 0
Lustre: DEBUG MARKER: dentry 40740 40740 192 20 1 : tunables 120 60 0 : slabdata 2037 2037 0
Lustre: DEBUG MARKER: size-32 27245 27440 32 112 1 : tunables 120 60 0 : slabdata 245 245 0
Lustre: DEBUG MARKER: size-64 8637 12567 64 59 1 : tunables 120 60 0 : slabdata 213 213 0
Lustre: DEBUG MARKER: sysfs_dir_cache 8107 8127 144 27 1 : tunables 120 60 0 : slabdata 301 301 0
Lustre: DEBUG MARKER: selinux_inode_security 7163 7208 72 53 1 : tunables 120 60 0 : slabdata 136 136 0
Lustre: DEBUG MARKER: inode_cache 4418 4428 592 6 1 : tunables 54 27 0 : slabdata 738 738 0
Lustre: DEBUG MARKER: size-128 2573 3780 128 30 1 : tunables 120 60 0 : slabdata 126 126 0
Lustre: DEBUG MARKER: vm_area_struct 2809 2869 200 19 1 : tunables 120 60 0 : slabdata 151 151 0
Lustre: DEBUG MARKER: anon_vma_chain 2325 2387 48 77 1 : tunables 120 60 0 : slabdata 31 31 0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="64224" author="yujian" created="Wed, 14 Aug 2013 08:28:19 +0000"  >&lt;p&gt;Lustre client build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/215/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/215/&lt;/a&gt; (2.1.6)&lt;br/&gt;
Lustre server build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_4/29/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_4/29/&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The large-scale test hit the same OOM issue:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/cb3ccd62-047a-11e3-a8e9-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/cb3ccd62-047a-11e3-a8e9-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="76822" author="ys" created="Wed, 12 Feb 2014 13:53:02 +0000"  >&lt;p&gt;Hi, Yujian, Does this issue still present recently? &lt;/p&gt;</comment>
                            <comment id="76937" author="yujian" created="Thu, 13 Feb 2014 04:45:29 +0000"  >&lt;blockquote&gt;&lt;p&gt;Hi, Yujian, Does this issue still present recently?&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Hi Yang Sheng, I did not hit the OOM issue during Lustre 2.4.2 release testing. For the current b2_5 branch, I hit several OOM failures, reported in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4432&quot; title=&quot;recovery-mds-scale test_failover_ost: tar: Cannot write: Cannot allocate memory&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4432&quot;&gt;&lt;del&gt;LU-4432&lt;/del&gt;&lt;/a&gt;, &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4357&quot; title=&quot;page allocation failure. mode:0x40 caused by missing __GFP_WAIT flag&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4357&quot;&gt;&lt;del&gt;LU-4357&lt;/del&gt;&lt;/a&gt; and &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3366&quot; title=&quot;Test failure obdfilter-survey, subtest test_1c: oom-killer&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3366&quot;&gt;&lt;del&gt;LU-3366&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="81582" author="ys" created="Tue, 15 Apr 2014 03:53:31 +0000"  >&lt;p&gt;As Yujian&apos;s comment. So close this one. &lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="12938" name="client-26vm3.console.log" size="364524" author="yujian" created="Mon, 27 May 2013 07:26:17 +0000"/>
                    </attachments>
        <subtasks>
        </subtasks>
        <customfields>
            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                <customfieldname>Development</customfieldname>
                <customfieldvalues>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                <customfieldname>Rank</customfieldname>
                <customfieldvalues>
                    <customfieldvalue>1|hzvrw7:</customfieldvalue>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                <customfieldname>Rank (Obsolete)</customfieldname>
                <customfieldvalues>
                    <customfieldvalue>8416</customfieldvalue>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                <customfieldname>Severity</customfieldname>
                <customfieldvalues>
                    <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>
                </customfieldvalues>
            </customfield>
        </customfields>
    </item>
</channel>
</rss>