<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:54:43 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5809] sanity-benchmark test pios_fpp: OOM on zfs OSS</title>
                <link>https://jira.whamcloud.com/browse/LU-5809</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;I hit 3 identical OOM panics during tests on eagle this weekend, all of which happened on a zfs OSS during sanity-benchmark test pios_fpp:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: == sanity-benchmark test pios_fpp: pios file per process == 06:54:04 (1414418044)
Lustre: DEBUG MARKER: /usr/sbin/lctl mark \/usr\/bin\/pios  -t 1,8,40 -n 1024                          -c 1M -s 8M                             -o 16M -L fpp -p \/mnt\/lustre\/dpios_fpp.sanity-benchmark
Lustre: DEBUG MARKER: /usr/bin/pios -t 1,8,40 -n 1024 -c 1M -s 8M -o 16M -L fpp -p /mnt/lustre/dpios_fpp.sanity-benchmark
Lustre: lustre-OST0001: Slow creates, 128/256 objects created at a rate of 2/s
LNet: Service thread pid 3372 completed after 91.53s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
LNet: Skipped 15 previous similar messages
Lustre: DEBUG MARKER: /usr/sbin/lctl mark \/usr\/bin\/pios  -t 1,8,40 -n 1024                          -c 1M -s 8M                             -o 16M -L fpp --verify -p \/mnt\/lustre\/dpios_fpp.sanity-benchmark
Lustre: DEBUG MARKER: /usr/bin/pios -t 1,8,40 -n 1024 -c 1M -s 8M -o 16M -L fpp --verify -p /mnt/lustre/dpios_fpp.sanity-benchmark
spl_kmem_cache/ invoked oom-killer: gfp_mask=0x84d0, order=0, oom_adj=0, oom_score_adj=0
spl_kmem_cache/ cpuset=/ mems_allowed=0
Pid: 396, comm: spl_kmem_cache/ Tainted: P           ---------------    2.6.32-431.29.2.el6_lustre.g9835a2a.x86_64 #1
Call Trace:
 [&amp;lt;ffffffff810d07b1&amp;gt;] ? cpuset_print_task_mems_allowed+0x91/0xb0
 [&amp;lt;ffffffff81122b80&amp;gt;] ? dump_header+0x90/0x1b0
 [&amp;lt;ffffffff81122cee&amp;gt;] ? check_panic_on_oom+0x4e/0x80
 [&amp;lt;ffffffff811233db&amp;gt;] ? out_of_memory+0x1bb/0x3c0
 [&amp;lt;ffffffff8112fd5f&amp;gt;] ? __alloc_pages_nodemask+0x89f/0x8d0
 [&amp;lt;ffffffff81167cea&amp;gt;] ? alloc_pages_current+0xaa/0x110
 [&amp;lt;ffffffff8112d15e&amp;gt;] ? __get_free_pages+0xe/0x50
 [&amp;lt;ffffffff8104ec85&amp;gt;] ? pte_alloc_one_kernel+0x15/0x20
 [&amp;lt;ffffffff8114650b&amp;gt;] ? __pte_alloc_kernel+0x1b/0xc0
 [&amp;lt;ffffffff81157769&amp;gt;] ? vmap_page_range_noflush+0x309/0x370
 [&amp;lt;ffffffff81157802&amp;gt;] ? map_vm_area+0x32/0x50
 [&amp;lt;ffffffff81159270&amp;gt;] ? __vmalloc_area_node+0x100/0x190
 [&amp;lt;ffffffffa0115a09&amp;gt;] ? kv_alloc+0x59/0x60 [spl]
 [&amp;lt;ffffffff811590fd&amp;gt;] ? __vmalloc_node+0xad/0x120
 [&amp;lt;ffffffffa0115a09&amp;gt;] ? kv_alloc+0x59/0x60 [spl]
 [&amp;lt;ffffffff811594e2&amp;gt;] ? __vmalloc+0x22/0x30
 [&amp;lt;ffffffffa0115a09&amp;gt;] ? kv_alloc+0x59/0x60 [spl]
 [&amp;lt;ffffffffa0115a49&amp;gt;] ? spl_cache_grow_work+0x39/0x2d0 [spl]
 [&amp;lt;ffffffff81058bd3&amp;gt;] ? __wake_up+0x53/0x70
 [&amp;lt;ffffffffa01174a7&amp;gt;] ? taskq_thread+0x1e7/0x3f0 [spl]
 [&amp;lt;ffffffff81061d00&amp;gt;] ? default_wake_function+0x0/0x20
 [&amp;lt;ffffffffa01172c0&amp;gt;] ? taskq_thread+0x0/0x3f0 [spl]
 [&amp;lt;ffffffff8109abf6&amp;gt;] ? kthread+0x96/0xa0
 [&amp;lt;ffffffff8100c20a&amp;gt;] ? child_rip+0xa/0x20
 [&amp;lt;ffffffff8109ab60&amp;gt;] ? kthread+0x0/0xa0
 [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
Mem-Info:
Node 0 DMA per-cpu:
CPU    0: hi:    0, btch:   1 usd:   0
Node 0 DMA32 per-cpu:
CPU    0: hi:  186, btch:  31 usd:   0
active_anon:0 inactive_anon:0 isolated_anon:0
 active_file:11 inactive_file:0 isolated_file:0
 unevictable:0 dirty:0 writeback:0 unstable:0
 free:8559 slab_reclaimable:1482 slab_unreclaimable:12252
 mapped:1 shmem:0 pagetables:1242 bounce:0
Node 0 DMA free:8352kB min:332kB low:412kB high:496kB active_anon:0kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:15348kB mlocked:0kB dirty:0kB writeback:0kB mapped:0kB shmem:0kB slab_reclaimable:0kB slab_unreclaimable:20kB kernel_stack:0kB pagetables:0kB unstable:0kB bounce:0kB writeback_tmp:0kB pages_scanned:0 all_unreclaimable? yes
lowmem_reserve[]: 0 2004 2004 2004
Node 0 DMA32 free:25884kB min:44720kB low:55900kB high:67080kB active_anon:0kB inactive_anon:0kB active_file:44kB inactive_file:0kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:2052308kB mlocked:0kB dirty:0kB writeback:0kB mapped:4kB shmem:0kB slab_reclaimable:5928kB slab_unreclaimable:48988kB kernel_stack:3416kB pagetables:4968kB unstable:0kB bounce:0kB writeback_tmp:0kB pages_scanned:100 all_unreclaimable? yes
lowmem_reserve[]: 0 0 0 0
Node 0 DMA: 0*4kB 0*8kB 2*16kB 0*32kB 0*64kB 1*128kB 0*256kB 0*512kB 0*1024kB 2*2048kB 1*4096kB = 8352kB
Node 0 DMA32: 719*4kB 340*8kB 184*16kB 84*32kB 33*64kB 16*128kB 5*256kB 2*512kB 2*1024kB 1*2048kB 1*4096kB = 25884kB
20 total pagecache pages
0 pages in swap cache
Swap cache stats: add 5121, delete 5121, find 16/25
Free swap  = 4108600kB
Total swap = 4128764kB
524284 pages RAM
43654 pages reserved
54 pages shared
465254 pages non-shared
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;In all cases, the panicked OSS had 1.8G of memory and ran build lustre-b2_5/96.&lt;/p&gt;</description>
                <environment></environment>
                <key id="27323">LU-5809</key>
                <summary>sanity-benchmark test pios_fpp: OOM on zfs OSS</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                <statusCategory id="3" key="done" colorName="success"/>
                <resolution id="2">Won&apos;t Fix</resolution>
                <assignee username="isaac">Isaac Huang</assignee>
                <reporter username="isaac">Isaac Huang</reporter>
                <labels>
                    <label>RZ_LS</label>
                    <label>zfs</label>
                </labels>
                <created>Mon, 27 Oct 2014 16:39:25 +0000</created>
                <updated>Fri, 21 May 2021 18:42:11 +0000</updated>
                <resolved>Fri, 21 May 2021 18:42:11 +0000</resolved>
                <due></due>
                <votes>0</votes>
                <watches>1</watches>
                <comments>
                            <comment id="97591" author="isaac" created="Mon, 27 Oct 2014 16:44:58 +0000"  >&lt;p&gt;The ARC was set to be at most 900M by default, i.e. half of system memory. I couldn&apos;t get arcstats on the OSS due to the OOM, but I&apos;m going to try lowering the ARC max size. The OOM was quite reproducible - 3 out of my 4 test runs hit it (The only success was likely because I forgot to install pios on the client node and thus the pios tests were skipped).&lt;/p&gt;</comment>
                            <comment id="97662" author="isaac" created="Tue, 28 Oct 2014 04:27:08 +0000"  >&lt;p&gt;OOM still reproducible with ARC max at 800M.&lt;/p&gt;</comment>
                            <comment id="97669" author="isaac" created="Tue, 28 Oct 2014 07:01:58 +0000"  >&lt;p&gt;It seemed that prefetching caused ARC to grow over its limit and eventually caused the OOM. I&apos;ve reported it to ZoL:&lt;br/&gt;
&lt;a href=&quot;https://github.com/zfsonlinux/zfs/issues/2840&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/zfsonlinux/zfs/issues/2840&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Once ZFS prefetching was disabled, sanity-benchmark pios_fpp completed successfully.&lt;/p&gt;</comment>
                            <comment id="97802" author="isaac" created="Wed, 29 Oct 2014 04:19:54 +0000"  >&lt;p&gt;With ZFS prefetching disabled on the OSS, two more test runs (sanity-benchmark performance-sanity parallel-scale) completed with 0 error.&lt;/p&gt;</comment>
                    </comments>
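                <!--
                    Note: the ARC cap and prefetch workaround discussed in the comments
                    above correspond to two standard ZFS-on-Linux module parameters. A
                    minimal sketch of how they would be applied on the OSS, assuming a
                    ZoL build that exposes them under /sys/module/zfs/parameters (the
                    838860800 value is the 800M cap tried above, expressed in bytes):

                        # cap the ARC size at runtime
                        echo 838860800 > /sys/module/zfs/parameters/zfs_arc_max

                        # disable ZFS prefetching, the workaround that let pios_fpp pass
                        echo 1 > /sys/module/zfs/parameters/zfs_prefetch_disable

                    To make the settings persistent across module reloads, the equivalent
                    options can be placed in /etc/modprobe.d/zfs.conf:

                        options zfs zfs_arc_max=838860800
                        options zfs zfs_prefetch_disable=1
                -->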
                <issuelinks>
                    <issuelinktype id="10011">
                        <name>Related</name>
                        <inwardlinks description="is related to">
                        </inwardlinks>
                    </issuelinktype>
                </issuelinks>
                <attachments>
                    <attachment id="16234" name="eagle-44vm1.log" size="496902" author="isaac" created="Mon, 27 Oct 2014 16:39:25 +0000"/>
                    <attachment id="16235" name="eagle-46vm1.log" size="364416" author="isaac" created="Mon, 27 Oct 2014 16:39:25 +0000"/>
                </attachments>
                <subtasks>
                </subtasks>
                <customfields>
                    <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwzjj:</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>16295</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                </customfields>
</item>
</channel>
</rss>