<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:56:01 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
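A complete request URL might look like the following (a sketch that assumes the standard JIRA XML issue view path, /si/jira.issueviews:issue-xml/ISSUE-KEY/ISSUE-KEY.xml):
    https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-12830/LU-12830.xml?field=key&field=summary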
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-12830] RHEL8.3 and ZFS: oom on OSS</title>
                <link>https://jira.whamcloud.com/browse/LU-12830</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for jianyu &amp;lt;yujian@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.whamcloud.com/test_sets/1e6f3bc6-e5ef-11e9-b62b-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/1e6f3bc6-e5ef-11e9-b62b-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;test_bonnie failed with oom on OSS:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[16526.881544] Lustre: DEBUG MARKER: == sanity-benchmark test bonnie: bonnie++ ============================================================ 14:37:57 (1570027077)
[16528.099983] Lustre: DEBUG MARKER: /usr/sbin/lctl mark min OST has 10511360kB available, using 3438712kB file size
[16528.357585] Lustre: DEBUG MARKER: min OST has 10511360kB available, using 3438712kB file size
[16567.214746] irqbalance invoked oom-killer: gfp_mask=0x200da, order=0, oom_score_adj=0
[16567.215741] irqbalance cpuset=/ mems_allowed=0
[16567.216221] CPU: 1 PID: 1179 Comm: irqbalance Kdump: loaded Tainted: P           OE  ------------   3.10.0-957.27.2.el7_lustre.x86_64 #1
[16567.217451] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[16567.218033] Call Trace:
[16567.218336]  [&amp;lt;ffffffffad565147&amp;gt;] dump_stack+0x19/0x1b
[16567.218874]  [&amp;lt;ffffffffad55fb6a&amp;gt;] dump_header+0x90/0x229
[16567.219422]  [&amp;lt;ffffffffad572b1f&amp;gt;] ? notifier_call_chain+0x4f/0x70
[16567.220055]  [&amp;lt;ffffffffacec91c8&amp;gt;] ? __blocking_notifier_call_chain+0x58/0x70
[16567.220779]  [&amp;lt;ffffffffacfbbaae&amp;gt;] check_panic_on_oom+0x2e/0x60
[16567.221379]  [&amp;lt;ffffffffacfbbecb&amp;gt;] out_of_memory+0x23b/0x4f0
[16567.221938]  [&amp;lt;ffffffffad56066e&amp;gt;] __alloc_pages_slowpath+0x5d6/0x724
[16567.222585]  [&amp;lt;ffffffffacfc2524&amp;gt;] __alloc_pages_nodemask+0x404/0x420
[16567.223225]  [&amp;lt;ffffffffad0128c5&amp;gt;] alloc_pages_vma+0xb5/0x200
[16567.223840]  [&amp;lt;ffffffffad000b15&amp;gt;] __read_swap_cache_async+0x115/0x190
[16567.224491]  [&amp;lt;ffffffffad000bb6&amp;gt;] read_swap_cache_async+0x26/0x60
[16567.225104]  [&amp;lt;ffffffffad000c9c&amp;gt;] swapin_readahead+0xac/0x110
[16567.225690]  [&amp;lt;ffffffffacfead92&amp;gt;] handle_pte_fault+0x812/0xd10
[16567.226280]  [&amp;lt;fffffffface2a621&amp;gt;] ? __switch_to+0x151/0x580
[16567.226858]  [&amp;lt;ffffffffacfed3ad&amp;gt;] handle_mm_fault+0x39d/0x9b0
[16567.227444]  [&amp;lt;ffffffffacec6efd&amp;gt;] ? hrtimer_start_range_ns+0x1ed/0x3c0
[16567.228100]  [&amp;lt;ffffffffad572603&amp;gt;] __do_page_fault+0x203/0x4f0
[16567.228685]  [&amp;lt;ffffffffad5729d6&amp;gt;] trace_do_page_fault+0x56/0x150
[16567.229287]  [&amp;lt;ffffffffad571f62&amp;gt;] do_async_page_fault+0x22/0xf0
[16567.229890]  [&amp;lt;ffffffffad56e798&amp;gt;] async_page_fault+0x28/0x30
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;VVVVVVV DO NOT REMOVE LINES BELOW, Added by Maloo for auto-association VVVVVVV&lt;br/&gt;
ost-pools test_23b - trevis-21vm3 crashed during ost-pools test_23b&lt;/p&gt;</description>
                <environment></environment>
        <key id="57093">LU-12830</key>
            <summary>RHEL8.3 and ZFS: oom on OSS</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>ZFS</label>
                    </labels>
                <created>Fri, 4 Oct 2019 03:02:20 +0000</created>
                <updated>Wed, 22 Jun 2022 18:44:50 +0000</updated>
                            <resolved>Wed, 22 Jun 2022 18:44:50 +0000</resolved>
                                    <version>Lustre 2.14.0</version>
                    <version>Lustre 2.15.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>10</watches>
                                                                            <comments>
                            <comment id="255837" author="yujian" created="Fri, 4 Oct 2019 03:09:05 +0000"  >&lt;p&gt;This failure is specific to Lustre test sessions with ZFS 0.8.1 and 0.8.2. It didn&apos;t occur in test sessions with ZFS 0.7.13.&lt;/p&gt;</comment>
                            <comment id="255878" author="yujian" created="Fri, 4 Oct 2019 03:56:09 +0000"  >&lt;p&gt;obdfilter-survey test 1a also failed with oom on OSS:&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/585db5f6-e5ef-11e9-b62b-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/585db5f6-e5ef-11e9-b62b-52540065bddc&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/a00637aa-e632-11e9-a197-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/a00637aa-e632-11e9-a197-52540065bddc&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/722833d0-e603-11e9-be86-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/722833d0-e603-11e9-be86-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="255879" author="yujian" created="Fri, 4 Oct 2019 04:21:18 +0000"  >&lt;p&gt;parallel-scale test compilebench hit the same issue:&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/6e82be44-e603-11e9-be86-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/6e82be44-e603-11e9-be86-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="262242" author="yujian" created="Thu, 30 Jan 2020 23:35:13 +0000"  >&lt;p&gt;The oom failure also occurred with ZFS 0.8.3:&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/b2ac0f98-43a1-11ea-bffa-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/b2ac0f98-43a1-11ea-bffa-52540065bddc&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/b2ab5292-43a1-11ea-86b2-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/b2ab5292-43a1-11ea-86b2-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="262976" author="bzzz" created="Mon, 10 Feb 2020 04:57:06 +0000"  >&lt;p&gt;according to testing in autotest (wiht &lt;a href=&quot;https://review.whamcloud.com/#/c/37384/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/37384/&lt;/a&gt; ) all 0.8.* demontrate this behavior. &lt;br/&gt;
all this doesn&apos;t look like a memleak but rather like a increased memory requirements on ZFS side.&lt;br/&gt;
I tried with less number of OSTs per node and all was good up to 4 OSTs.&lt;br/&gt;
another option is to extend memory on OSS which is 2GB by default.&lt;/p&gt;</comment>
                            <comment id="263374" author="yujian" created="Fri, 14 Feb 2020 22:30:55 +0000"  >&lt;p&gt;&lt;del&gt;The same failure also occurred with &lt;b&gt;ldiskfs&lt;/b&gt; on RHEL 8.1:&lt;/del&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/d6331fe6-4ef1-11ea-a90e-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/d6331fe6-4ef1-11ea-a90e-52540065bddc&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/d70b1166-4907-11ea-aeb7-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/d70b1166-4907-11ea-aeb7-52540065bddc&lt;/a&gt;&lt;br/&gt;
Client-side OOM moved to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13162&quot; title=&quot;parallel-scale test_statahead: mdsrate invoked oom-killer&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13162&quot;&gt;&lt;del&gt;LU-13162&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="263375" author="yujian" created="Fri, 14 Feb 2020 22:36:06 +0000"  >&lt;blockquote&gt;&lt;p&gt;another option is to extend memory on OSS which is 2GB by default.&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Hi Minh, the oom issue affects many tests. Can we increase the memory size on vm nodes?&lt;/p&gt;</comment>
                            <comment id="290504" author="adilger" created="Wed, 27 Jan 2021 22:42:49 +0000"  >&lt;p&gt;Looking at all of the recent failures, the OOM is for ZFS only, and only happening on the OSS.  The two RHEL8.1 ldiskfs failures Jian referenced above were hit on the client.&lt;/p&gt;

&lt;p&gt;Looking at these OOM cases, all of them are due to the OOM of a userspace process (&lt;tt&gt;lctl&lt;/tt&gt; in the case of &lt;tt&gt;obdfilter-survey&lt;/tt&gt;, where it is driving the load):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 5089.324526] CPU: 0 PID: 1175 Comm: tuned 
[20249.901128] CPU: 0 PID: 866 Comm: sssd_be
[31802.020686] CPU: 1 PID: 1074159 Comm: lctl 
[129875.609998] CPU: 1 PID: 1501864 Comm: lctl 
[77898.375312] CPU: 0 PID: 1194 Comm: tuned 
[20897.423508] CPU: 0 PID: 1459 Comm: tuned 
[37225.033993] CPU: 0 PID: 2 Comm: kthreadd
[21544.431269] CPU: 1 PID: 1177 Comm: tuned
[ 4838.509291] CPU: 0 PID: 804 Comm: NetworkManager
[114309.521078] CPU: 1 PID: 795 Comm: NetworkManager
[  914.434369] CPU: 1 PID: 1178 Comm: tuned
[22615.640870] CPU: 1 PID: 889 Comm: sssd_nss
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;According to the meminfo dump at the time, it doesn&apos;t look like there is a lot of slab in use, only 20563 pages ~= 82MB, and not much in terms of page cache at all.  All of the Lustre-internal allocations would be recorded as part of the slab cache, so it doesn&apos;t &lt;em&gt;seem&lt;/em&gt; like this is a memory leak.  Pagecache pages should &lt;em&gt;probably&lt;/em&gt; appear in &lt;tt&gt;active_file:&lt;/tt&gt; or &lt;tt&gt;inactive_file:&lt;/tt&gt;, unless ZFS is doing something strange with the ARC buffers.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[22615.680857] Mem-Info:
[22615.681353] active_anon:0 inactive_anon:0 isolated_anon:0
 active_file:15 inactive_file:19 isolated_file:0
 unevictable:0 dirty:0 writeback:0 unstable:0
 slab_reclaimable:6140 slab_unreclaimable:20563
 mapped:1 shmem:0 pagetables:1575 bounce:0
 free:12775 free_pcp:15 free_cma:0
[22615.686600] Node 0 active_anon:0kB inactive_anon:0kB active_file:60kB inactive_file:76kB unevictable:0kB isolated(anon):0kB isolated(file):0kB mapped:4kB dirty:0kB writeback:0kB shmem:0kB shmem_thp: 0kB shmem_pmdmapped: 0kB anon_thp: 0kB writeback_tmp:0kB unstable:0kB all_unreclaimable? yes
[22615.690863] Node 0 DMA free:6816kB min:416kB low:520kB high:624kB active_anon:0kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB writepending:0kB present:15992kB managed:15360kB mlocked:0kB kernel_stack:0kB pagetables:0kB bounce:0kB free_pcp:0kB local_pcp:0kB free_cma:0kB
[22615.695119] lowmem_reserve[]: 0 1605 1605 1605 1605
[22615.696004] Node 0 DMA32 free:44284kB min:44636kB low:55792kB high:66948kB active_anon:0kB inactive_anon:0kB active_file:60kB inactive_file:76kB unevictable:0kB writepending:0kB present:2080744kB managed:1678888kB mlocked:0kB kernel_stack:11920kB pagetables:6300kB bounce:0kB free_pcp:60kB local_pcp:0kB free_cma:0kB
[22615.700603] lowmem_reserve[]: 0 0 0 0 0
[22615.701307] Node 0 DMA: 0*4kB 0*8kB 0*16kB 1*32kB (U) 0*64kB 1*128kB (U) 2*256kB (U) 2*512kB (U) 1*1024kB (U) 2*2048kB (UM) 0*4096kB = 6816kB
[22615.703467] Node 0 DMA32: 1237*4kB (UME) 827*8kB (UME) 361*16kB (UME) 198*32kB (UME) 72*64kB (UME) 11*128kB (UM) 7*256kB (UM) 19*512kB (U) 3*1024kB (U) 0*2048kB 0*4096kB = 44284kB
[22615.706168] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=1048576kB
[22615.707684] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB
[22615.709160] 34 total pagecache pages
[22615.709848] 0 pages in swap cache
[22615.710472] Swap cache stats: add 47896, delete 47895, find 250384/250507
[22615.711670] Free swap  = 2685692kB
[22615.712302] Total swap = 2860028kB
[22615.712949] 524184 pages RAM
[22615.713507] 0 pages HighMem/MovableOnly
[22615.714206] 100622 pages reserved
[22615.714834] 0 pages hwpoisoned
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;I&apos;m not sure what the &quot;100622 pages reserved&quot; (about 400MB of the 2GB) is reserved &lt;b&gt;for&lt;/b&gt;. Crashdumps?  It looks like this 400MB reserve is the difference between &quot;&lt;tt&gt;present:2080744kB&lt;/tt&gt;&quot; and &quot;&lt;tt&gt;managed:1678888kB&lt;/tt&gt;&quot; in the &lt;tt&gt;DMA32&lt;/tt&gt; zone (2080744kB - 1678888kB = 401856kB ~= 100464 pages, which together with the 158-page gap in the &lt;tt&gt;DMA&lt;/tt&gt; zone adds up to exactly 100622 pages).  I don&apos;t think that is the specific problem here, since I also see about 100k pages reserved on my RHEL7.9 VM, but that is a fair chunk of memory to hold back.&lt;/p&gt;

&lt;p&gt;What &lt;b&gt;IS&lt;/b&gt; strange is that the &lt;tt&gt;DMA32&lt;/tt&gt; zone shows 2GB present and 1.6GB managed, but that memory doesn&apos;t show up as &quot;used&quot; in any of the fields, and only about 45MB is free.&lt;/p&gt;

&lt;p&gt;There are several systemd-related processes that are consuming a significant amount of memory (&lt;tt&gt;tuned&lt;/tt&gt;, &lt;tt&gt;polkitd&lt;/tt&gt;, &lt;tt&gt;automount&lt;/tt&gt;), but I&apos;m not sure if they are the main offenders, nor what units this part of the dump uses (pages or KB), nor whether it lists all running processes or only those with significant memory usage:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[22615.839020] [ pid ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name
[22615.840493] [  544]     0   544    28120        1   274432     1298             0 systemd-journal
[22615.842023] [  579]     0   579    27233        1   225280      539         -1000 systemd-udevd
[22615.843530] [  742]    32   742    16785        0   167936      191             0 rpcbind
[22615.844941] [  750]     0   750    19242        0   147456      168         -1000 auditd
[22615.846372] [  794]     0   794    53697        0   434176      508             0 sssd
[22615.847757] [  800]    81   800    14140        0   163840      237          -900 dbus-daemon
[22615.849229] [  805]     0   805    97895        0   385024      687             0 NetworkManager
[22615.850754] [  813]   998   813   407041        0   331776     1290             0 polkitd
[22615.852173] [  815]     0   815    31224        0   139264      165             0 irqbalance
[22615.853634] [  827]   994   827    32228        0   147456      122             0 chronyd
[22615.855053] [  841]   993   841    58461        1   212992      219             0 rngd
[22615.856444] [  877]     0   877    55424        0   442368      752             0 sssd_be
[22615.857858] [  889]     0   889    56279        0   462848      469             0 sssd_nss
[22615.859299] [  901]     0   901   106578        0   430080     3803             0 tuned
[22615.860696] [  906]     0   906    23072        1   200704      232         -1000 sshd
[22615.862075] [  907]     0   907    34682        0   286720      368             0 cupsd
[22615.863478] [  910]   997   910    30251        0   147456      161             0 munged
[22615.864879] [  931]     0   931    14994        1   147456      148             0 xinetd
[22615.866271] [  953]     0   953    75597        0   303104      561             0 rsyslogd
[22615.867707] [  968]     0   968    25434        0   176128      176             0 gssproxy
[22615.869133] [  988]     0   988    24316        1   212992      300             0 systemd-logind
[22615.870653] [  998]    29   998    17931        0   176128     6297             0 rpc.statd
[22615.872093] [ 1133]     0  1133    30765        1   180224      251             0 master
[22615.873519] [ 1140]    89  1140    37597        0   237568      272             0 qmgr
[22615.874896] [ 1212]     0  1212   173534        0   335872      500             0 automount
[22615.876351] [ 1216]     0  1216    10994        0   114688       53             0 atd
[22615.877707] [ 1217]     0  1217     9232        1   110592      220             0 crond
[22615.879098] [ 1241]     0  1241     4050        0    69632       37             0 agetty
[22615.880508] [ 1242]     0  1242     3408        0    65536       32             0 agetty
[22615.881906] [1250901]    89 1250901    37584        0   237568      271             0 pickup
[22615.883448] Kernel panic - not syncing: Out of memory: system-wide panic_on_oom is enabled
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;One possibility (just a guess) is that there is something in the RHEL8 systemd/cgroup configuration that prevents memory pressure from userspace processes from releasing pages used for cached objects in the kernel, which wasn&apos;t the case in RHEL7.x?  That still doesn&apos;t explain &lt;b&gt;where&lt;/b&gt; the memory is currently being used.&lt;/p&gt;

&lt;p&gt;On my RHEL7 VM the &lt;tt&gt;sysrq-m&lt;/tt&gt; dump shows that each zone has a number of pages used for different reasons, while the RHEL8 dump shows almost nothing:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Node 0 DMA32 free:1897928kB min:59336kB low:74168kB high:89004kB active_anon:87160kB inactive_anon:75148kB active_file:74556kB inactive_file:473568kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:3653568kB managed:3407344kB mlocked:0kB dirty:25324kB writeback:0kB mapped:21492kB shmem:91932kB slab_reclaimable:30712kB slab_unreclaimable:66032kB kernel_stack:3712kB pagetables:4132kB unstable:0kB bounce:0kB free_pcp:948kB local_pcp:692kB free_cma:0kB writeback_tmp:0kB pages_scanned:0 all_unreclaimable? no
Node 0 Normal free:217984kB min:7968kB low:9960kB high:11952kB active_anon:12984kB inactive_anon:11080kB active_file:13596kB inactive_file:59896kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:524288kB managed:457212kB mlocked:0kB dirty:1744kB writeback:0kB mapped:2524kB shmem:13212kB slab_reclaimable:9616kB slab_unreclaimable:12632kB kernel_stack:1152kB pagetables:424kB unstable:0kB bounce:0kB free_pcp:956kB local_pcp:700kB free_cma:0kB writeback_tmp:0kB pages_scanned:0 all_unreclaimable? no
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="292787" author="adilger" created="Tue, 23 Feb 2021 17:14:23 +0000"  >&lt;p&gt;What is strange is that it seems some slabs are missing from the dump?  I don&apos;t see &lt;tt&gt;ldlm_locks&lt;/tt&gt; or &lt;tt&gt;ldlm_resources&lt;/tt&gt;, but I thought we are running with &lt;tt&gt;slub_nomerge&lt;/tt&gt; these days?  It still isn&apos;t clear where all the memory has gone...&lt;/p&gt;</comment>
                            <comment id="297314" author="adilger" created="Tue, 30 Mar 2021 20:41:37 +0000"  >&lt;p&gt;+7 failures in the past week:&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/7015c24d-a8f5-4dfa-8032-6693a12fc05f&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/7015c24d-a8f5-4dfa-8032-6693a12fc05f&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/3ec6bac8-df96-4a4c-ae97-e93d1546194c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/3ec6bac8-df96-4a4c-ae97-e93d1546194c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/3624fe39-083b-4427-a4c0-f801afc13b54&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/3624fe39-083b-4427-a4c0-f801afc13b54&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/72ecc79f-cc65-48b5-ba39-208fc6d0b9ac&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/72ecc79f-cc65-48b5-ba39-208fc6d0b9ac&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/02596af3-0144-4af4-888f-8ea89a444bbf&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/02596af3-0144-4af4-888f-8ea89a444bbf&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/d8aa05e2-eb9a-4911-a770-bbe5f2d6c613&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/d8aa05e2-eb9a-4911-a770-bbe5f2d6c613&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/1a7783dc-9b13-407b-980d-2dce13877b19&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/1a7783dc-9b13-407b-980d-2dce13877b19&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="299131" author="adilger" created="Mon, 19 Apr 2021 05:21:30 +0000"  >&lt;p&gt;+4 failures in the past week, all of ZFS.&lt;/p&gt;</comment>
                            <comment id="300062" author="bzzz" created="Thu, 29 Apr 2021 05:18:10 +0000"  >&lt;p&gt;&lt;a href=&quot;https://testing.whamcloud.com/test_sets/26779a73-fb4b-44d4-b7f4-0892faf505af&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/26779a73-fb4b-44d4-b7f4-0892faf505af&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="307269" author="adilger" created="Tue, 13 Jul 2021 19:13:34 +0000"  >&lt;p&gt;&lt;a href=&quot;https://testing.whamcloud.com/search?branch_type_id=&amp;amp;builds=&amp;amp;distribution_type_id=&amp;amp;end_date=2021-07-13&amp;amp;file_system_type_id=&amp;amp;source=sub_tests&amp;amp;start_date=2021-07-07&amp;amp;status%5B%5D=CRASH&amp;amp;sub_test_script_id=8f8b1c92-4dd8-11e1-a8f4-5254004bbbd3&amp;amp;test_groups=&amp;amp;test_set_script_id=6bea3250-3db2-11e0-80c0-52540025f9af&amp;amp;user_id=#redirect&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;+12 failures in the past week&lt;/a&gt;, all ZFS&lt;/p&gt;</comment>
                            <comment id="307904" author="hornc" created="Tue, 20 Jul 2021 22:27:42 +0000"  >&lt;p&gt;+1 &lt;a href=&quot;https://testing.whamcloud.com/test_sets/f54f45ff-11d2-4d46-9063-376613c4a738&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/f54f45ff-11d2-4d46-9063-376613c4a738&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="308509" author="bzzz" created="Tue, 27 Jul 2021 13:49:43 +0000"  >&lt;p&gt;&lt;a href=&quot;https://testing.whamcloud.com/test_sets/642b2113-975a-4311-9388-1e58e808a577&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/642b2113-975a-4311-9388-1e58e808a577&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="309856" author="adilger" created="Tue, 10 Aug 2021 23:30:20 +0000"  >&lt;p&gt;Crashes have been stopped by increasing OST VM RAM to 3GB from 2GB (last autotest failure 2021-07-31), but I don&apos;t think the problem is really &lt;b&gt;fixed&lt;/b&gt;.  That said, it is off our radar for now, until someone is more interested to fix this.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="63941">LU-14643</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="64980">LU-14812</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="57160">LU-12864</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="50878">LU-10687</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="62966">LU-14456</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="55745">LU-12336</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="57935">LU-13178</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="57845">LU-13162</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="59309">LU-13594</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00nnr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>