<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:33:04 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-17151] sanity: test_411b Error: &apos;(3) failed to write successfully&apos; </title>
                <link>https://jira.whamcloud.com/browse/LU-17151</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Serguei Smirnov &amp;lt;ssmirnov@ddn.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.whamcloud.com/test_sets/2a3fbe0b-f784-4875-bd67-6ab32aa223a3&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/2a3fbe0b-f784-4875-bd67-6ab32aa223a3&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Test session details:&lt;br/&gt;
clients: &lt;a href=&quot;https://build.whamcloud.com/job/lustre-reviews/99011&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://build.whamcloud.com/job/lustre-reviews/99011&lt;/a&gt; - 4.18.0-425.10.1.el8_7.aarch64&lt;br/&gt;
servers: &lt;a href=&quot;https://build.whamcloud.com/job/lustre-reviews/99011&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://build.whamcloud.com/job/lustre-reviews/99011&lt;/a&gt; - 4.18.0-477.21.1.el8_lustre.x86_64&lt;/p&gt;

&lt;p&gt;sanity test_411b: @@@@@@ FAIL: (3) failed to write successfully &lt;br/&gt;
  Trace dump:&lt;br/&gt;
  = /usr/lib64/lustre/tests/test-framework.sh:6700:error()&lt;br/&gt;
  = /usr/lib64/lustre/tests/sanity.sh:27545:test_411b()&lt;br/&gt;
  = /usr/lib64/lustre/tests/test-framework.sh:7040:run_one()&lt;br/&gt;
  = /usr/lib64/lustre/tests/test-framework.sh:7096:run_one_logged()&lt;br/&gt;
  = /usr/lib64/lustre/tests/test-framework.sh:6926:run_test()&lt;br/&gt;
  = /usr/lib64/lustre/tests/sanity.sh:27592:main()&lt;br/&gt;
Dumping lctl log to /autotest/autotest-2/2023-09-27/lustre-reviews_review-ldiskfs-dne-arm_99011_29_0b05909e-9d3c-46a3-9f81-125f5c37cc5d//sanity.test_411b.*.1695797605.log&lt;br/&gt;
CMD: trevis-108vm17.trevis.whamcloud.com,trevis-108vm18,trevis-72vm4,trevis-72vm5,trevis-72vm6 /usr/sbin/lctl dk &amp;gt; /autotest/autotest-2/2023-09-27/lustre-reviews_review-ldiskfs-dne-arm_99011_29_0b05909e-9d3c-46a3-9f81-125f5c37cc5d//sanity.test_411b.debug_log.\$(hostname -s).1695797605.log;&lt;br/&gt;
		dmesg &amp;gt; /autotest/autotest-2/2023-09-27/lustre-reviews_review-ldiskfs-dne-arm_99011_29_0b05909e-9d3c-46a3-9f81-125f5c37cc5d//sanity.test_411b.dmesg.\$(hostname -s).1695797605.log&lt;br/&gt;
cache 19660800&lt;br/&gt;
rss 0&lt;br/&gt;
rss_huge 0&lt;br/&gt;
shmem 0&lt;br/&gt;
mapped_file 0&lt;br/&gt;
dirty 2883584&lt;br/&gt;
writeback 0&lt;br/&gt;
swap 2949120&lt;br/&gt;
pgpgin 21206&lt;br/&gt;
pgpgout 20906&lt;br/&gt;
pgfault 4034&lt;br/&gt;
pgmajfault 417&lt;br/&gt;
inactive_anon 0&lt;br/&gt;
active_anon 0&lt;br/&gt;
inactive_file 18022400&lt;br/&gt;
active_file 1638400&lt;br/&gt;
unevictable 0&lt;br/&gt;
hierarchical_memory_limit 268435456&lt;br/&gt;
hierarchical_memsw_limit 9223372036854710272&lt;br/&gt;
total_cache 19660800&lt;br/&gt;
total_rss 0&lt;br/&gt;
total_rss_huge 0&lt;br/&gt;
total_shmem 0&lt;br/&gt;
total_mapped_file 0&lt;br/&gt;
total_dirty 2883584&lt;br/&gt;
total_writeback 0&lt;br/&gt;
total_swap 2949120&lt;br/&gt;
total_pgpgin 21206&lt;br/&gt;
total_pgpgout 20906&lt;br/&gt;
total_pgfault 4034&lt;br/&gt;
total_pgmajfault 417&lt;br/&gt;
total_inactive_anon 0&lt;br/&gt;
total_active_anon 0&lt;br/&gt;
total_inactive_file 18022400&lt;br/&gt;
total_active_file 1638400&lt;br/&gt;
total_unevictable 0&lt;/p&gt;</description>
                <environment></environment>
        <key id="78156">LU-17151</key>
            <summary>sanity: test_411b Error: &apos;(3) failed to write successfully&apos; </summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="4" iconUrl="https://jira.whamcloud.com/images/icons/statuses/reopened.png" description="This issue was once resolved, but the resolution was deemed incorrect. From here issues are either marked assigned or resolved.">Reopened</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="qian_wc">Qian Yingjin</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Wed, 27 Sep 2023 14:46:11 +0000</created>
                <updated>Sun, 28 Jan 2024 04:29:46 +0000</updated>
                                                            <fixVersion>Lustre 2.16.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>9</watches>
                                                                            <comments>
                            <comment id="387944" author="bzzz" created="Tue, 3 Oct 2023 09:16:47 +0000"  >&lt;p&gt;cgroup limit causes OOM which in turn kills dd&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
[16546.136239] oom-kill:constraint=CONSTRAINT_MEMCG,nodemask=(&lt;span class=&quot;code-keyword&quot;&gt;null&lt;/span&gt;),cpuset=/,mems_allowed=0,oom_memcg=/f411b.sanity,task_memcg=/f411b.sanity,task=dd,pid=1071239,uid=0
[16546.146364] Memory cgroup out of memory: Killed process 1071239 (dd) total-vm:36672kB, anon-rss:0kB, file-rss:2048kB, shmem-rss:0kB, UID:0 pgtables:384kB oom_score_adj:0
[16546.861389] dd invoked oom-killer: gfp_mask=0x6200ca(GFP_HIGHUSER_MOVABLE), order=0, oom_score_adj=0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="387963" author="arshad512" created="Tue, 3 Oct 2023 13:02:46 +0000"  >&lt;p&gt;+&lt;ins&gt;1 On Master +&lt;a href=&quot;https://testing.whamcloud.com/sub_tests/bfbca56f-69b4-4c4c-94e1-f2b801dbc276&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/sub_tests/bfbca56f-69b4-4c4c-94e1-f2b801dbc276&lt;/a&gt;&lt;/ins&gt;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;========================================================== 10:28:52 (1696328932)
[19651.231511] Lustre: DEBUG MARKER: == sanity test 411b: confirm Lustre can avoid OOM with reasonable cgroups limits ========================================================== 10:28:52 (1696328932)
[19669.199217] dd invoked oom-killer: gfp_mask=0x6200ca(GFP_HIGHUSER_MOVABLE), order=0, oom_score_adj=0
[19669.208834] CPU: 1 PID: 1254796 Comm: dd Kdump: loaded Tainted: G OE --------- - - 4.18.0-425.10.1.el8_7.aarch64 #1
[19669.216852] Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015
[19669.221748] Call trace:
[19669.223597] dump_backtrace+0x0/0x178
[19669.226811] show_stack+0x28/0x38
[19669.229191] dump_stack+0x5c/0x74
[19669.231641] dump_header+0x4c/0x1e0
[19669.234205] oom_kill_process+0x18c/0x190
[19669.237056] out_of_memory+0x188/0x518
[19669.239728] mem_cgroup_out_of_memory+0xf8/0x108
[19669.243134] try_charge+0x6a8/0x6f0
[19669.245659] __mem_cgroup_charge+0x48/0xe8
[19669.248531] mem_cgroup_swapin_charge_page+0x68/0x170
[19669.252071] __read_swap_cache_async+0x1d8/0x280
[19669.255316] swap_cluster_readahead+0x184/0x300
[19669.258578] swapin_readahead+0x2a8/0x3cc
[19669.261400] do_swap_page+0x53c/0x828
[19669.264147] __handle_mm_fault+0x260/0x698
[19669.267037] handle_mm_fault+0xe4/0x188
[19669.269721] do_page_fault+0x164/0x488
[19669.272493] do_translation_fault+0xa0/0xb0
[19669.275595] do_mem_abort+0x54/0xb0
[19669.278156] el1_abort+0x28/0x30
[19669.280633] el1_sync_handler+0x70/0x120
[19669.283516] el1_sync+0x7c/0x100
[19669.285854] fault_in_readable+0x9c/0x188
[19669.288769] fault_in_iov_iter_readable+0x80/0xd0
[19669.292460] generic_perform_write+0x7c/0x1b8
[19669.295657] __generic_file_write_iter+0x104/0x1e8
[19669.299209] vvp_io_write_start+0x4d8/0x1058 [lustre]
[19669.303501] cl_io_start+0x98/0x1d8 [obdclass]
[19669.307477] cl_io_loop+0xcc/0x2a0 [obdclass]
[19669.310836] ll_file_io_generic+0x3dc/0xfe0 [lustre]
[19669.314709] ll_file_write_iter+0x7d4/0xa58 [lustre]
[19669.318471] new_sync_write+0x104/0x158
[19669.321468] __vfs_write+0x78/0x90
[19669.324000] vfs_write+0xb0/0x1b8
[19669.326565] ksys_write+0x70/0xd8
[19669.328989] __arm64_sys_write+0x28/0x38
[19669.331899] do_el0_svc+0xb4/0x188
[19669.334521] el0_sync_handler+0x88/0xac
[19669.337306] el0_sync+0x140/0x180
[19669.340071] memory: usage 262144kB, limit 262144kB, failcnt 15083
[19669.344527] memory+swap: usage 328128kB, limit 9007199254740928kB, failcnt 0
[19669.349159] kmem: usage 192kB, limit 9007199254740928kB, failcnt 0
[19669.353303] Memory cgroup stats for /f411b.sanity:
[19669.353714] anon 0
file 163250176
kernel_stack 0
pagetables 0
percpu 0
sock 0
shmem 0
file_mapped 0
file_dirty 3866624
file_writeback 0
swapcached 606339072
anon_thp 0
file_thp 0
shmem_thp 0
inactive_anon 131072
active_anon 0
inactive_file 265682944
active_file 2424832
unevictable 0
slab_reclaimable 70840
slab_unreclaimable 13632
slab 84472
workingset_refault_anon 8219
workingset_refault_file 23
workingset_activate_anon 8177
workingset_activate_file 3
workingset_restore_anon 7464
workingset_restore_file 2
workingset_nodereclaim 0
pgfault 13248
pgmajfault 1427
pgrefill 32282
pgscan 2242586
pgsteal 26364
pgactivate 31809
pgdeactivate 32273
pglazyfree 0
pglazyfreed 0
thp_fault_alloc 0
thp_collapse_alloc 0
[19669.422320] Tasks state (memory values in pages):
[19669.425648] [ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name
[19669.431758] [1254796] 0 1254796 1085 32 393216 1068 0 dd
[19669.437512] oom-kill:constraint=CONSTRAINT_MEMCG,nodemask=(null),cpuset=/,mems_allowed=0,oom_memcg=/f411b.sanity,task_memcg=/f411b.sanity,task=dd,pid=1254796,uid=0
[19669.447727] Memory cgroup out of memory: Killed process 1254796 (dd) total-vm:69440kB, anon-rss:0kB, file-rss:2048kB, shmem-rss:0kB, UID:0 pgtables:384kB oom_score_adj:0
[19670.119273] Lustre: DEBUG MARKER: /usr/sbin/lctl mark sanity test_411b: @@@@@@ FAIL: (3) failed to write successfully 
[19670.662747] Lustre: DEBUG MARKER: sanity test_411b: @@@@@@ FAIL: (3) failed to write successfully
[19671.323379] Lustre: DEBUG MARKER: /usr/sbin/lctl dk &amp;gt; /autotest/autotest-2/2023-10-03/lustre-reviews_review-ldiskfs-dne-arm_99080_29_acee458f-e73b-455d-b9a7-38fbc1e13ed6//sanity.test_411b.debug_log.$(hostname -s).1696328953.log;
dmesg &amp;gt; /autotest/autotest-2/2023-10-03/lustre-reviews_rev&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="388042" author="arshad512" created="Wed, 4 Oct 2023 06:02:40 +0000"  >&lt;p&gt;+1 on Master &lt;a href=&quot;https://testing.whamcloud.com/test_sets/a2c9650c-efdc-49bf-80bb-7770e72cd53a&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/a2c9650c-efdc-49bf-80bb-7770e72cd53a&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="388196" author="adilger" created="Thu, 5 Oct 2023 07:37:13 +0000"  >&lt;p&gt;Hi Yingjin, can you please take a look at this.  The sanity.sh test_411b added from patch &lt;a href=&quot;https://review.whamcloud.com/50544&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/50544&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-16713&quot; title=&quot;Writeback and commit pages under memory pressure to avoid OOM&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-16713&quot;&gt;&lt;del&gt;LU-16713&lt;/del&gt;&lt;/a&gt; llite: writeback/commit pages under memory pressure&lt;/tt&gt;&quot; is failing regularly since it landed (and once before landing on &lt;tt&gt;master-next&lt;/tt&gt;).  There is also a regular error &quot;&lt;tt&gt;error writing to file from 1249987&lt;/tt&gt;&quot;.&lt;/p&gt;</comment>
                            <comment id="388197" author="adilger" created="Thu, 5 Oct 2023 07:49:26 +0000"  >&lt;p&gt;It looks like almost all of the failures are on &lt;tt&gt;aarch64&lt;/tt&gt; clients (48/58 failures).&lt;/p&gt;</comment>
                            <comment id="388592" author="gerrit" created="Tue, 10 Oct 2023 00:13:33 +0000"  >&lt;p&gt;&quot;Timothy Day &amp;lt;timday@amazon.com&amp;gt;&quot; uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/52610&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/52610&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-17151&quot; title=&quot;sanity: test_411b Error: &amp;#39;(3) failed to write successfully&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-17151&quot;&gt;LU-17151&lt;/a&gt; tests: increase 411b mem limit for arm&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 8444e3f8d5d467ec7653c713529806d6123a449c&lt;/p&gt;</comment>
                            <comment id="389418" author="simmonsja" created="Mon, 16 Oct 2023 13:34:50 +0000"  >&lt;p&gt;Can we land this since this issue is very common.&lt;/p&gt;</comment>
                            <comment id="390591" author="gerrit" created="Wed, 25 Oct 2023 18:09:17 +0000"  >&lt;p&gt;&quot;Oleg Drokin &amp;lt;green@whamcloud.com&amp;gt;&quot; merged in patch &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/52610/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/52610/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-17151&quot; title=&quot;sanity: test_411b Error: &amp;#39;(3) failed to write successfully&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-17151&quot;&gt;LU-17151&lt;/a&gt; tests: increase sanity/411b memory limit&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 0e878390e1c8c5883bccd01758392eaa16a67f31&lt;/p&gt;</comment>
                            <comment id="390618" author="pjones" created="Wed, 25 Oct 2023 19:59:15 +0000"  >&lt;p&gt;Landed for 2.16&lt;/p&gt;</comment>
                            <comment id="391204" author="nangelinas" created="Tue, 31 Oct 2023 15:32:08 +0000"  >&lt;p&gt;There is another failure on x86_64, on the first dd invocation in the test, on latest master that includes this fix at &lt;a href=&quot;https://testing.whamcloud.com/test_sets/36fdecc1-7e58-410d-b1eb-7345e901b7e5&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/36fdecc1-7e58-410d-b1eb-7345e901b7e5&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="393672" author="adilger" created="Mon, 20 Nov 2023 22:54:55 +0000"  >&lt;p&gt;It looks like this is still failing fairly regularly in the past week:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.whamcloud.com/search?status%5B%5D=FAIL&amp;amp;test_set_script_id=f9516376-32bc-11e0-aaee-52540025f9ae&amp;amp;sub_test_script_id=e877b2b8-1ca3-4655-836b-81fb1693f948&amp;amp;start_date=2023-11-13&amp;amp;end_date=2023-11-20&amp;amp;source=sub_tests#redirect&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/search?status%5B%5D=FAIL&amp;amp;test_set_script_id=f9516376-32bc-11e0-aaee-52540025f9ae&amp;amp;sub_test_script_id=e877b2b8-1ca3-4655-836b-81fb1693f948&amp;amp;start_date=2023-11-13&amp;amp;end_date=2023-11-20&amp;amp;source=sub_tests#redirect&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="393676" author="adilger" created="Mon, 20 Nov 2023 23:16:17 +0000"  >&lt;p&gt;It looks like those failures are despite patch &lt;a href=&quot;https://review.whamcloud.com/52610&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/52610&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-17151&quot; title=&quot;sanity: test_411b Error: &amp;#39;(3) failed to write successfully&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-17151&quot;&gt;LU-17151&lt;/a&gt; tests: increase sanity/411b memory limit&lt;/tt&gt;&quot; having landed.&lt;/p&gt;</comment>
                            <comment id="396504" author="paf0186" created="Tue, 12 Dec 2023 22:40:29 +0000"  >&lt;p&gt;This is failing quite a bit on master right now.&lt;/p&gt;</comment>
                            <comment id="398937" author="qian_wc" created="Tue, 9 Jan 2024 09:07:06 +0000"  >&lt;p&gt;Should we increase the memory limit value in the test script or enforce the test only running on real environment not VM?&lt;/p&gt;</comment>
                            <comment id="398947" author="bzzz" created="Tue, 9 Jan 2024 11:12:38 +0000"  >&lt;p&gt;do I understand correctly that the all last runs were tmpfs-based?&lt;/p&gt;</comment>
                            <comment id="398950" author="adilger" created="Tue, 9 Jan 2024 11:33:30 +0000"  >&lt;p&gt;Yes, it is using tmpfs on the VM host for the MDT since August or so.  It is exported to the VM guest as a block device. The OSTs are still HDD I believe. &lt;/p&gt;</comment>
                            <comment id="398951" author="bzzz" created="Tue, 9 Jan 2024 11:43:26 +0000"  >&lt;p&gt;my observation with tmpfs-based ZFS is that it tends do not reuse blocks instead allocate new ones (&quot;inner&quot;) and this leads to memory overuse. can be fixed running &quot;zfs trim ..&quot; once few minutes/subtests.&lt;/p&gt;</comment>
                            <comment id="399035" author="adilger" created="Tue, 9 Jan 2024 20:24:47 +0000"  >&lt;p&gt;Does trim get passed from the VM down to the host to free the memory?  You could try adding a patch to this subtest to run &lt;tt&gt;fstrim&lt;/tt&gt; or &lt;tt&gt;zfs trim&lt;/tt&gt; on all of the targets before or during the test.  However, if only the MDT is on tmpfs then I don&apos;t think these tests are using much memory there.&lt;/p&gt;

&lt;p&gt;It would be useful to add some debugging to see where all of the memory &lt;b&gt;is&lt;/b&gt; used (slabinfo/meminfo) so that we can reduce the size.  Some of the internal data structures are allocated/limited at mount time based on the total RAM size and not limited by the cgroup size (e.g. lu cache, max_dirty_mb, etc), and the client needs to do a better job to free this memory under pressure (e.g. registered shrinker).  Should we do anything to flush the cache at the &lt;b&gt;start&lt;/b&gt; of the test, so that the process in the cgroup is not penalized by previous allocations outside its control?&lt;/p&gt;
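
&lt;p&gt;As a concrete starting point, a minimal sketch of such a flush-and-snapshot step at the start of the subtest could look like the following (the output file names and the use of &lt;tt&gt;cancel_lru_locks&lt;/tt&gt; here are illustrative assumptions, not an agreed fix):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
# flush client caches and record memory state before entering the cgroup
sync
cancel_lru_locks osc                       # test-framework.sh helper: drop cached Lustre locks/pages
echo 3 &amp;gt; /proc/sys/vm/drop_caches          # flush pagecache and reclaimable slab
cat /proc/meminfo &amp;gt; $TMP/sanity-411b.meminfo.before
cat /proc/slabinfo &amp;gt; $TMP/sanity-411b.slabinfo.before
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;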

&lt;p&gt;It would also be good to improve the test scripts slightly to match proper test script style (a rough sketch follows the list below):&lt;/p&gt;
&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;no need for &quot;&lt;tt&gt;trap 0&lt;/tt&gt;&quot; in &lt;tt&gt;cleanup_test411_cgroup()&lt;/tt&gt; since this would clobber any other registered &lt;tt&gt;stack_trap&lt;/tt&gt; calls&lt;/li&gt;
	&lt;li&gt;in &lt;tt&gt;test_411a&lt;/tt&gt; use &quot;&lt;tt&gt;stack_trap cleanup_test411_cgroup&lt;/tt&gt;&quot; to do the cleanup instead of calling it explicitly&lt;/li&gt;
	&lt;li&gt;in &lt;tt&gt;test_411b&lt;/tt&gt; add &quot;&lt;tt&gt;stack_trap &apos;rm -f $DIR/$tfile.&amp;#42;&apos;&lt;/tt&gt;&quot; to clean up the files even if the test fails&lt;/li&gt;
&lt;/ul&gt;
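
&lt;p&gt;Roughly, those three changes would look like this (a sketch only; the test bodies are elided and the &lt;tt&gt;$cgroup&lt;/tt&gt; variable name is an assumption):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
cleanup_test411_cgroup() {
	# keep the existing cleanup, but without the bare &quot;trap 0&quot;,
	# so that other registered stack_trap handlers are not clobbered
	rmdir $1 2&amp;gt;/dev/null || true
}

test_411a() {
	# ... existing cgroup setup ...
	stack_trap &quot;cleanup_test411_cgroup $cgroup&quot; EXIT  # instead of an explicit call at the end
	# ... rest of the test ...
}

test_411b() {
	# ... existing cgroup setup ...
	stack_trap &quot;rm -f $DIR/$tfile.*&quot; EXIT   # remove test files even if the test fails
	stack_trap &quot;cleanup_test411_cgroup $cgroup&quot; EXIT
	# ... dd writes under the memory limit ...
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;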
</comment>
                            <comment id="399074" author="bzzz" created="Wed, 10 Jan 2024 06:48:49 +0000"  >&lt;blockquote&gt;&lt;p&gt;Does trim get passed from the VM down to the host to free the memory?&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Yes, here are my local runs (a sketch of the &lt;tt&gt;run_one_logged&lt;/tt&gt; hook follows the list):&lt;/p&gt;
&lt;ul&gt;
	&lt;li&gt;sanity on ZFS w/o trim: 9785 MBs allocated from the host by sanity&apos;s completion&lt;/li&gt;
	&lt;li&gt;sanity on ZFS w/ zfs trim in run_one_logged: 8159 MBs&lt;/li&gt;
&lt;/ul&gt;
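
&lt;p&gt;A rough sketch of what such a &lt;tt&gt;run_one_logged&lt;/tt&gt; hook could look like (the helper names, pool selection and trim frequency here are illustrative assumptions):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
# called from run_one_logged() after each subtest on ZFS configurations
trim_zfs_targets() {
	local facet

	for facet in $(get_facets); do
		[ &quot;$(facet_fstype $facet)&quot; = zfs ] || continue
		# trim every pool on the node backing this facet so that the
		# freed blocks are returned to the tmpfs on the VM host
		do_facet $facet &quot;zpool list -Ho name | xargs -rn1 zpool trim&quot;
	done
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;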
</comment>
                            <comment id="401545" author="adilger" created="Sun, 28 Jan 2024 04:29:46 +0000"  >&lt;p&gt;22 failures in the past week&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="75440">LU-16713</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="78349">LU-17183</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i03wxj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>