<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:46:56 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-11787] sanityn test 71a fails with &#8216;data is not flushed from client&#8217;</title>
                <link>https://jira.whamcloud.com/browse/LU-11787</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;sanityn test_71a fails with &#8216;data is not flushed from client&#8217; for ARM clients.  These failures started on October 26, 2018. &lt;/p&gt;

&lt;p&gt;Looking at a  recent failure, &lt;a href=&quot;https://testing.whamcloud.com/test_sets/f1121dd4-fdef-11e8-b837-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/f1121dd4-fdef-11e8-b837-52540065bddc&lt;/a&gt; , we see the following in the client test_log&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== sanityn test 71a: correct file map just after write operation is finished ========================= 00:36:26 (1544488586)
CMD: trevis-24vm4 /usr/sbin/lctl get_param -n version 2&amp;gt;/dev/null ||
				/usr/sbin/lctl lustre_build_version 2&amp;gt;/dev/null ||
				/usr/sbin/lctl --version 2&amp;gt;/dev/null | cut -d&apos; &apos; -f2
1+0 records in
1+0 records out
40960 bytes (41 kB) copied, 0.00437335 s, 9.4 MB/s
1+0 records in
1+0 records out
40960 bytes (41 kB) copied, 0.00155542 s, 26.3 MB/s
  File: &apos;/mnt/lustre2/f71a.sanityn&apos;
  Size: 163840    	Blocks: 1          IO Block: 4194304 regular file
Device: 2c54f966h/743766374d	Inode: 144115205322894149  Links: 1
Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
Access: 2018-12-11 00:36:27.000000000 +0000
Modify: 2018-12-11 00:36:27.000000000 +0000
Change: 2018-12-11 00:36:27.000000000 +0000
 Birth: -
209708
fd: 3
extent in offset 0, length 122880
flags: 80000000
extent in offset 122880, length 40960
flags: 80000001
No unwritten extents, extents number 2, file size 163840, original size 81920
 sanityn test_71a: @@@@@@ FAIL: data is not flushed from client 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;There are no errors in any of the console logs.&lt;/p&gt;

&lt;p&gt;Logs for more of these test failures are at&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/8b2f4282-d9d0-11e8-b46b-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/8b2f4282-d9d0-11e8-b46b-52540065bddc&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/83526f8c-df5f-11e8-a871-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/83526f8c-df5f-11e8-a871-52540065bddc&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/1de368ba-fa38-11e8-bb6b-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/1de368ba-fa38-11e8-bb6b-52540065bddc&lt;/a&gt;&lt;/p&gt;</description>
                <environment></environment>
        <key id="54310">LU-11787</key>
            <summary>sanityn test 71a fails with &#8216;data is not flushed from client&#8217;</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="xinliang">Xinliang Liu</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                            <label>arm</label>
                            <label>ppc64</label>
                            <label>ppc64le</label>
                    </labels>
                <created>Sat, 15 Dec 2018 21:07:19 +0000</created>
                <updated>Fri, 26 Jan 2024 17:16:30 +0000</updated>
                            <resolved>Sat, 11 Jun 2022 15:33:17 +0000</resolved>
                                    <version>Lustre 2.12.0</version>
                    <version>Lustre 2.13.0</version>
                    <version>Lustre 2.12.1</version>
                    <version>Lustre 2.12.2</version>
                    <version>Lustre 2.12.3</version>
                    <version>Lustre 2.14.0</version>
                    <version>Lustre 2.12.4</version>
                    <version>Lustre 2.12.5</version>
                    <version>Lustre 2.12.6</version>
                    <version>Lustre 2.15.0</version>
                    <version>Lustre 2.15.3</version>
                                    <fixVersion>Lustre 2.16.0</fixVersion>
                    <fixVersion>Lustre 2.15.4</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="238691" author="pjones" created="Mon, 17 Dec 2018 18:41:30 +0000"  >&lt;p&gt;Jian&lt;/p&gt;

&lt;p&gt;Could you please investigate?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="256668" author="sarah" created="Fri, 18 Oct 2019 18:48:48 +0000"  >&lt;p&gt;hit similar issue on PPC client on 2.12.3&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/dd84edc0-eb0e-11e9-b62b-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/dd84edc0-eb0e-11e9-b62b-52540065bddc&lt;/a&gt;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== sanityn test 71a: correct file map just after write operation is finished ========================= 23:54:23 (1570578863)
CMD: trevis-55vm2 /usr/sbin/lctl get_param -n version 2&amp;gt;/dev/null ||
				/usr/sbin/lctl lustre_build_version 2&amp;gt;/dev/null ||
				/usr/sbin/lctl --version 2&amp;gt;/dev/null | cut -d&apos; &apos; -f2
1+0 records in
1+0 records out
40960 bytes (41 kB) copied, 0.00459198 s, 8.9 MB/s
1+0 records in
1+0 records out
40960 bytes (41 kB) copied, 0.000819573 s, 50.0 MB/s
  File: &apos;/mnt/lustre2/f71a.sanityn&apos;
  Size: 163840    	Blocks: 1          IO Block: 4194304 regular file
Device: 2c54f966h/743766374d	Inode: 144115205272562497  Links: 1
Access: (0644/-rw-r--r--)  Uid: (    0/    root)   Gid: (    0/    root)
Access: 2019-10-08 23:54:24.000000000 +0000
Modify: 2019-10-08 23:54:24.000000000 +0000
Change: 2019-10-08 23:54:24.000000000 +0000
 Birth: -
149703
fd: 3
error while ioctl 2
 sanityn test_71a: @@@@@@ FAIL: data is not flushed from client 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;OSS crash&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[17669.845624] Lustre: DEBUG MARKER: dmesg
[17670.427979] Lustre: DEBUG MARKER: /usr/sbin/lctl mark == sanityn test 71a: correct file map just after write operation is finished ========================= 23:54:23 \(1570578863\)
[17670.623376] Lustre: DEBUG MARKER: == sanityn test 71a: correct file map just after write operation is finished ========================= 23:54:23 (1570578863)
[17671.194035] general protection fault: 0000 [#1] SMP 
[17671.195205] Modules linked in: dm_flakey osp(OE) ofd(OE) lfsck(OE) ost(OE) mgc(OE) osd_ldiskfs(OE) lquota(OE) fid(OE) fld(OE) ksocklnd(OE) ptlrpc(OE) obdclass(OE) lnet(OE) ldiskfs(OE) libcfs(OE) rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd grace fscache rpcrdma ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod crc_t10dif crct10dif_generic ib_srp scsi_transport_srp scsi_tgt ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_core sunrpc dm_mod iosf_mbi crc32_pclmul ghash_clmulni_intel ppdev aesni_intel lrw gf128mul glue_helper ablk_helper cryptd joydev pcspkr virtio_balloon i2c_piix4 parport_pc parport ip_tables ext4 mbcache jbd2 ata_generic pata_acpi virtio_blk 8139too crct10dif_pclmul crct10dif_common ata_piix crc32c_intel serio_raw
[17671.209748]  libata virtio_pci virtio_ring virtio 8139cp mii floppy [last unloaded: dm_flakey]
[17671.211639] CPU: 1 PID: 27825 Comm: ps Kdump: loaded Tainted: G           OE  ------------   3.10.0-957.27.2.el7_lustre.x86_64 #1
[17671.213628] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[17671.214627] task: ffff9b3dd354c100 ti: ffff9b3debae0000 task.ti: ffff9b3debae0000
[17671.216012] RIP: 0010:[&amp;lt;ffffffffbccf7ab9&amp;gt;]  [&amp;lt;ffffffffbccf7ab9&amp;gt;] cap_capable+0x29/0x80
[17671.217533] RSP: 0018:ffff9b3debae3c60  EFLAGS: 00010283
[17671.218461] RAX: ffffffffbccf7a90 RBX: 000000000000000d RCX: 0000000000000000
[17671.219703] RDX: 0000000000000013 RSI: 80d764bdffffffff RDI: ffff9b3dd3a0d900
[17671.220929] RBP: ffff9b3debae3c60 R08: ffffffffbd64d780 R09: ffff9b3dfd001300
[17671.222150] R10: ffff9b3dfd001300 R11: ffffffffbcbd753d R12: ffff9b3df8fd0000
[17671.223393] R13: 0000000000000000 R14: ffff9b3def57d980 R15: ffffffffbd654920
[17671.224582] FS:  00007fd3c6583880(0000) GS:ffff9b3dffd00000(0000) knlGS:0000000000000000
[17671.225989] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[17671.226966] CR2: 00007fd3c5aba060 CR3: 000000005b5da000 CR4: 00000000000606e0
[17671.228175] Call Trace:
[17671.228700]  [&amp;lt;ffffffffbccf9e6b&amp;gt;] security_capable_noaudit+0x1b/0x20
[17671.229815]  [&amp;lt;ffffffffbcaa6e45&amp;gt;] has_ns_capability_noaudit+0x15/0x20
[17671.230908]  [&amp;lt;ffffffffbcaa7105&amp;gt;] ptrace_has_cap+0x35/0x40
[17671.231875]  [&amp;lt;ffffffffbcaa7b51&amp;gt;] __ptrace_may_access+0x71/0x150
[17671.232896]  [&amp;lt;ffffffffbcaa7efe&amp;gt;] ptrace_may_access+0x2e/0x50
[17671.233885]  [&amp;lt;ffffffffbccc0841&amp;gt;] do_task_stat+0x91/0xbb0
[17671.234850]  [&amp;lt;ffffffffbcc657a4&amp;gt;] ? mntput+0x24/0x40
[17671.235711]  [&amp;lt;ffffffffbcc53ac2&amp;gt;] ? path_openat+0x172/0x640
[17671.236682]  [&amp;lt;ffffffffbcc3f69a&amp;gt;] ? __check_object_size+0x1ca/0x250
[17671.237757]  [&amp;lt;ffffffffbccc1e84&amp;gt;] proc_tgid_stat+0x14/0x20
[17671.238689]  [&amp;lt;ffffffffbccbc062&amp;gt;] proc_single_show+0x52/0x90
[17671.239649]  [&amp;lt;ffffffffbcc69fc0&amp;gt;] seq_read+0x130/0x440
[17671.240555]  [&amp;lt;ffffffffbcc4256f&amp;gt;] vfs_read+0x9f/0x170
[17671.241425]  [&amp;lt;ffffffffbcc4342f&amp;gt;] SyS_read+0x7f/0xf0
[17671.242295]  [&amp;lt;ffffffffbd177d21&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[17671.243447]  [&amp;lt;ffffffffbd177ddb&amp;gt;] system_call_fastpath+0x22/0x27
[17671.244932]  [&amp;lt;ffffffffbd177d21&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[17671.246185] Code: 1f 00 66 66 66 66 90 55 4c 8b 87 80 00 00 00 48 89 e5 eb 09 66 0f 1f 44 00 00 48 89 c6 4c 39 c6 74 28 48 81 fe 80 d7 64 bd 74 3f &amp;lt;48&amp;gt; 8b 86 c8 00 00 00 4c 39 c0 75 e3 8b 86 d0 00 00 00 39 47 14 
[17671.251802] RIP  [&amp;lt;ffffffffbccf7ab9&amp;gt;] cap_capable+0x29/0x80
[17671.252816]  RSP &amp;lt;ffff9b3debae3c60&amp;gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="263723" author="jamesanunez" created="Thu, 20 Feb 2020 18:51:32 +0000"  >&lt;p&gt;The patch that landed, &lt;a href=&quot;https://review.whamcloud.com/37561/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/37561/&lt;/a&gt;, puts sanityn tests 16a and 71a on the ALWAYS_EXCEPT list for PPC client testing. This ticket should remain open until those tests are fixed and the tests are taken off the list.&lt;/p&gt;</comment>
                            <comment id="315180" author="xinliang" created="Mon, 11 Oct 2021 02:30:17 +0000"  >&lt;p&gt;Arm crashing when running&#160;sanityn test 71a for master branch&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
[ 1330.973746] Lustre: DEBUG MARKER: Using TIMEOUT=20
[ 1333.254497] Lustre: Modifying parameter general.*.*.lbug_on_grant_miscount in log params
[ 1338.681588] Lustre: DEBUG MARKER: beegfs-test-01.novalocal: executing check_logdir /tmp/test_logs/2021-10-11/020444
[ 1339.086489] Lustre: DEBUG MARKER: beegfs-test-01.novalocal: executing yml_node
[ 1339.419862] Lustre: DEBUG MARKER: Client: 2.14.52.80
[ 1339.495670] Lustre: DEBUG MARKER: MDS: 2.14.52.80
[ 1339.567308] Lustre: DEBUG MARKER: OSS: 2.14.52.80
[ 1339.627097] Lustre: DEBUG MARKER: -----============= acceptance-small: sanityn ============----- Mon Oct 11 02:05:23 UTC 2021
[ 1339.980338] Lustre: DEBUG MARKER: excepting tests: 28
[ 1340.091948] Lustre: Mounted lustre-client
[ 1340.468327] Lustre: DEBUG MARKER: beegfs-test-01.novalocal: executing check_config_client /mnt/lustre
[ 1341.262854] Lustre: DEBUG MARKER: Using TIMEOUT=20
[ 1341.443750] Lustre: Modifying parameter general.*.*.lbug_on_grant_miscount in log params
[ 1342.212768] Lustre: DEBUG MARKER: == sanityn test 71a: correct file map just after write operation is finished ========================================================== 02:05:25 (1633917925)
[ 1342.335774] LustreError: 4391:0:(osd_io.c:538:osd_do_bio()) ASSERTION( iobuf-&amp;gt;dr_rw == 0 ) failed: page_idx 1, block_idx 16, i 14,start_blocks: 0, count: 30, npages: 3
[ 1342.338924] LustreError: 4391:0:(osd_io.c:538:osd_do_bio()) LBUG
[ 1342.340168] Pid: 4391, comm: ll_ost_io06_002 4.18.0-305.7.1.el8_lustre.aarch64 #1 SMP Mon Jul 19 08:24:26 UTC 2021
[ 1342.342344] Call Trace:
[ 1342.342858] [&amp;lt;0&amp;gt;] libcfs_call_trace+0xb8/0x118 [libcfs]
[ 1342.343908] [&amp;lt;0&amp;gt;] lbug_with_loc+0x60/0xa0 [libcfs]
[ 1342.344888] [&amp;lt;0&amp;gt;] osd_do_bio.constprop.21+0x6c4/0xcf8 [osd_ldiskfs]
[ 1342.346130] [&amp;lt;0&amp;gt;] osd_ldiskfs_map_write.constprop.20+0xb4/0xf0 [osd_ldiskfs]
[ 1342.347535] [&amp;lt;0&amp;gt;] osd_ldiskfs_map_inode_pages+0x66c/0x958 [osd_ldiskfs]
[ 1342.348841] [&amp;lt;0&amp;gt;] osd_write_commit+0x4fc/0xbb8 [osd_ldiskfs]
[ 1342.349971] [&amp;lt;0&amp;gt;] ofd_commitrw_write+0x658/0x1e68 [ofd]
[ 1342.351002] [&amp;lt;0&amp;gt;] ofd_commitrw+0x360/0xa18 [ofd]
[ 1342.352134] [&amp;lt;0&amp;gt;] tgt_brw_write+0x1624/0x2d90 [ptlrpc]
[ 1342.353229] [&amp;lt;0&amp;gt;] tgt_handle_request0+0xd0/0x978 [ptlrpc]
[ 1342.354356] [&amp;lt;0&amp;gt;] tgt_request_handle+0x7c0/0x1a38 [ptlrpc]
[ 1342.355513] [&amp;lt;0&amp;gt;] ptlrpc_server_handle_request+0x3bc/0x11e8 [ptlrpc]
[ 1342.356824] [&amp;lt;0&amp;gt;] ptlrpc_main+0xd28/0x15f0 [ptlrpc]
[ 1342.357786] [&amp;lt;0&amp;gt;] kthread+0x130/0x138
[ 1342.358508] [&amp;lt;0&amp;gt;] ret_from_fork+0x10/0x18
[ 1342.359295] Kernel panic - not syncing: LBUG
[ 1342.360146] CPU: 12 PID: 4391 Comm: ll_ost_io06_002 Kdump: loaded Tainted: P           OE    --------- -  - 4.18.0-305.7.1.el8_lustre.aarch64 #1
[ 1342.362770] Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015
[ 1342.364231] Call trace:
[ 1342.364751]  dump_backtrace+0x0/0x188
[ 1342.365526]  show_stack+0x24/0x30
[ 1342.366234]  dump_stack+0x9c/0xbc
[ 1342.366937]  panic+0x130/0x2f8
[ 1342.367608]  param_set_delay_minmax.isra.1+0x0/0xd0 [libcfs]
[ 1342.368816]  osd_do_bio.constprop.21+0x6c4/0xcf8 [osd_ldiskfs]
[ 1342.370058]  osd_ldiskfs_map_write.constprop.20+0xb4/0xf0 [osd_ldiskfs]
[ 1342.371477]  osd_ldiskfs_map_inode_pages+0x66c/0x958 [osd_ldiskfs]
[ 1342.372837]  osd_write_commit+0x4fc/0xbb8 [osd_ldiskfs]
[ 1342.373951]  ofd_commitrw_write+0x658/0x1e68 [ofd]
[ 1342.374970]  ofd_commitrw+0x360/0xa18 [ofd]
[ 1342.375934]  tgt_brw_write+0x1624/0x2d90 [ptlrpc]
[ 1342.376999]  tgt_handle_request0+0xd0/0x978 [ptlrpc]
[ 1342.378123]  tgt_request_handle+0x7c0/0x1a38 [ptlrpc]
[ 1342.379258]  ptlrpc_server_handle_request+0x3bc/0x11e8 [ptlrpc]
[ 1342.380587]  ptlrpc_main+0xd28/0x15f0 [ptlrpc]
[ 1342.381530]  kthread+0x130/0x138
[ 1342.382242]  ret_from_fork+0x10/0x18
[ 1342.383021] SMP: stopping secondary CPUs
[ 1342.389567] Starting crashdump kernel...
[ 1342.390356] Bye!

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="316156" author="xinliang" created="Thu, 21 Oct 2021 01:22:00 +0000"  >&lt;p&gt;Note that above crash is another issue, tracking in this bug: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-15122&quot; title=&quot;Lustre ASSERTION( iobuf-&amp;gt;dr_rw == 0 ) crash on Arm server end&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-15122&quot;&gt;&lt;del&gt;LU-15122&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="318806" author="gerrit" created="Sat, 20 Nov 2021 17:43:23 +0000"  >&lt;p&gt;&quot;James Simmons &amp;lt;jsimmons@infradead.org&amp;gt;&quot; uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/45629&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/45629&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11787&quot; title=&quot;sanityn test 71a fails with &#8216;data is not flushed from client&#8217;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11787&quot;&gt;&lt;del&gt;LU-11787&lt;/del&gt;&lt;/a&gt; test: re-enable sanityn 71a for 64K page systems&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 49b892c53b16a7d02c29b4a041175d5303022cbb&lt;/p&gt;</comment>
                            <comment id="321002" author="xinliang" created="Thu, 16 Dec 2021 03:43:50 +0000"  >&lt;p&gt;Currently, ldiskfs block allocated is PAGE_SIZE aligned, see function osd_ldiskfs_map_inode_pages(), test case need to be modified to PAGE_SIZE aligned for Arm.&lt;/p&gt;</comment>
                            <comment id="324402" author="adilger" created="Sat, 29 Jan 2022 00:56:11 +0000"  >&lt;p&gt;Looking at the test itself, it seems clear that it won&apos;t work properly on a 64KB PAGE_SIZE system:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;         # write data this way: hole - data - hole - data
        dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=1 count=1
        dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=3 count=1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;because the &quot;hole&quot; is from 80KB-120KB, and with 64KB PAGE_SIZE the 40-80KB write will actually dirty the 64KB-128KB page and no hole is created...&lt;/p&gt;

&lt;p&gt;The test needs to be updated to use seek=0 for the first dd and seek=4 for the second dd and it should pass.  It may need some adjustments to the later checks, but at least there will be a hole from 64KB-128KB for 64KB PAGE_SIZE clients, and 40-160KB for 4KB PAGE_SIZE clients.  If we want to make the file consistent, it should use bs=64K.&lt;/p&gt;</comment>
                            <comment id="337394" author="gerrit" created="Sat, 11 Jun 2022 05:32:17 +0000"  >&lt;p&gt;&quot;Oleg Drokin &amp;lt;green@whamcloud.com&amp;gt;&quot; merged in patch &lt;a href=&quot;https://review.whamcloud.com/45629/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/45629/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11787&quot; title=&quot;sanityn test 71a fails with &#8216;data is not flushed from client&#8217;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11787&quot;&gt;&lt;del&gt;LU-11787&lt;/del&gt;&lt;/a&gt; test: Fix checkfilemap tests for 64K page&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 7c88dfd28b5cc6114a85f187ecb2473657d42c9d&lt;/p&gt;</comment>
                            <comment id="337496" author="pjones" created="Sat, 11 Jun 2022 15:33:17 +0000"  >&lt;p&gt;Landed for 2.16&lt;/p&gt;</comment>
                            <comment id="375151" author="gerrit" created="Mon, 12 Jun 2023 22:33:42 +0000"  >&lt;p&gt;&quot;Andreas Dilger &amp;lt;adilger@whamcloud.com&amp;gt;&quot; uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/51287&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/51287&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11787&quot; title=&quot;sanityn test 71a fails with &#8216;data is not flushed from client&#8217;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11787&quot;&gt;&lt;del&gt;LU-11787&lt;/del&gt;&lt;/a&gt; test: Fix checkfilemap tests for 64K page&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_15&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 5755b9c9148cfe235fb2f4dcb066eb0c83fab274&lt;/p&gt;</comment>
                            <comment id="381030" author="gerrit" created="Wed, 2 Aug 2023 06:21:31 +0000"  >&lt;p&gt;&quot;Oleg Drokin &amp;lt;green@whamcloud.com&amp;gt;&quot; merged in patch &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/51287/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/51287/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11787&quot; title=&quot;sanityn test 71a fails with &#8216;data is not flushed from client&#8217;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11787&quot;&gt;&lt;del&gt;LU-11787&lt;/del&gt;&lt;/a&gt; test: Fix checkfilemap tests for 64K page&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_15&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 66d9916dc947064b598f52476fb6482c0bbaff10&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="66713">LU-15122</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="49488">LU-10300</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i0084f:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>