<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:55:20 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-12752] osc_page.c:osc_page_delete() ASSERTION( 0 ) failed</title>
                <link>https://jira.whamcloud.com/browse/LU-12752</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;A specific type of jobs (which seems to be Spark+Python) keeps triggering the following LBUG on different clients:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;/var/log/nodes/sh-110-01.log:Sep  9 12:09:32 sh-110-01 kernel: LustreError: 123514:0:(osc_page.c:193:osc_page_delete()) LBUG
/var/log/nodes/sh-110-04.log:Sep  9 08:53:09 sh-110-04 kernel: LustreError: 89537:0:(osc_page.c:193:osc_page_delete()) LBUG
/var/log/nodes/sh-110-04.log:Sep  9 12:47:29 sh-110-04 kernel: LustreError: 53455:0:(osc_page.c:193:osc_page_delete()) LBUG
/var/log/nodes/sh-110-04.log:Sep  9 12:47:29 sh-110-04 kernel: Kernel panic - not syncing: LBUG
/var/log/nodes/sh-110-04.log:Sep  9 14:36:54 sh-110-04 kernel: LustreError: 26083:0:(osc_page.c:193:osc_page_delete()) LBUG
/var/log/nodes/sh-110-04.log:Sep 10 11:48:43 sh-110-04 kernel: LustreError: 104908:0:(osc_page.c:193:osc_page_delete()) LBUG
/var/log/nodes/sh-110-04.log:Sep 10 12:29:58 sh-110-04 kernel: LustreError: 19176:0:(osc_page.c:193:osc_page_delete()) LBUG
/var/log/nodes/sh-110-04.log:Sep 10 12:45:09 sh-110-04 kernel: LustreError: 19196:0:(osc_page.c:193:osc_page_delete()) LBUG
/var/log/nodes/sh-110-04.log:Sep 10 13:58:56 sh-110-04 kernel: LustreError: 14313:0:(osc_page.c:193:osc_page_delete()) LBUG
/var/log/nodes/sh-110-04.log:Sep 10 15:01:22 sh-110-04 kernel: LustreError: 11772:0:(osc_page.c:193:osc_page_delete()) LBUG
/var/log/nodes/sh-110-04.log:Sep 10 16:59:34 sh-110-04 kernel: LustreError: 10094:0:(osc_page.c:193:osc_page_delete()) LBUG
/var/log/nodes/sh-110-04.log:Sep 10 18:29:22 sh-110-04 kernel: LustreError: 25649:0:(osc_page.c:193:osc_page_delete()) LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Sep  9 14:08:38 sh-110-04 kernel: perf: interrupt took too long (4932 &amp;gt; 4930), lowering kernel.perf_event_max_sample_rate to 40000
Sep  9 14:10:01 sh-110-04 CROND[24303]: (root) CMD (/usr/lib64/sa/sa1 1 1)      
Sep  9 14:20:01 sh-110-04 CROND[25059]: (root) CMD (/usr/lib64/sa/sa1 1 1)      
Sep  9 14:30:01 sh-110-04 CROND[25808]: (root) CMD (/usr/lib64/sa/sa1 1 1)      
Sep  9 14:32:01 sh-110-04 kernel: LNetError: 8091:0:(lib-msg.c:811:lnet_is_health_check()) Msg is in inconsistent state, don&apos;t perform health checking (0, 5)
Sep  9 14:36:54 sh-110-04 kernel: LustreError: 26083:0:(osc_cache.c:2575:osc_teardown_async_page()) extent ffff94ab962a3c30@{[32 -&amp;gt; 102/4095], [2|0|-|cache|wi|ffff9498052c8f00], [315392|71|+|-|ffff94c319791b00|4096|          (null)]} trunc at 32.
Sep  9 14:36:54 sh-110-04 kernel: LustreError: 26083:0:(osc_cache.c:2575:osc_teardown_async_page()) ### extent: ffff94ab962a3c30 ns: fir-OST0028-osc-ffff9496d9c3e000 lock: ffff94c319791b00/0x6233ab8351d87f8e lrc: 2/0,0 mode: PW/PW res: [0xa00000401:0x2db265b:0x0].0x0 rrc: 2 type: EXT [0-&amp;gt;18446744073709551615] (req 131072-&amp;gt;16777215) flags: 0x20000000000 nid: local remote: 0xcd88a3c067e066be expref: -99 pid: 26040 timeout: 0 lvb_type: 1
Sep  9 14:36:54 sh-110-04 kernel: LustreError: 26083:0:(osc_page.c:192:osc_page_delete()) page@ffff94b6232a7600[2 ffff949c67e4db30 4 1           (null)]
Sep  9 14:36:54 sh-110-04 kernel: LustreError: 26083:0:(osc_page.c:192:osc_page_delete()) vvp-page@ffff94b6232a7650(0:0) vm@ffffe46433a31e00 6fffff0000087d 3:0 ffff94b6232a7600 32 lru
Sep  9 14:36:54 sh-110-04 kernel: LustreError: 26083:0:(osc_page.c:192:osc_page_delete()) lov-page@ffff94b6232a7690, comp index: 10000, gen: 7
Sep  9 14:36:54 sh-110-04 kernel: LustreError: 26083:0:(osc_page.c:192:osc_page_delete()) osc-page@ffff94b6232a76c8 32: 1&amp;lt; 0x845fed 2 0 + - &amp;gt; 2&amp;lt; 131072 0 4096 0x0 0x420 |           (null) ffff94ad27592540 ffff9498052c8f00 &amp;gt; 3&amp;lt; 0 0 0 &amp;gt; 4&amp;lt; 0 0 8 210399232 - | - - + - &amp;gt; 5&amp;lt; - - + - | 0 - | 71 - -&amp;gt;
Sep  9 14:36:54 sh-110-04 kernel: LustreError: 26083:0:(osc_page.c:192:osc_page_delete()) end page@ffff94b6232a7600
Sep  9 14:36:54 sh-110-04 kernel: LustreError: 26083:0:(osc_page.c:192:osc_page_delete()) Trying to teardown failed: -16
Sep  9 14:36:54 sh-110-04 kernel: LustreError: 26083:0:(osc_page.c:193:osc_page_delete()) ASSERTION( 0 ) failed: 
Sep  9 14:36:54 sh-110-04 kernel: LustreError: 26083:0:(osc_page.c:193:osc_page_delete()) LBUG
Sep  9 14:36:54 sh-110-04 kernel: Pid: 26083, comm: migratefs 3.10.0-957.27.2.el7.x86_64 #1 SMP Mon Jul 29 17:46:05 UTC 2019
Sep  9 14:36:54 sh-110-04 kernel: Call Trace:                                   
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffc0b9c7cc&amp;gt;] libcfs_call_trace+0x8c/0xc0 [libcfs]
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffc0b9c87c&amp;gt;] lbug_with_loc+0x4c/0xa0 [libcfs] 
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffc0dd08bf&amp;gt;] osc_page_delete+0x48f/0x500 [osc]
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffc0eea770&amp;gt;] cl_page_delete0+0x80/0x220 [obdclass]
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffc0eea943&amp;gt;] cl_page_delete+0x33/0x110 [obdclass]
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffc120ff3f&amp;gt;] ll_invalidatepage+0x7f/0x170 [lustre]
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffbd3c7ddd&amp;gt;] do_invalidatepage_range+0x7d/0x90
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffbd3c7e87&amp;gt;] truncate_inode_page+0x77/0x80 
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffbd3c80ba&amp;gt;] truncate_inode_pages_range+0x1ea/0x700
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffbd3c863f&amp;gt;] truncate_inode_pages_final+0x4f/0x60
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffc11f656f&amp;gt;] ll_delete_inode+0x4f/0x230 [lustre]
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffbd45ff24&amp;gt;] evict+0xb4/0x180         
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffbd46082c&amp;gt;] iput+0xfc/0x190          
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffbd4548de&amp;gt;] do_unlinkat+0x1ae/0x2d0  
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffbd45595b&amp;gt;] SyS_unlinkat+0x1b/0x40   
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffbd976ddb&amp;gt;] system_call_fastpath+0x22/0x27
Sep  9 14:36:54 sh-110-04 kernel: [&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff       
Sep  9 14:40:11 sh-110-04 kernel: Initializing cgroup subsys cpuset             
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;We didn&apos;t find any other LBUG so we think this is related to a specific access pattern. This comes from several jobs but similar ones.&lt;/p&gt;

&lt;p&gt;The logs do mention fir so it&apos;s against our 2.12.2_116+3 scratch filesystem. We use DoM and PFL.&lt;/p&gt;

&lt;p&gt;We&apos;ve found other similar tickets, some of them are old and marked as resolved:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2720&quot; title=&quot;osc_page_delete()) ASSERTION(0) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2720&quot;&gt;&lt;del&gt;LU-2720&lt;/del&gt;&lt;/a&gt;, &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2557&quot; title=&quot;osc_page_delete()) Trying to teardown failed: -16 (EBUSY)&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2557&quot;&gt;&lt;del&gt;LU-2557&lt;/del&gt;&lt;/a&gt;, &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8388&quot; title=&quot;LustreError: 59548:0:(osc_page.c:308:osc_page_delete()) ASSERTION( 0 ) failed: LBUG &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8388&quot;&gt;LU-8388&lt;/a&gt;&lt;br/&gt;
 or more recent &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12525&quot; title=&quot;sanity-flr test 200 and others asertion in osc_page_delete&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12525&quot;&gt;&lt;del&gt;LU-12525&lt;/del&gt;&lt;/a&gt; but I checked and we don&apos;t have the patch that was reverted as it was only on 2.13+ I think.&lt;/p&gt;

&lt;p&gt;Any idea? Do you think it&apos;s worth trying to upgrade our clients from 2.12.0+patches (including async_discard) to 2.12.2_116+3 like our servers?&lt;/p&gt;

&lt;p&gt;Thanks!&lt;br/&gt;
 Stephane&lt;/p&gt;</description>
                <environment>CentOS 7.6  2.12.0+patch  including async_discard for DoM. Servers: 2.12.2_116+3 patches</environment>
        <key id="56894">LU-12752</key>
            <summary>osc_page.c:osc_page_delete() ASSERTION( 0 ) failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="vsaveliev">Vladimir Saveliev</assignee>
                                    <reporter username="sthiell">Stephane Thiell</reporter>
                        <labels>
                    </labels>
                <created>Wed, 11 Sep 2019 21:22:30 +0000</created>
                <updated>Thu, 16 Feb 2023 14:28:54 +0000</updated>
                            <resolved>Tue, 16 Mar 2021 20:02:33 +0000</resolved>
                                    <version>Lustre 2.12.0</version>
                                    <fixVersion>Lustre 2.15.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>11</watches>
                                                                            <comments>
                            <comment id="254614" author="pfarrell" created="Thu, 12 Sep 2019 14:29:00 +0000"  >&lt;p&gt;Stephane,&lt;/p&gt;



&lt;p&gt;It sounds like you can reproduce this pretty easily.&#160; Given that, I&apos;d suggest upgrading one client and trying it again.&#160; If the problem still occurs, then given how easy it is to reproduce, we should try to reproduce it with debug and then gather a vmcore with debug enabled and upload that.&lt;/p&gt;

&lt;p&gt;Specifically, I&apos;d say turn on rpctrace, dlmtrace, and cache.&#160; (Probably OK to leave debug_mb at the default.)&lt;/p&gt;</comment>
                            <comment id="258430" author="bzzz" created="Sat, 16 Nov 2019 16:37:33 +0000"  >&lt;p&gt;hit in racer:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
Lustre: 4849:0:(llite_lib.c:2867:ll_dirty_page_discard_warn()) lustre: dirty page discard: 192.168.122.122@tcp:/lustre/fid: [0x200000403:0x6381:0x0]/ may get corrupted (rc -108)
LustreError: 11031:0:(osc_cache.c:2561:osc_teardown_async_page()) extent 00000000a1f47168@{[240 -&amp;gt; 511/1023], [2|0|+|cache|wiuY|000000004d3c9cb0], [1138688|272|+|-|0000000042a7c322|1024|          (&lt;span class=&quot;code-keyword&quot;&gt;null&lt;/span&gt;)]} trunc at 240.
LustreError: 11031:0:(osc_cache.c:2561:osc_teardown_async_page()) ### extent: 00000000a1f47168 ns: lustre-OST0001-osc-ffff88015bc17000 lock: 0000000042a7c322/0x1853c4aa31a8466 lrc: 2/0,0 mode: PW/PW res: [0x4e5:0x0:0x0].0x0 rrc: 2 type: EXT [0-&amp;gt;18446744073709551615] (req 2097152-&amp;gt;18446744073709551615) flags: 0x800020000000000 nid: local remote: 0x1853c4aa31a8497 expref: -99 pid: 6809 timeout: 0 lvb_type: 1
LustreError: 11031:0:(osc_page.c:188:osc_page_delete()) page@00000000985442eb[2 000000006a06e30a 4 1           (&lt;span class=&quot;code-keyword&quot;&gt;null&lt;/span&gt;)]

LustreError: 11031:0:(osc_page.c:188:osc_page_delete()) vvp-page@00000000046d3436(0:0) vm@000000006118fc81 3480000001039 3:0 ffff8800d2397e00 240 lru

LustreError: 11031:0:(osc_page.c:188:osc_page_delete()) lov-page@00000000e665113b, gen: 3

LustreError: 11031:0:(osc_page.c:188:osc_page_delete()) osc-page@0000000058e45de8 240: 1&amp;lt; 0x845fed 258 0 + - &amp;gt; 2&amp;lt; 983040 0 4096 0x0 0x520 |           (&lt;span class=&quot;code-keyword&quot;&gt;null&lt;/span&gt;) 000000003782a010 000000004d3c9cb0 &amp;gt; 3&amp;lt; 0 0 0 &amp;gt; 4&amp;lt; 0 0 8 274124800 - | + - + - &amp;gt; 5&amp;lt; + - + - | 0 - | 272 - +&amp;gt;

LustreError: 11031:0:(osc_page.c:188:osc_page_delete()) end page@00000000985442eb
LustreError: 11031:0:(osc_page.c:188:osc_page_delete()) Trying to teardown failed: -16
LustreError: 11031:0:(osc_page.c:190:osc_page_delete()) ASSERTION( 0 ) failed: 
LustreError: 11031:0:(osc_page.c:190:osc_page_delete()) LBUG
Pid: 11031, comm: cat 4.18.0 #31 SMP Thu Oct 24 23:40:39 MSK 2019
Call Trace:
 dump_stack+0x85/0xc0
 ___might_sleep.cold+0xac/0xbc
 ? kmem_cache_alloc_trace+0x1b2/0x2c0
 ? libcfs_debug_dumplog_internal+0x60/0x60 [libcfs]
 ? __kthread_create_on_node+0x53/0x160
 ? kthread_create_on_node+0x34/0x40
 ? libcfs_debug_dumplog+0x8d/0x150 [libcfs]
 ? _raw_spin_unlock+0x1f/0x30
 ? wake_up_q+0x60/0x60
 ? lbug_with_loc+0x51/0x80 [libcfs]
 ? osc_page_delete+0x4ab/0x4b0 [osc]
 ? cl_page_delete0+0x6b/0x1e0 [obdclass]
 ? cl_page_delete+0x26/0xf0 [obdclass]
 ? ll_invalidatepage+0x8d/0x170 [lustre]
 ? truncate_cleanup_page+0x68/0xb0
 ? truncate_inode_pages_range+0x1b0/0x800
 ? ll_delete_inode+0x3e/0x220 [lustre]
 ? evict+0xbc/0x180
 ? __dentry_kill+0xcb/0x170
 ? dentry_kill+0x4b/0x1b0
 ? dput+0x17/0x2b0
 ? dput+0x266/0x2b0
 ? __fput+0x104/0x200
 ? task_work_run+0x7d/0xb0
 ? exit_to_usermode_loop+0xa3/0xb0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="269520" author="bzzz" created="Thu, 7 May 2020 09:40:00 +0000"  >&lt;p&gt;again in racer,&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
PID: 32147  TASK: ffff8800db844000  CPU: 0   COMMAND: &lt;span class=&quot;code-quote&quot;&gt;&quot;cat&quot;&lt;/span&gt;
 #0 [ffff88011633bae0] panic at ffffffff810af9a3
    /home/lustre/linux-4.18.0-32.el8/kernel/panic.c: 265
 #1 [ffff88011633bb70] osc_page_delete at ffffffffa07843fe [osc]
    /home/lustre/master-mine/lustre/osc/osc_page.c: 184
 #2 [ffff88011633bbb0] cl_page_delete0 at ffffffffa01b5b8b [obdclass]
    /home/lustre/master-mine/lustre/obdclass/cl_page.c: 801
 #3 [ffff88011633bbd0] cl_page_delete at ffffffffa01b6606 [obdclass]
    /home/lustre/master-mine/libcfs/include/libcfs/libcfs_debug.h: 153
 #4 [ffff88011633bbe8] ll_invalidatepage at ffffffffa0eae23d [lustre]
    /home/lustre/master-mine/lustre/llite/rw26.c: 100
 #5 [ffff88011633bc00] truncate_cleanup_page at ffffffff8119a8c8
    /home/lustre/linux-4.18.0-32.el8/./include/linux/compiler.h: 188
 #6 [ffff88011633bc18] truncate_inode_pages_range at ffffffff8119b2d0
    /home/lustre/linux-4.18.0-32.el8/./include/linux/pagevec.h: 66
 #7 [ffff88011633bde8] ll_delete_inode at ffffffffa0e8efce [lustre]
    /home/lustre/master-mine/lustre/llite/llite_lib.c: 2265
 #8 [ffff88011633be08] evict at ffffffff81218dac
    /home/lustre/linux-4.18.0-32.el8/fs/inode.c: 563
 #9 [ffff88011633be28] __dentry_kill at ffffffff8121499b
    /home/lustre/linux-4.18.0-32.el8/fs/dcache.c: 569
#10 [ffff88011633be48] dentry_kill at ffffffff81214a8b
    /home/lustre/linux-4.18.0-32.el8/fs/dcache.c: 686
#11 [ffff88011633be70] dput at ffffffff81214e56
    /home/lustre/linux-4.18.0-32.el8/fs/dcache.c: 846
#12 [ffff88011633be90] __fput at ffffffff811fc934
    /home/lustre/linux-4.18.0-32.el8/fs/file_table.c: 255
#13 [ffff88011633bed0] task_work_run at ffffffff810cec8d
    /home/lustre/linux-4.18.0-32.el8/kernel/task_work.c: 115
#14 [ffff88011633bf08] exit_to_usermode_loop at ffffffff81001db3
    /home/lustre/linux-4.18.0-32.el8/./include/linux/tracehook.h: 193
#15 [ffff88011633bf38] do_syscall_64 at ffffffff8100265a
    /home/lustre/linux-4.18.0-32.el8/arch/x86/entry/common.c: 211
#16 [ffff88011633bf50] entry_SYSCALL_64_after_hwframe at ffffffff818000ae
    /home/lustre/linux-4.18.0-32.el8/arch/x86/entry/entry_64.S: 242
    RIP: 00007f00025ab2ab  RSP: 00007ffed735b348  RFLAGS: 00000202
    RAX: 0000000000000000  RBX: 00007f000267b780  RCX: 00007f00025ab2ab
    RDX: 00007f0002676740  RSI: 0000000000000001  RDI: 0000000000000001
    RBP: 0000000000000000   R8: 00007f0002681580   R9: 0000000000000001
    R10: 00007f0002681580  R11: 0000000000000202  R12: 00007f0002677340
    R13: 0000000000000002  R14: 00007f000267f448  R15: 00007f000267bd80
    ORIG_RAX: 0000000000000003  CS: 0033  SS: 002b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="290931" author="adilger" created="Tue, 2 Feb 2021 05:50:10 +0000"  >&lt;p&gt;Hit this a few times in racer with patch &lt;a href=&quot;https://review.whamcloud.com/13669&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/13669&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7073&quot; title=&quot;racer with OST object migration hangs on cleanup&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7073&quot;&gt;&lt;del&gt;LU-7073&lt;/del&gt;&lt;/a&gt; tests: Add file migration to racer&lt;/tt&gt;&quot; with the same &quot;&lt;tt&gt;trunc at 16&lt;/tt&gt;&quot; message. e.g.:&lt;br/&gt;
&lt;a href=&quot;https://testing-archive.whamcloud.com/gerrit-janitor/13887/testresults/racer-special7-ldiskfs-centos7_x86_64-centos7_x86_64/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing-archive.whamcloud.com/gerrit-janitor/13887/testresults/racer-special7-ldiskfs-centos7_x86_64-centos7_x86_64/&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="293433" author="gerrit" created="Mon, 1 Mar 2021 15:01:48 +0000"  >&lt;p&gt;Vladimir Saveliev (c17830@cray.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/41797&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41797&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12752&quot; title=&quot;osc_page.c:osc_page_delete() ASSERTION( 0 ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12752&quot;&gt;&lt;del&gt;LU-12752&lt;/del&gt;&lt;/a&gt; mdt: mdt_commitrw_write() - return 0 for dying object&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 2fbc18994e48c3cf562d25f1e9d1975a26418e1f&lt;/p&gt;</comment>
                            <comment id="293434" author="vsaveliev" created="Mon, 1 Mar 2021 15:04:48 +0000"  >&lt;blockquote&gt;
&lt;p&gt;LustreError: 11031:0:(osc_page.c:188:osc_page_delete()) Trying to teardown failed: -16&lt;br/&gt;
LustreError: 11031:0:(osc_page.c:190:osc_page_delete()) ASSERTION( 0 ) failed: &lt;br/&gt;
LustreError: 11031:0:(osc_page.c:190:osc_page_delete()) LBUG&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Such LBUG may occur as result of race between write RPC and close of unlinked file in case of PFL files. See &lt;a href=&quot;https://review.whamcloud.com/41797&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41797&lt;/a&gt; for more details.&lt;/p&gt;</comment>
                            <comment id="295148" author="gerrit" created="Tue, 16 Mar 2021 18:16:23 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/41797/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41797/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12752&quot; title=&quot;osc_page.c:osc_page_delete() ASSERTION( 0 ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12752&quot;&gt;&lt;del&gt;LU-12752&lt;/del&gt;&lt;/a&gt; mdt: commitrw_write() - check dying object under lock&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: d48a0ebb5a8d5d49684325434b503e8aab085397&lt;/p&gt;</comment>
                            <comment id="295170" author="pjones" created="Tue, 16 Mar 2021 20:02:33 +0000"  >&lt;p&gt;Landed for 2.15&lt;/p&gt;</comment>
                            <comment id="338640" author="bzzz" created="Fri, 24 Jun 2022 08:52:16 +0000"  >&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
LustreError: 276562:0:(osc_page.c:183:osc_page_delete()) ASSERTION( 0 ) failed: in sanity / 273b

Trace:

PID: 276562  TASK: ffff98be20881d80  CPU: 0   COMMAND: &lt;span class=&quot;code-quote&quot;&gt;&quot;multiop&quot;&lt;/span&gt;
 #0 [ffff98bef366fad0] panic at ffffffffb90af786
    /tmp/kernel/kernel/panic.c: 299
 #1 [ffff98bef366fb60] osc_page_delete at ffffffffc08bf614 [osc]
    /home/lustre/master-mine/lustre/osc/osc_page.c: 183
 #2 [ffff98bef366fba0] cl_page_delete0 at ffffffffc02cf56a [obdclass]
    /home/lustre/master-mine/lustre/obdclass/cl_page.c: 842
 #3 [ffff98bef366fbc0] cl_page_delete at ffffffffc02cfe65 [obdclass]
    /home/lustre/master-mine/libcfs/include/libcfs/libcfs_debug.h: 155
 #4 [ffff98bef366fbd8] ll_invalidatepage at ffffffffc193f095 [lustre]
    /home/lustre/master-mine/lustre/llite/rw26.c: 99
 #5 [ffff98bef366fbf8] truncate_cleanup_page at ffffffffb9160e8a
    /tmp/kernel/./include/linux/compiler.h: 276
 #6 [ffff98bef366fc10] truncate_inode_pages_range at ffffffffb9161892
    /tmp/kernel/./include/linux/pagevec.h: 68
 #7 [ffff98bef366fdd8] ll_truncate_inode_pages_final at ffffffffc191bf63 [lustre]
    /home/lustre/master-mine/lustre/llite/llite_lib.c: 2767
 #8 [ffff98bef366fdf8] ll_delete_inode at ffffffffc191c3f3 [lustre]
    /home/lustre/master-mine/lustre/llite/llite_lib.c: 2851
 #9 [ffff98bef366fe08] evict at ffffffffb91e1e6c
    /tmp/kernel/fs/inode.c: 569
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;is this a different issue?&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="72934">LU-16263</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00mnr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>