<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:57:30 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-13000] MDS hit (osd_handler.c:2165:osd_object_release()) ASSERTION( !(o-&gt;oo_destroyed == 0 &amp;&amp; o-&gt;oo_inode &amp;&amp; o-&gt;oo_inode -&gt;i_nlink == 0) ) failed</title>
                <link>https://jira.whamcloud.com/browse/LU-13000</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;2 of 4 MDSs hit this issue after restart SOAK. SOAK was restarted due to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12990&quot; title=&quot;MDS failed to mount during failover&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12990&quot;&gt;LU-12990&lt;/a&gt;, after restart, without cleaning the update_log and did mds failover, it hit the problem right away. The restart process are:&lt;br/&gt;
1. stop soak&lt;br/&gt;
2. umount everything&lt;br/&gt;
3. reboot&lt;br/&gt;
4. mount&lt;br/&gt;
5. restart soak&lt;/p&gt;

&lt;p&gt;soak-9 console&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 1731.742403] Lustre: soaked-MDT0002-osp-MDT0001: Connection restored to 192.168.1.110@o2ib (at 192.168.1.110@o2ib)
[ 1731.753872] Lustre: Skipped 2 previous similar messages
[ 1736.627026] Lustre: soaked-MDT0001: Recovery over after 5:49, of 12 clients 3 recovered and 9 were evicted.
[23485.235434] LustreError: 5279:0:(osd_handler.c:2165:osd_object_release()) ASSERTION( !(o-&amp;gt;oo_destroyed == 0 &amp;amp;&amp;amp; o-&amp;gt;oo_inode &amp;amp;&amp;amp; o-&amp;gt;oo_inode
-&amp;gt;i_nlink == 0) ) failed:
[23485.251769] LustreError: 5279:0:(osd_handler.c:2165:osd_object_release()) LBUG
[23485.259857] Pid: 5279, comm: mdt01_001 3.10.0-1062.1.1.el7_lustre.x86_64 #1 SMP Fri Nov 8 18:37:40 UTC 2019
[23485.270743] Call Trace:
[23485.273503]  [&amp;lt;ffffffffc0dab8ac&amp;gt;] libcfs_call_trace+0x8c/0xc0 [libcfs]
[23485.280832]  [&amp;lt;ffffffffc0dab95c&amp;gt;] lbug_with_loc+0x4c/0xa0 [libcfs]
[23485.287762]  [&amp;lt;ffffffffc169466c&amp;gt;] osd_object_release+0x7c/0x80 [osd_ldiskfs]
[23485.295670]  [&amp;lt;ffffffffc0eef0a8&amp;gt;] lu_object_put+0x198/0x3e0 [obdclass]
[23485.303009]  [&amp;lt;ffffffffc0eb3a8a&amp;gt;] llog_osd_regular_fid_add_name_entry+0x27a/0x500 [obdclass]
[23485.312472]  [&amp;lt;ffffffffc0eb4a5f&amp;gt;] llog_osd_declare_create+0x3af/0x710 [obdclass]
[23485.320769]  [&amp;lt;ffffffffc0ea07f5&amp;gt;] llog_declare_create+0x75/0x1f0 [obdclass]
[23485.328579]  [&amp;lt;ffffffffc0ea6fbd&amp;gt;] llog_cat_prep_log+0x11d/0x360 [obdclass]
[23485.336290]  [&amp;lt;ffffffffc0ea7260&amp;gt;] llog_cat_declare_add_rec+0x60/0x260 [obdclass]
[23485.344586]  [&amp;lt;ffffffffc0e9e178&amp;gt;] llog_declare_add+0x78/0x1a0 [obdclass]
[23485.352100]  [&amp;lt;ffffffffc124c52e&amp;gt;] top_trans_start+0x17e/0x940 [ptlrpc]
[23485.359490]  [&amp;lt;ffffffffc18ce2b4&amp;gt;] lod_trans_start+0x34/0x40 [lod]
[23485.366340]  [&amp;lt;ffffffffc1989f9a&amp;gt;] mdd_trans_start+0x1a/0x20 [mdd]
[23485.373197]  [&amp;lt;ffffffffc196e2f2&amp;gt;] mdd_create+0xbe2/0x1630 [mdd]
[23485.379838]  [&amp;lt;ffffffffc17f8e84&amp;gt;] mdt_create+0xb54/0x10e0 [mdt]
[23485.386519]  [&amp;lt;ffffffffc17f957b&amp;gt;] mdt_reint_create+0x16b/0x360 [mdt]
[23485.393643]  [&amp;lt;ffffffffc17feab3&amp;gt;] mdt_reint_rec+0x83/0x210 [mdt]
[23485.400378]  [&amp;lt;ffffffffc17d89e0&amp;gt;] mdt_reint_internal+0x7b0/0xba0 [mdt]
[23485.407703]  [&amp;lt;ffffffffc17e46d7&amp;gt;] mdt_reint+0x67/0x140 [mdt]
[23485.414052]  [&amp;lt;ffffffffc123b83a&amp;gt;] tgt_request_handle+0x98a/0x1630 [ptlrpc]
[23485.421781]  [&amp;lt;ffffffffc11dda96&amp;gt;] ptlrpc_server_handle_request+0x256/0xb10 [ptlrpc]
[23485.430384]  [&amp;lt;ffffffffc11e15cc&amp;gt;] ptlrpc_main+0xbac/0x1540 [ptlrpc]
[23485.437434]  [&amp;lt;ffffffff848c50d1&amp;gt;] kthread+0xd1/0xe0
[23485.442910]  [&amp;lt;ffffffff84f8cd37&amp;gt;] ret_from_fork_nospec_end+0x0/0x39
[23485.449927]  [&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff
[23485.455515] Kernel panic - not syncing: LBUG
[23485.460280] CPU: 12 PID: 5279 Comm: mdt01_001 Kdump: loaded Tainted: G           OE  ------------   3.10.0-1062.1.1.el7_lustre.x86_64 #1
[23485.473964] Hardware name: Intel Corporation S2600GZ ........../S2600GZ, BIOS SE5C600.86B.01.08.0003.022620131521 02/26/2013
[23485.486499] Call Trace:
[23485.489230]  [&amp;lt;ffffffff84f792c2&amp;gt;] dump_stack+0x19/0x1b
[23485.494965]  [&amp;lt;ffffffff84f72941&amp;gt;] panic+0xe8/0x21f
[23485.500316]  [&amp;lt;ffffffffc0dab9ab&amp;gt;] lbug_with_loc+0x9b/0xa0 [libcfs]
[23485.507217]  [&amp;lt;ffffffffc169466c&amp;gt;] osd_object_release+0x7c/0x80 [osd_ldiskfs]
[23485.515102]  [&amp;lt;ffffffffc0eef0a8&amp;gt;] lu_object_put+0x198/0x3e0 [obdclass]
[23485.522401]  [&amp;lt;ffffffffc0eb3a8a&amp;gt;] llog_osd_regular_fid_add_name_entry+0x27a/0x500 [obdclass]
[23485.531833]  [&amp;lt;ffffffffc0eb4a5f&amp;gt;] llog_osd_declare_create+0x3af/0x710 [obdclass]
[23485.540099]  [&amp;lt;ffffffffc0ea07f5&amp;gt;] llog_declare_create+0x75/0x1f0 [obdclass]
[23485.547881]  [&amp;lt;ffffffffc0ea6fbd&amp;gt;] llog_cat_prep_log+0x11d/0x360 [obdclass]
[23485.555566]  [&amp;lt;ffffffffc0ea7260&amp;gt;] llog_cat_declare_add_rec+0x60/0x260 [obdclass]
[23485.563832]  [&amp;lt;ffffffffc0e9e178&amp;gt;] llog_declare_add+0x78/0x1a0 [obdclass]
[23485.571336]  [&amp;lt;ffffffffc124c52e&amp;gt;] top_trans_start+0x17e/0x940 [ptlrpc]
[23485.578627]  [&amp;lt;ffffffffc18ce2b4&amp;gt;] lod_trans_start+0x34/0x40 [lod]
[23485.585432]  [&amp;lt;ffffffffc1989f9a&amp;gt;] mdd_trans_start+0x1a/0x20 [mdd]
[23485.592239]  [&amp;lt;ffffffffc196e2f2&amp;gt;] mdd_create+0xbe2/0x1630 [mdd]
[23485.598859]  [&amp;lt;ffffffffc17f8e84&amp;gt;] mdt_create+0xb54/0x10e0 [mdt]
[23485.605489]  [&amp;lt;ffffffffc0ecc3c4&amp;gt;] ? lprocfs_stats_lock+0x24/0xd0 [obdclass]
[23485.613270]  [&amp;lt;ffffffffc17f957b&amp;gt;] mdt_reint_create+0x16b/0x360 [mdt]
[23485.620370]  [&amp;lt;ffffffffc17feab3&amp;gt;] mdt_reint_rec+0x83/0x210 [mdt]
[23485.627080]  [&amp;lt;ffffffffc17d89e0&amp;gt;] mdt_reint_internal+0x7b0/0xba0 [mdt]
[23485.634378]  [&amp;lt;ffffffffc17e1f67&amp;gt;] ? mdt_thread_info_init+0xa7/0x1e0 [mdt]
[23485.641963]  [&amp;lt;ffffffffc17e46d7&amp;gt;] mdt_reint+0x67/0x140 [mdt]
[23485.648312]  [&amp;lt;ffffffffc123b83a&amp;gt;] tgt_request_handle+0x98a/0x1630 [ptlrpc]
[23485.656022]  [&amp;lt;ffffffffc1212c41&amp;gt;] ? ptlrpc_nrs_req_get_nolock0+0xd1/0x170 [ptlrpc]
[23485.664474]  [&amp;lt;ffffffffc0dabcbe&amp;gt;] ? ktime_get_real_seconds+0xe/0x10 [libcfs]
[23485.672372]  [&amp;lt;ffffffffc11dda96&amp;gt;] ptlrpc_server_handle_request+0x256/0xb10 [ptlrpc]
[23485.680951]  [&amp;lt;ffffffffc11da4e1&amp;gt;] ? ptlrpc_wait_event+0xd1/0x3a0 [ptlrpc]
[23485.688535]  [&amp;lt;ffffffff848d2643&amp;gt;] ? __wake_up+0x13/0x20
[23485.694392]  [&amp;lt;ffffffffc11e15cc&amp;gt;] ptlrpc_main+0xbac/0x1540 [ptlrpc]
[23485.701412]  [&amp;lt;ffffffffc11e0a20&amp;gt;] ? ptlrpc_register_service+0xf90/0xf90 [ptlrpc]
[23485.709666]  [&amp;lt;ffffffff848c50d1&amp;gt;] kthread+0xd1/0xe0
[23485.715115]  [&amp;lt;ffffffff848c5000&amp;gt;] ? insert_kthread_work+0x40/0x40
[23485.721914]  [&amp;lt;ffffffff84f8cd37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[23485.729199]  [&amp;lt;ffffffff848c5000&amp;gt;] ? insert_kthread_work+0x40/0x40
[    0.000000] Initializing cgroup subsys cpuset
[    0.000000] Initializing cgroup subsys cpu
[    0.000000] Initializing cgroup subsys cpuacct
[    0.000000] Linux version 3.10.0-1062.1.1.el7_lustre.x86_64 (jenkins@trevis-306-el7-x8664-2.trevis.whamcloud.com) (gcc version 4.8.5 20150623 (Red Hat 4.8.5-36) (GCC) ) #1 SMP Fri Nov 8 18:37:40 UTC 2019
[    0.000000] Command line: BOOT_IMAGE=/boot/vmlinuz-3.10.0-1062.1.1.el7_lustre.x86_64 ro console=ttyS0,115200 irqpoll nr_cpus=1 reset_devices cgroup_disable=memory mce=off numa=off udev.children-max=2 panic=10 rootflags=nofail acpi_no_memhotplug transparent_hugepage=never nokaslr novmcoredd disable_cpu_apicid=0 elfcorehdr=869816K
[    0.000000] e820: BIOS-provided physical RAM map:
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;soak-10 console&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 2564.640197] Lustre: soaked-MDT0002: disconnecting 9 stale clients
[ 2564.727697] Lustre: soaked-MDT0002: Recovery over after 5:00, of 12 clients 3 recovered and 9 were evicted.
[ 2567.239742] Lustre: soaked-MDT0000-osp-MDT0002: Connection restored to 192.168.1.108@o2ib (at 192.168.1.108@o2ib)
[ 2567.251222] Lustre: Skipped 8 previous similar messages
[24318.991635] LustreError: 5459:0:(osd_handler.c:2165:osd_object_release()) ASSERTION( !(o-&amp;gt;oo_destroyed == 0 &amp;amp;&amp;amp; o-&amp;gt;oo_inode &amp;amp;&amp;amp; o-&amp;gt;oo_inode
-&amp;gt;i_nlink == 0) ) failed: 
[24319.007997] LustreError: 5459:0:(osd_handler.c:2165:osd_object_release()) LBUG
[24319.016083] Pid: 5459, comm: mdt_out01_002 3.10.0-1062.1.1.el7_lustre.x86_64 #1 SMP Fri Nov 8 18:37:40 UTC 2019
[24319.027375] Call Trace:
[24319.030151]  [&amp;lt;ffffffffc0c7d8ac&amp;gt;] libcfs_call_trace+0x8c/0xc0 [libcfs]
[24319.037494]  [&amp;lt;ffffffffc0c7d95c&amp;gt;] lbug_with_loc+0x4c/0xa0 [libcfs]
[24319.044452]  [&amp;lt;ffffffffc158166c&amp;gt;] osd_object_release+0x7c/0x80 [osd_ldiskfs]
[24319.052389]  [&amp;lt;ffffffffc0dc10a8&amp;gt;] lu_object_put+0x198/0x3e0 [obdclass]
[24319.059807]  [&amp;lt;ffffffffc1110b9c&amp;gt;] out_tx_end+0x1ec/0x5c0 [ptlrpc]
[24319.066758]  [&amp;lt;ffffffffc1114d52&amp;gt;] out_handle+0x1442/0x1bb0 [ptlrpc]
[24319.073859]  [&amp;lt;ffffffffc110d83a&amp;gt;] tgt_request_handle+0x98a/0x1630 [ptlrpc]
[24319.081638]  [&amp;lt;ffffffffc10afa96&amp;gt;] ptlrpc_server_handle_request+0x256/0xb10 [ptlrpc]
[24319.090301]  [&amp;lt;ffffffffc10b35cc&amp;gt;] ptlrpc_main+0xbac/0x1540 [ptlrpc]
[24319.097391]  [&amp;lt;ffffffff8bcc50d1&amp;gt;] kthread+0xd1/0xe0
[24319.102906]  [&amp;lt;ffffffff8c38cd37&amp;gt;] ret_from_fork_nospec_end+0x0/0x39
[24319.109926]  [&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff
[24319.115541] Kernel panic - not syncing: LBUG
[24319.120317] CPU: 12 PID: 5459 Comm: mdt_out01_002 Kdump: loaded Tainted: G           OE  ------------   3.10.0-1062.1.1.el7_lustre.x86_64 #1
[24319.134391] Hardware name: Intel Corporation S2600GZ ........../S2600GZ, BIOS SE5C600.86B.01.08.0003.022620131521 02/26/2013
[24319.146920] Call Trace:
[24319.149656]  [&amp;lt;ffffffff8c3792c2&amp;gt;] dump_stack+0x19/0x1b
[24319.155410]  [&amp;lt;ffffffff8c372941&amp;gt;] panic+0xe8/0x21f
[24319.160762]  [&amp;lt;ffffffffc0c7d9ab&amp;gt;] lbug_with_loc+0x9b/0xa0 [libcfs]
[24319.167668]  [&amp;lt;ffffffffc158166c&amp;gt;] osd_object_release+0x7c/0x80 [osd_ldiskfs]
[24319.175562]  [&amp;lt;ffffffffc0dc10a8&amp;gt;] lu_object_put+0x198/0x3e0 [obdclass]
[24319.182897]  [&amp;lt;ffffffffc1110b9c&amp;gt;] out_tx_end+0x1ec/0x5c0 [ptlrpc]
[24319.189744]  [&amp;lt;ffffffffc1114d52&amp;gt;] out_handle+0x1442/0x1bb0 [ptlrpc]
[24319.196784]  [&amp;lt;ffffffffc110d83a&amp;gt;] tgt_request_handle+0x98a/0x1630 [ptlrpc]
[24319.204509]  [&amp;lt;ffffffffc10e4c41&amp;gt;] ? ptlrpc_nrs_req_get_nolock0+0xd1/0x170 [ptlrpc]
[24319.212965]  [&amp;lt;ffffffffc0c7dcbe&amp;gt;] ? ktime_get_real_seconds+0xe/0x10 [libcfs]
[24319.220877]  [&amp;lt;ffffffffc10afa96&amp;gt;] ptlrpc_server_handle_request+0x256/0xb10 [ptlrpc]
[24319.229471]  [&amp;lt;ffffffffc10ac4e1&amp;gt;] ? ptlrpc_wait_event+0xd1/0x3a0 [ptlrpc]
[24319.237076]  [&amp;lt;ffffffff8bcd2643&amp;gt;] ? __wake_up+0x13/0x20
[24319.242946]  [&amp;lt;ffffffffc10b35cc&amp;gt;] ptlrpc_main+0xbac/0x1540 [ptlrpc]
[24319.249988]  [&amp;lt;ffffffffc10b2a20&amp;gt;] ? ptlrpc_register_service+0xf90/0xf90 [ptlrpc]
[24319.258245]  [&amp;lt;ffffffff8bcc50d1&amp;gt;] kthread+0xd1/0xe0
[24319.263695]  [&amp;lt;ffffffff8bcc5000&amp;gt;] ? insert_kthread_work+0x40/0x40
[24319.270500]  [&amp;lt;ffffffff8c38cd37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[24319.277791]  [&amp;lt;ffffffff8bcc5000&amp;gt;] ? insert_kthread_work+0x40/0x40
[    0.000000] Initializing cgroup subsys cpuset
[    0.000000] Initializing cgroup subsys cpu
[    0.000000] Initializing cgroup subsys cpuacct
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>b2.13-ib #2</environment>
        <key id="57443">LU-13000</key>
            <summary>MDS hit (osd_handler.c:2165:osd_object_release()) ASSERTION( !(o-&gt;oo_destroyed == 0 &amp;&amp; o-&gt;oo_inode &amp;&amp; o-&gt;oo_inode -&gt;i_nlink == 0) ) failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="sarah">Sarah Liu</reporter>
                        <labels>
                            <label>soak</label>
                    </labels>
                <created>Fri, 22 Nov 2019 18:41:17 +0000</created>
                <updated>Fri, 4 Dec 2020 03:51:32 +0000</updated>
                            <resolved>Fri, 4 Dec 2020 03:51:32 +0000</resolved>
                                    <version>Lustre 2.13.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>2</watches>
                                                                            <comments>
                            <comment id="286704" author="adilger" created="Fri, 4 Dec 2020 03:51:32 +0000"  >&lt;p&gt;This assertion is removed with the patch &lt;a href=&quot;https://review.whamcloud.com/40058&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40058&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13980&quot; title=&quot;Kernel panic on OST after removing files under &amp;#39;/O&amp;#39; folder&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13980&quot;&gt;LU-13980&lt;/a&gt; osd: remove osd_object_release LASSERT&lt;/tt&gt;&quot;.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="60906">LU-13980</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00ptj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>