<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:47:03 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
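For instance, assuming the standard JIRA issue-xml view path for this issue, the full request might look like:
https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-11801/LU-11801.xml?field=key&field=summary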
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-11801] replay-vbr test 0b crashes with an LBUG/ASSERTION( ctxt )</title>
                <link>https://jira.whamcloud.com/browse/LU-11801</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;replay-vbr test_0b crashes for Ubuntu 18.04 clients with RHEL 7.6 servers. This test started crashing on 27 November 2018.&lt;/p&gt;

&lt;p&gt;Looking at the kernel crash from &lt;a href=&quot;https://testing.whamcloud.com/test_sets/9f692e08-fdc9-11e8-93ea-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/9f692e08-fdc9-11e8-93ea-52540065bddc&lt;/a&gt;, we see&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 5308.450564] Lustre: DEBUG MARKER: == replay-vbr test 0b: getversion for non existent fid shouldn&apos;t cause kernel panic ================== 21:08:17 (1544562497)
[ 5308.527820] LustreError: 12286:0:(osp_sync.c:346:osp_sync_declare_add()) ASSERTION( ctxt ) failed: 
[ 5308.528714] LustreError: 12286:0:(osp_sync.c:346:osp_sync_declare_add()) LBUG
[ 5308.529382] Pid: 12286, comm: mdt00_000 3.10.0-957.el7_lustre.x86_64 #1 SMP Sat Dec 8 05:53:16 UTC 2018
[ 5308.530265] Call Trace:
[ 5308.530534]  [&amp;lt;ffffffffc079d7cc&amp;gt;] libcfs_call_trace+0x8c/0xc0 [libcfs]
[ 5308.531258]  [&amp;lt;ffffffffc079d87c&amp;gt;] lbug_with_loc+0x4c/0xa0 [libcfs]
[ 5308.531885]  [&amp;lt;ffffffffc11e0b89&amp;gt;] osp_sync_declare_add+0x3b9/0x3f0 [osp]
[ 5308.532569]  [&amp;lt;ffffffffc11d0ce3&amp;gt;] osp_declare_destroy+0x1a3/0x1f0 [osp]
[ 5308.533334]  [&amp;lt;ffffffffc111a85e&amp;gt;] lod_sub_declare_destroy+0xce/0x2d0 [lod]
[ 5308.534219]  [&amp;lt;ffffffffc10f7a3d&amp;gt;] lod_obj_stripe_destroy_cb+0x8d/0xa0 [lod]
[ 5308.534955]  [&amp;lt;ffffffffc110423e&amp;gt;] lod_obj_for_each_stripe+0x11e/0x2d0 [lod]
[ 5308.535718]  [&amp;lt;ffffffffc110504f&amp;gt;] lod_declare_destroy+0x45f/0x5e0 [lod]
[ 5308.536452]  [&amp;lt;ffffffffc116b081&amp;gt;] mdd_declare_finish_unlink+0x91/0x210 [mdd]
[ 5308.537193]  [&amp;lt;ffffffffc117a9af&amp;gt;] mdd_unlink+0x4bf/0xad0 [mdd]
[ 5308.537829]  [&amp;lt;ffffffffc1043089&amp;gt;] mdo_unlink+0x46/0x48 [mdt]
[ 5308.538539]  [&amp;lt;ffffffffc1005e69&amp;gt;] mdt_reint_unlink+0xb49/0x14a0 [mdt]
[ 5308.539308]  [&amp;lt;ffffffffc100c5e3&amp;gt;] mdt_reint_rec+0x83/0x210 [mdt]
[ 5308.539937]  [&amp;lt;ffffffffc0fe9133&amp;gt;] mdt_reint_internal+0x6e3/0xaf0 [mdt]
[ 5308.540621]  [&amp;lt;ffffffffc0ff4497&amp;gt;] mdt_reint+0x67/0x140 [mdt]
[ 5308.541262]  [&amp;lt;ffffffffc0c8535a&amp;gt;] tgt_request_handle+0xaea/0x1580 [ptlrpc]
[ 5308.542296]  [&amp;lt;ffffffffc0c2992b&amp;gt;] ptlrpc_server_handle_request+0x24b/0xab0 [ptlrpc]
[ 5308.543087]  [&amp;lt;ffffffffc0c2d25c&amp;gt;] ptlrpc_main+0xafc/0x1fc0 [ptlrpc]
[ 5308.543835]  [&amp;lt;ffffffff9bcc1c31&amp;gt;] kthread+0xd1/0xe0
[ 5308.544389]  [&amp;lt;ffffffff9c374c37&amp;gt;] ret_from_fork_nospec_end+0x0/0x39
[ 5308.545029]  [&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff
[ 5308.545592] Kernel panic - not syncing: LBUG
[ 5308.546008] CPU: 0 PID: 12286 Comm: mdt00_000 Kdump: loaded Tainted: G           OE  ------------   3.10.0-957.el7_lustre.x86_64 #1
[ 5308.547086] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[ 5308.547649] Call Trace:
[ 5308.547916]  [&amp;lt;ffffffff9c361dc1&amp;gt;] dump_stack+0x19/0x1b
[ 5308.548409]  [&amp;lt;ffffffff9c35b4d0&amp;gt;] panic+0xe8/0x21f
[ 5308.548865]  [&amp;lt;ffffffffc079d8cb&amp;gt;] lbug_with_loc+0x9b/0xa0 [libcfs]
[ 5308.549448]  [&amp;lt;ffffffffc11e0b89&amp;gt;] osp_sync_declare_add+0x3b9/0x3f0 [osp]
[ 5308.550080]  [&amp;lt;ffffffffc11d0ce3&amp;gt;] osp_declare_destroy+0x1a3/0x1f0 [osp]
[ 5308.550705]  [&amp;lt;ffffffffc111a85e&amp;gt;] lod_sub_declare_destroy+0xce/0x2d0 [lod]
[ 5308.551377]  [&amp;lt;ffffffffc10f7a3d&amp;gt;] lod_obj_stripe_destroy_cb+0x8d/0xa0 [lod]
[ 5308.552040]  [&amp;lt;ffffffffc110423e&amp;gt;] lod_obj_for_each_stripe+0x11e/0x2d0 [lod]
[ 5308.552697]  [&amp;lt;ffffffffc110504f&amp;gt;] lod_declare_destroy+0x45f/0x5e0 [lod]
[ 5308.553459]  [&amp;lt;ffffffffc09e4ca4&amp;gt;] ? lu_env_refill+0x24/0x30 [obdclass]
[ 5308.554081]  [&amp;lt;ffffffffc10f79b0&amp;gt;] ? lod_xattr_list+0x150/0x150 [lod]
[ 5308.554674]  [&amp;lt;ffffffffc116b081&amp;gt;] mdd_declare_finish_unlink+0x91/0x210 [mdd]
[ 5308.555363]  [&amp;lt;ffffffffc117a9af&amp;gt;] mdd_unlink+0x4bf/0xad0 [mdd]
[ 5308.555929]  [&amp;lt;ffffffffc1043089&amp;gt;] mdo_unlink+0x46/0x48 [mdt]
[ 5308.556469]  [&amp;lt;ffffffffc1005e69&amp;gt;] mdt_reint_unlink+0xb49/0x14a0 [mdt]
[ 5308.557088]  [&amp;lt;ffffffffc100c5e3&amp;gt;] mdt_reint_rec+0x83/0x210 [mdt]
[ 5308.557663]  [&amp;lt;ffffffffc0fe9133&amp;gt;] mdt_reint_internal+0x6e3/0xaf0 [mdt]
[ 5308.558293]  [&amp;lt;ffffffffc0ff13f4&amp;gt;] ? mdt_thread_info_init+0xa4/0x1e0 [mdt]
[ 5308.558933]  [&amp;lt;ffffffffc0ff4497&amp;gt;] mdt_reint+0x67/0x140 [mdt]
[ 5308.559512]  [&amp;lt;ffffffffc0c8535a&amp;gt;] tgt_request_handle+0xaea/0x1580 [ptlrpc]
[ 5308.560184]  [&amp;lt;ffffffffc07a3f07&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[ 5308.560834]  [&amp;lt;ffffffffc0c2992b&amp;gt;] ptlrpc_server_handle_request+0x24b/0xab0 [ptlrpc]
[ 5308.561558]  [&amp;lt;ffffffff9bccba9b&amp;gt;] ? __wake_up_common+0x5b/0x90
[ 5308.562151]  [&amp;lt;ffffffffc0c2d25c&amp;gt;] ptlrpc_main+0xafc/0x1fc0 [ptlrpc]
[ 5308.562750]  [&amp;lt;ffffffff9bcd0880&amp;gt;] ? finish_task_switch+0x50/0x1c0
[ 5308.563382]  [&amp;lt;ffffffffc0c2c760&amp;gt;] ? ptlrpc_register_service+0xf80/0xf80 [ptlrpc]
[ 5308.564084]  [&amp;lt;ffffffff9bcc1c31&amp;gt;] kthread+0xd1/0xe0
[ 5308.564541]  [&amp;lt;ffffffff9bcc1b60&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 5308.565108]  [&amp;lt;ffffffff9c374c37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[ 5308.565710]  [&amp;lt;ffffffff9bcc1b60&amp;gt;] ? insert_kthread_work+0x40/0x40
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
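
&lt;p&gt;The ASSERTION( ctxt ) at osp_sync.c:346 says osp_sync_declare_add() found no llog context on the OSP device while declaring the unlink. Below is a minimal sketch of that pattern, assuming the standard llog context lookup; only the function, file:line, and assertion come from the trace, the rest is illustrative (d is an assumed struct osp_device pointer in scope):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;/* sketch only: names taken from the trace, body assumed */
struct llog_ctxt *ctxt;

ctxt = llog_get_context(d-&amp;gt;opd_obd, LLOG_MDS_OST_ORIG_CTXT);
LASSERT(ctxt);   /* reported above as ASSERTION( ctxt ) failed, then LBUG */
/* ... declare the destroy record in the llog, then release the context: */
llog_ctxt_put(ctxt);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;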

&lt;p&gt;There are several examples of this crash:&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/375d7040-fdc8-11e8-b837-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/375d7040-fdc8-11e8-b837-52540065bddc&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/54b126fe-f955-11e8-b67f-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/54b126fe-f955-11e8-b67f-52540065bddc&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/cec17f86-f6e7-11e8-815b-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/cec17f86-f6e7-11e8-815b-52540065bddc&lt;/a&gt;&lt;/p&gt;

</description>
                <environment>Ubuntu 18.04</environment>
        <key id="54331">LU-11801</key>
            <summary>replay-vbr test 0b crashes with an LBUG/ASSERTION( ctxt )</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="bzzz">Alex Zhuravlev</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                            <label>ubuntu</label>
                    </labels>
                <created>Mon, 17 Dec 2018 23:39:28 +0000</created>
                <updated>Wed, 15 Apr 2020 17:41:18 +0000</updated>
                            <resolved>Wed, 15 Apr 2020 17:41:18 +0000</resolved>
                                    <version>Lustre 2.12.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                    <comments>
                            <comment id="238779" author="green" created="Tue, 18 Dec 2018 19:05:42 +0000"  >&lt;p&gt;This assertion seems to be a 100% match to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9337&quot; title=&quot;LBUG replay-single test_0b: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9337&quot;&gt;LU-9337&lt;/a&gt;, the stack trace also matches.&lt;/p&gt;</comment>
                            <comment id="238781" author="pjones" created="Tue, 18 Dec 2018 19:08:13 +0000"  >&lt;p&gt;Alex&lt;/p&gt;

&lt;p&gt;Could you please assess this issue?&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="238791" author="bzzz" created="Tue, 18 Dec 2018 19:45:12 +0000"  >&lt;p&gt;checking the logs. it makes sense to mention that in all reported cases replay-dual precede and failed with very similar symptom:&lt;br/&gt;
&quot;Restart of mds1 failed!&quot; and the following in the log:&lt;/p&gt;

&lt;p&gt;+ pm -h powerman --off trevis-27vm8&lt;br/&gt;
/usr/lib64/lustre/tests/test-framework.sh: line 2470: pm: command not found&lt;br/&gt;
waiting ! ping -w 3 -c 1 trevis-27vm8, 4 secs left ...&lt;br/&gt;
waiting ! ping -w 3 -c 1 trevis-27vm8, 3 secs left ...&lt;br/&gt;
waiting ! ping -w 3 -c 1 trevis-27vm8, 2 secs left ...&lt;br/&gt;
waiting ! ping -w 3 -c 1 trevis-27vm8, 1 secs left ...&lt;br/&gt;
waiting for trevis-27vm8 to fail attempts=3&lt;br/&gt;
+ pm -h powerman --off trevis-27vm8&lt;br/&gt;
/usr/lib64/lustre/tests/test-framework.sh: line 2470: pm: command not found&lt;br/&gt;
waiting ! ping -w 3 -c 1 trevis-27vm8, 4 secs left ...&lt;br/&gt;
waiting ! ping -w 3 -c 1 trevis-27vm8, 3 secs left ...&lt;br/&gt;
waiting ! ping -w 3 -c 1 trevis-27vm8, 2 secs left ...&lt;br/&gt;
waiting ! ping -w 3 -c 1 trevis-27vm8, 1 secs left ...&lt;br/&gt;
waiting for trevis-27vm8 to fail attempts=3&lt;br/&gt;
trevis-27vm8 still pingable after power down! attempts=3&lt;br/&gt;
reboot facets: mds1&lt;br/&gt;
+ pm -h powerman --on trevis-27vm8&lt;br/&gt;
/usr/lib64/lustre/tests/test-framework.sh: line 2560: pm: command not found&lt;br/&gt;
Failover mds1 to trevis-27vm7&lt;br/&gt;
03:17:22 (1543807042) waiting for trevis-27vm7 network 900 secs ...&lt;br/&gt;
03:17:22 (1543807042) network interface is UP&lt;br/&gt;
CMD: trevis-27vm7 hostname&lt;br/&gt;
mount facets: mds1&lt;br/&gt;
CMD: trevis-27vm7 dmsetup status /dev/mapper/mds1_flakey &amp;gt;/dev/null 2&amp;gt;&amp;amp;1&lt;br/&gt;
CMD: trevis-27vm7 dmsetup status /dev/mapper/mds1_flakey 2&amp;gt;&amp;amp;1&lt;br/&gt;
CMD: trevis-27vm7 dmsetup table /dev/mapper/mds1_flakey&lt;br/&gt;
CMD: trevis-27vm7 dmsetup suspend --nolockfs --noflush /dev/mapper/mds1_flakey&lt;br/&gt;
CMD: trevis-27vm7 dmsetup load /dev/mapper/mds1_flakey --table \&quot;0 20971520 linear 252:0 0\&quot;&lt;br/&gt;
CMD: trevis-27vm7 dmsetup resume /dev/mapper/mds1_flakey&lt;br/&gt;
CMD: trevis-27vm7 test -b /dev/mapper/mds1_flakey&lt;br/&gt;
CMD: trevis-27vm7 e2label /dev/mapper/mds1_flakey&lt;br/&gt;
Starting mds1: /dev/mapper/mds1_flakey /mnt/lustre-mds1&lt;br/&gt;
CMD: trevis-27vm7 mkdir -p /mnt/lustre-mds1; mount -t lustre /dev/mapper/mds1_flakey /mnt/lustre-mds1&lt;br/&gt;
trevis-27vm7: mount.lustre: according to /etc/mtab /dev/mapper/mds1_flakey is already mounted on /mnt/lustre-mds1&lt;br/&gt;
Start of /dev/mapper/mds1_flakey on mds1 failed 17&lt;/p&gt;

</comment>
                            <comment id="267735" author="bzzz" created="Wed, 15 Apr 2020 17:41:18 +0000"  >&lt;p&gt;a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12674&quot; title=&quot;osp should handle -EINPROGRESS on llog objects&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12674&quot;&gt;&lt;del&gt;LU-12674&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="45480">LU-9337</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                    <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i0089b:</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>