<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:38:28 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-10820] Interop 2.10.3 &lt;-&gt;2.11 sanity test_276: (dt_object.h:2509:dt_statfs()) ASSERTION( dev ) failed</title>
                <link>https://jira.whamcloud.com/browse/LU-10820</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah_lw &amp;lt;wei3.liu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/2a35151a-2751-11e8-b74b-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/2a35151a-2751-11e8-b74b-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;test_276 failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Timeout occurred after 175 mins, last suite running was sanity, restarting cluster to continue tests
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Hit LBUG in interop testing between 2.10.3 server and master tag-2.10.59 client as &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10650&quot; title=&quot;cslco1705 crash: dt_statfs()) ASSERTION( dev ) failed: LBUG, Pid: 3372, comm: lctl&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10650&quot;&gt;&lt;del&gt;LU-10650&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
[ 6845.114788] Lustre: DEBUG MARKER: /usr/sbin/lctl mark == sanity test 276: Race between mount and obd_statfs ================================================ 01:42:10 \(1520905330\)
[ 6845.289408] Lustre: DEBUG MARKER: == sanity test 276: Race between mount and obd_statfs ================================================ 01:42:10 (1520905330)
[ 6845.483951] Lustre: DEBUG MARKER: (while true; do /usr/sbin/lctl get_param obdfilter.*.filesfree &amp;gt; /dev/null 2&amp;gt;&amp;amp;1; done) &amp;amp; pid=$!; echo $pid &amp;gt; /tmp/sanity_276_pid
[ 6845.485916] Lustre: DEBUG MARKER: grep -c /mnt/lustre-ost1&apos; &apos; /proc/mounts || true
[ 6845.798565] Lustre: DEBUG MARKER: umount -d /mnt/lustre-ost1
[ 6846.021507] Lustre: Failing over lustre-OST0000
[ 6846.065295] Lustre: server umount lustre-OST0000 complete
[ 6846.241344] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp;
[ 6846.241344] lctl dl | grep &apos; ST &apos; || true
[ 6846.683589] Lustre: DEBUG MARKER: mkdir -p /mnt/lustre-ost1
[ 6846.994289] Lustre: DEBUG MARKER: test -b /dev/lvm-Role_OSS/P1
[ 6847.287819] Lustre: DEBUG MARKER: e2label /dev/lvm-Role_OSS/P1
[ 6847.741015] Lustre: DEBUG MARKER: mkdir -p /mnt/lustre-ost1; mount -t lustre /dev/lvm-Role_OSS/P1 /mnt/lustre-ost1
[ 6847.763478] LustreError: 137-5: lustre-OST0000_UUID: not available for connect from 10.2.8.29@tcp (no target). If you are running an HA pair check that the target is mounted on the other server.
[ 6848.086287] LDISKFS-fs (dm-0): file extents enabled, maximum tree depth=5
[ 6848.088755] LDISKFS-fs (dm-0): mounted filesystem with ordered data mode. Opts: ,errors=remount-ro,no_mbcache,nodelalloc
[ 6848.133350] LustreError: 23480:0:(dt_object.h:2509:dt_statfs()) ASSERTION( dev ) failed: 
[ 6848.134405] LustreError: 23480:0:(dt_object.h:2509:dt_statfs()) LBUG
[ 6848.135065] Pid: 23480, comm: lctl
[ 6848.135417] 
[ 6848.135417] Call Trace:
[ 6848.135842] [&amp;lt;ffffffffc05d27ae&amp;gt;] libcfs_call_trace+0x4e/0x60 [libcfs]
[ 6848.136522] [&amp;lt;ffffffffc05d283c&amp;gt;] lbug_with_loc+0x4c/0xb0 [libcfs]
[ 6848.137206] [&amp;lt;ffffffffc0a9cde2&amp;gt;] tgt_statfs_internal+0x2f2/0x360 [ptlrpc]
[ 6848.137932] [&amp;lt;ffffffffc0d7d266&amp;gt;] ofd_statfs+0x66/0x470 [ofd]
[ 6848.138690] [&amp;lt;ffffffffc07d00c6&amp;gt;] lprocfs_filesfree_seq_show+0xf6/0x530 [obdclass]
[ 6848.139484] [&amp;lt;ffffffff811f4d72&amp;gt;] ? __mem_cgroup_commit_charge+0xe2/0x2f0
[ 6848.140176] [&amp;lt;ffffffff8119320e&amp;gt;] ? lru_cache_add+0xe/0x10
[ 6848.140744] [&amp;lt;ffffffff811be298&amp;gt;] ? page_add_new_anon_rmap+0xb8/0x170
[ 6848.141421] [&amp;lt;ffffffff811e23d5&amp;gt;] ? __kmalloc+0x55/0x230
[ 6848.141987] [&amp;lt;ffffffff81227eb7&amp;gt;] ? seq_buf_alloc+0x17/0x40
[ 6848.142581] [&amp;lt;ffffffffc0d91142&amp;gt;] ofd_filesfree_seq_show+0x12/0x20 [ofd]
[ 6848.143274] [&amp;lt;ffffffff812283ba&amp;gt;] seq_read+0x10a/0x3b0
[ 6848.143810] [&amp;lt;ffffffff8127248d&amp;gt;] proc_reg_read+0x3d/0x80
[ 6848.144372] [&amp;lt;ffffffff8120295c&amp;gt;] vfs_read+0x9c/0x170
[ 6848.144902] [&amp;lt;ffffffff8120381f&amp;gt;] SyS_read+0x7f/0xe0
[ 6848.145407] [&amp;lt;ffffffff816b8929&amp;gt;] ? system_call_after_swapgs+0x156/0x214
[ 6848.146096] [&amp;lt;ffffffff816b89fd&amp;gt;] system_call_fastpath+0x16/0x1b
[ 6848.146717] [&amp;lt;ffffffff816b889d&amp;gt;] ? system_call_after_swapgs+0xca/0x214
[ 6848.147392] 
[ 6848.147570] Kernel panic - not syncing: LBUG
[ 6848.148014] CPU: 0 PID: 23480 Comm: lctl Tainted: G OE ------------ 3.10.0-693.11.6.el7_lustre.x86_64 #1
[ 6848.149072] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[ 6848.149645] Call Trace:
[ 6848.149913] [&amp;lt;ffffffff816a5e7d&amp;gt;] dump_stack+0x19/0x1b
[ 6848.150436] [&amp;lt;ffffffff8169fd64&amp;gt;] panic+0xe8/0x20d
[ 6848.150935] [&amp;lt;ffffffffc05d2854&amp;gt;] lbug_with_loc+0x64/0xb0 [libcfs]
[ 6848.151590] [&amp;lt;ffffffffc0a9cde2&amp;gt;] tgt_statfs_internal+0x2f2/0x360 [ptlrpc]
[ 6848.152299] [&amp;lt;ffffffffc0d7d266&amp;gt;] ofd_statfs+0x66/0x470 [ofd]
[ 6848.152901] [&amp;lt;ffffffffc07d00c6&amp;gt;] lprocfs_filesfree_seq_show+0xf6/0x530 [obdclass]
[ 6848.153663] [&amp;lt;ffffffff811f4d72&amp;gt;] ? __mem_cgroup_commit_charge+0xe2/0x2f0
[ 6848.154348] [&amp;lt;ffffffff8119320e&amp;gt;] ? lru_cache_add+0xe/0x10
[ 6848.154912] [&amp;lt;ffffffff811be298&amp;gt;] ? page_add_new_anon_rmap+0xb8/0x170
[ 6848.155559] [&amp;lt;ffffffff811e23d5&amp;gt;] ? __kmalloc+0x55/0x230
[ 6848.156104] [&amp;lt;ffffffff81227eb7&amp;gt;] ? seq_buf_alloc+0x17/0x40
[ 6848.156685] [&amp;lt;ffffffffc0d91142&amp;gt;] ofd_filesfree_seq_show+0x12/0x20 [ofd]
[ 6848.157357] [&amp;lt;ffffffff812283ba&amp;gt;] seq_read+0x10a/0x3b0
[ 6848.157891] [&amp;lt;ffffffff8127248d&amp;gt;] proc_reg_read+0x3d/0x80
[ 6848.158436] [&amp;lt;ffffffff8120295c&amp;gt;] vfs_read+0x9c/0x170
[ 6848.158955] [&amp;lt;ffffffff8120381f&amp;gt;] SyS_read+0x7f/0xe0
[ 6848.159458] [&amp;lt;ffffffff816b8929&amp;gt;] ? system_call_after_swapgs+0x156/0x214
[ 6848.160130] [&amp;lt;ffffffff816b89fd&amp;gt;] system_call_fastpath+0x16/0x1b
[ 6848.160745] [&amp;lt;ffffffff816b889d&amp;gt;] ? system_call_after_swapgs+0xca/0x214
[ 0.000000] Initializing cgroup subsys cpuset
[ 0.000000] Initializing cgroup subsys cpu
[ 0.000000] Initializing cgroup subsys cpuacct

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;VVVVVVV DO NOT REMOVE LINES BELOW, Added by Maloo for auto-association VVVVVVV&lt;br/&gt;
 sanity test_276 - Timeout occurred after 175 mins, last suite running was sanity, restarting cluster to continue tests&lt;/p&gt;</description>
                <environment></environment>
        <key id="51387">LU-10820</key>
            <summary>Interop 2.10.3 &lt;-&gt;2.11 sanity test_276: (dt_object.h:2509:dt_statfs()) ASSERTION( dev ) failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="1" iconUrl="https://jira.whamcloud.com/images/icons/statuses/open.png" description="The issue is open and ready for the assignee to start work on it.">Open</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Fri, 16 Mar 2018 01:19:26 +0000</created>
                <updated>Mon, 16 Apr 2018 19:34:05 +0000</updated>
                                            <version>Lustre 2.11.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>2</watches>
                                                                            <comments>
                            <comment id="223863" author="jamesanunez" created="Fri, 16 Mar 2018 17:32:18 +0000"  >&lt;p&gt;A port of the following patch to b2_10 should fix this issue&#160;&lt;/p&gt;

&lt;p&gt;--&lt;a href=&quot;https://review.whamcloud.com/#/c/31243/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/31243/&lt;/a&gt; &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10650&quot; title=&quot;cslco1705 crash: dt_statfs()) ASSERTION( dev ) failed: LBUG, Pid: 3372, comm: lctl&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10650&quot;&gt;&lt;del&gt;LU-10650&lt;/del&gt;&lt;/a&gt;&#160;obd: add check to obd_statfs&lt;/p&gt;</comment>
                            <comment id="226103" author="jamesanunez" created="Mon, 16 Apr 2018 19:34:05 +0000"  >&lt;p&gt;The b2_10 patch for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10650&quot; title=&quot;cslco1705 crash: dt_statfs()) ASSERTION( dev ) failed: LBUG, Pid: 3372, comm: lctl&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10650&quot;&gt;&lt;del&gt;LU-10650&lt;/del&gt;&lt;/a&gt;, &lt;a href=&quot;https://review.whamcloud.com/#/c/31752/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/31752/&lt;/a&gt;, landed to the b2_10 branch with build # 99. When interop testing completes for build # 99 or greater, we should verify that this interop issue is fixed. &lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="50728">LU-10650</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzugf:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>