<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:21:21 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-15794] Downgrade client fails: LustreError: 11993:0:(lov_object.c:551:lov_init_dom()) ASSERTION( index == 0 ) failed:</title>
                <link>https://jira.whamcloud.com/browse/LU-15794</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Immediately on starting sanity, after downgrading the client -&#160;&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ &#160;368.034864] Lustre: DEBUG MARKER: trevis-86vm1.trevis.whamcloud.com: executing check_config_client /mnt/lustre^M
[ &#160;370.048716] Lustre: DEBUG MARKER: Using TIMEOUT=100^M
[ &#160;371.064948] LustreError: 11993:0:(lov_object.c:551:lov_init_dom()) ASSERTION( index == 0 ) failed: ^M
[ &#160;371.066828] LustreError: 11993:0:(lov_object.c:551:lov_init_dom()) LBUG^M
[ &#160;371.068127] Pid: 11993, comm: rm 3.10.0-1160.45.1.el7.x86_64 #1 SMP Wed Oct 13 17:20:51 UTC 2021^M
[ &#160;371.069809] Call Trace:^M
[ &#160;371.070360] &#160;[&amp;lt;ffffffffc08b57cc&amp;gt;] libcfs_call_trace+0x8c/0xc0 [libcfs]^M
[ &#160;371.071728] &#160;[&amp;lt;ffffffffc08b587c&amp;gt;] lbug_with_loc+0x4c/0xa0 [libcfs]^M
[ &#160;371.073141] &#160;[&amp;lt;ffffffffc0d3df99&amp;gt;] lov_init_dom+0x8f9/0x990 [lov]^M
[ &#160;371.074443] &#160;[&amp;lt;ffffffffc0d3e85c&amp;gt;] lov_init_composite+0x82c/0xbe0 [lov]^M
[ &#160;371.075827] &#160;[&amp;lt;ffffffffc0d3ad00&amp;gt;] lov_object_init+0x130/0x300 [lov]^M
[ &#160;371.077174] &#160;[&amp;lt;ffffffffc0a3454b&amp;gt;] lu_object_start.isra.31+0x8b/0x120 [obdclass]^M
[ &#160;371.078719] &#160;[&amp;lt;ffffffffc0a37a54&amp;gt;] lu_object_find_at+0x234/0xab0 [obdclass]^M
[ &#160;371.080157] &#160;[&amp;lt;ffffffffc0a3830f&amp;gt;] lu_object_find_slice+0x1f/0x90 [obdclass]^M
[ &#160;371.081625] &#160;[&amp;lt;ffffffffc0a3cba2&amp;gt;] cl_object_find+0x32/0x60 [obdclass]^M
[ &#160;371.082966] &#160;[&amp;lt;ffffffffc0e9e599&amp;gt;] cl_file_inode_init+0x219/0x380 [lustre]^M
[ &#160;371.084399] &#160;[&amp;lt;ffffffffc0e78595&amp;gt;] ll_update_inode+0x2d5/0x5e0 [lustre]^M
[ &#160;371.085759] &#160;[&amp;lt;ffffffffc0e78907&amp;gt;] ll_read_inode2+0x67/0x420 [lustre]^M
[ &#160;371.087094] &#160;[&amp;lt;ffffffffc0e8661b&amp;gt;] ll_iget+0xdb/0x350 [lustre]^M
[ &#160;371.088315] &#160;[&amp;lt;ffffffffc0e7a6f3&amp;gt;] ll_prep_inode+0x253/0x970 [lustre]^M
[ &#160;371.089648] &#160;[&amp;lt;ffffffffc0e87653&amp;gt;] ll_lookup_it+0x523/0x1a20 [lustre]^M
[ &#160;371.090989] &#160;[&amp;lt;ffffffffc0e89f9b&amp;gt;] ll_lookup_nd+0xbb/0x190 [lustre]^M
[ &#160;371.092301] &#160;[&amp;lt;ffffffffa0c591d3&amp;gt;] lookup_real+0x23/0x60^M
[ &#160;371.093424] &#160;[&amp;lt;ffffffffa0c59bf2&amp;gt;] __lookup_hash+0x42/0x60^M
[ &#160;371.094571] &#160;[&amp;lt;ffffffffa0c60adc&amp;gt;] do_unlinkat+0x14c/0x2d0^M
[ &#160;371.095717] &#160;[&amp;lt;ffffffffa0c61bbb&amp;gt;] SyS_unlinkat+0x1b/0x40^M
[ &#160;371.096862] &#160;[&amp;lt;ffffffffa1195f92&amp;gt;] system_call_fastpath+0x25/0x2a^M
[ &#160;371.098126] &#160;[&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff^M
[ &#160;371.099214] Kernel panic - not syncing: LBUG^M
[ &#160;371.100077] CPU: 0 PID: 11993 Comm: rm Kdump: loaded Tainted: G &#160; &#160; &#160; &#160; &#160; OE &#160;------------ &#160; 3.10.0-1160.45.1.el7.x86_64 #1^M
[ &#160;371.102139] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011^M &lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>trevis-86 - servers with version=2.15.0_RC3, client b2_12 build 150 version=2.12.8</environment>
        <key id="70048">LU-15794</key>
            <summary>Downgrade client fails: LustreError: 11993:0:(lov_object.c:551:lov_init_dom()) ASSERTION( index == 0 ) failed:</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="1" iconUrl="https://jira.whamcloud.com/images/icons/statuses/open.png" description="The issue is open and ready for the assignee to start work on it.">Open</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="bobijam">Zhenyu Xu</assignee>
                                    <reporter username="cwhite_wc">Cliff White</reporter>
                        <labels>
                    </labels>
                <created>Wed, 27 Apr 2022 22:10:39 +0000</created>
                <updated>Wed, 15 Jun 2022 21:40:26 +0000</updated>
                                            <version>Lustre 2.15.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                                                                            <comments>
                            <comment id="333219" author="pjones" created="Wed, 27 Apr 2022 23:10:02 +0000"  >&lt;p&gt;Bobijam&lt;/p&gt;

&lt;p&gt;What are your thoughts on this error? Could it be related to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13249&quot; title=&quot;MDS getting: LustreError: 5095:0:(llog_osd.c:1244:llog_osd_open ()) ASSERTION( dt ) failed:&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13249&quot;&gt;LU-13249&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="333223" author="adilger" created="Wed, 27 Apr 2022 23:48:30 +0000"  >&lt;p&gt;Cliff, presumably the servers were originally running 2.12.8 before upgrading to 2.15.0-RC3?  Were the DOM files that are being removed originally created on 2.15.0 or 2.12.8?&lt;/p&gt;

&lt;p&gt;I think this looks like &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-15513&quot; title=&quot;crash in lod_fill_mirrors() with sparse OSTs + PFL&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-15513&quot;&gt;&lt;del&gt;LU-15513&lt;/del&gt;&lt;/a&gt; (something strange with the DoM layout on the client), and not related to llog at all (&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13249&quot; title=&quot;MDS getting: LustreError: 5095:0:(llog_osd.c:1244:llog_osd_open ()) ASSERTION( dt ) failed:&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13249&quot;&gt;LU-13249&lt;/a&gt; is on the server).  I see that this LASSERT is not present on master, having been removed by patch &lt;a href=&quot;https://review.whamcloud.com/35359&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/35359&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11421&quot; title=&quot;DoM: manual migration OST-MDT, MDT-MDT&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11421&quot;&gt;&lt;del&gt;LU-11421&lt;/del&gt;&lt;/a&gt; dom: manual OST-to-DOM migration via mirroring&lt;/tt&gt;&quot;:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
-       LASSERT(index == 0);
+       &lt;span class=&quot;code-comment&quot;&gt;/* DOM entry may be not zero index due to FLR but must start from 0 */&lt;/span&gt;
+       &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (unlikely(lle-&amp;gt;lle_extent-&amp;gt;e_start != 0)) {
+               CERROR(&lt;span class=&quot;code-quote&quot;&gt;&quot;%s: DOM entry must be the first stripe in a mirror\n&quot;&lt;/span&gt;,
+                      lov2obd(dev-&amp;gt;ld_lov)-&amp;gt;obd_name);
+               dump_lsm(D_ERROR, lov-&amp;gt;lo_lsm);
+               RETURN(-EINVAL);
+       }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Cliff, was the problematic file mirrored or migrated while running 2.15.0-RC3?  What does &quot;&lt;tt&gt;lfs getstripe&lt;/tt&gt;&quot; (from a 2.15 client) show?&lt;/p&gt;

&lt;p&gt;I think this looks more like a bug in 2.12 rather than 2.15 (patch was landed as commit v2_12_58-12-g44a721b8c1).  I &lt;em&gt;think&lt;/em&gt; patch has other dependencies (at least  &lt;a href=&quot;https://review.whamcloud.com/45549&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/45549&lt;/a&gt;).  At a minimum it would make sense to backport the LASSERT cleanups to b2_12 for 2.12.9?&lt;/p&gt;</comment>
                            <comment id="333226" author="JIRAUSER17408" created="Thu, 28 Apr 2022 00:06:34 +0000"  >&lt;p&gt;Sequence -&#160;&lt;/p&gt;

&lt;p&gt;Installed system w/2.12.8 (servers and client)&#160; -&#160;&lt;/p&gt;

&lt;p&gt;Upgraded OSS then MDS then client to 2.15.0-RC3&#160;&lt;/p&gt;

&lt;p&gt;Upon downgrade of client, LBUG is triggered&lt;/p&gt;

&lt;p&gt;LBUG continues after server downgrade.&#160;&lt;/p&gt;

&lt;p&gt;No errors at all on the server side, appears to be triggered by something in initial sanity.sh setup as we never get past the startup.&#160;&lt;/p&gt;

&lt;p&gt;MDS log:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 2325.482110] Lustre: DEBUG MARKER: trevis-86vm2.trevis.whamcloud.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 48
[ 2553.048674] Lustre: MGS: haven&apos;t heard from client 84d85de2-7af6-f334-55a2-6f84ca2afbe9 (at 10.240.43.40@tcp) in 230 seconds. I think it&apos;s dead, and I am evicting it. exp ffff89d48bab8c00, cur 1651103424 expire 1651103274 last 1651103194 &lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&#160;Client suite log:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
-----============= acceptance-small: sanity ============----- Wed Apr 27 23:46:31 UTC 2022
excepting tests: 103a 103b 103c 104a 160c 161a 161b 161c 208 220 225a 225b 228b 255a 255b 407 253 312 42a 42b 42c 77k
skipping tests SLOW=no: 27m 64b 68 71 115 300o
trevis-86vm1.trevis.whamcloud.com: executing check_config_client /mnt/lustre
trevis-86vm1.trevis.whamcloud.com: Checking config lustre mounted on /mnt/lustre
Checking servers environments
Checking clients trevis-86vm1.trevis.whamcloud.com environments
Using TIMEOUT=100
osc.lustre-OST0000-osc-ffff974f7a0eb800.idle_timeout=debug
disable quota as required
trevis-86vm2: trevis-86vm2.trevis.whamcloud.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 48
trevis-86vm3: trevis-86vm3.trevis.whamcloud.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 48
osd-ldiskfs.track_declares_assert=1
osd-ldiskfs.track_declares_assert=1
~ &lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="333227" author="JIRAUSER17408" created="Thu, 28 Apr 2022 00:20:33 +0000"  >&lt;p&gt;tail of Client suite log, set -x&#160;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;++ for var in LNETLND NETTYPE
++ &apos;[&apos; -n tcp &apos;]&apos;
++ echo -n &apos; NETTYPE=tcp&apos;
+ pdsh -t 300 -S -w trevis-86vm2,trevis-86vm3 &apos;(PATH=$PATH:/usr/lib64/lustre/utils:/usr/lib64/lustre/tests:/sbin:/usr/sbin; cd /usr/lib64/lustre/tests; LUSTRE=&quot;/usr/lib64/lustre&quot; &#160;mds1_FSTYPE=ldiskfs ost1_FSTYPE=ldiskfs VERBOSE=false FSTYPE=ldiskfs NETTYPE=tcp sh -c &quot;/usr/sbin/lctl set_param
&#160; &#160; &#160;osd-ldiskfs.track_declares_assert=1 || true&quot;)&apos;
osd-ldiskfs.track_declares_assert=1 &lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Systems are still up (trevis-86vm&lt;span class=&quot;error&quot;&gt;&amp;#91;1-3&amp;#93;&lt;/span&gt;)&#160;&#160;&lt;/p&gt;

&lt;p&gt;I will start the 2.14 up/down testing tomorrow on these nodes unless otherwise advised.&#160;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="67153">LU-15219</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="53400">LU-11421</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="43435" name="lbug.downgrade.2.12.txt.gz" size="12610" author="cwhite_wc" created="Wed, 27 Apr 2022 22:10:35 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i02oef:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>