<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:20:36 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-15706] &quot;lctl pool_destroy&quot; can&apos;t work correctly due to &quot;SKIP&quot; records</title>
                <link>https://jira.whamcloud.com/browse/LU-15706</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;&quot;lctl pool_destroy&quot; can&apos;t work correctly when the MGS and MDT0 are on different nodes.&lt;/p&gt;

&lt;p&gt;Then I did the following test on two nodes (centos7-2 and centos7-4) and found it failed because it didn&apos;t process &quot;SKIP&quot; pool llog records properly.&lt;/p&gt;

&lt;p&gt;centos7-2 (MDT+2OST+client):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@centos7-2 tests]# MGSDEV=/tmp/lustre-mgs mgs_HOST=centos7-4 mdt_HOST=centos7-2 ost1_HOST=centos7-2 ost2_HOST=centos7-2 PDSH=&quot;pdsh -S -Rssh -w&quot; sh llmount.sh
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;centos7-4(MGS):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@centos7-4 tests]# lctl pool_new lustre.testpool
Pool lustre.testpool created
[root@centos7-4 tests]# lctl pool_add lustre.testpool OST0000
OST lustre-OST0000_UUID added to pool lustre.testpool
[root@centos7-4 tests]# lctl pool_add lustre.testpool OST0001
OST lustre-OST0001_UUID added to pool lustre.testpool
[root@centos7-4 tests]# lctl pool_destroy lustre.testpool
Pool lustre.testpool not empty, please remove all members
pool_destroy: Directory not empty
[root@centos7-4 tests]# lctl pool_remove lustre.testpool OST0000
OST lustre-OST0000_UUID removed from pool lustre.testpool
[root@centos7-4 tests]# lctl pool_destroy lustre.testpool
Pool lustre.testpool destroyed
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&quot;testpool&quot; was destroyed wrongly even though OST0001 was still in the pool.&lt;/p&gt;

&lt;p&gt;After checking the llog records, I found &quot;SKIP pool add 0:lustre-clilov&#160; 1:lustre&#160; 2:testpool&#160; 3:lustre-OST0000_UUID&quot; was skipped.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;#43 (112)SKIP pool new 0:lustre-clilov  1:lustre  2:testpool  
#44 (224)SKIP END   marker  29 (flags=0x06, v2.15.0.0) lustre-clilov   &apos;new lustre.testpool&apos; Wed Mar 30 11:02:17 2022-Wed Mar 30 11:02:51 2022
#45 (224)SKIP START marker  31 (flags=0x05, v2.15.0.0) lustre-clilov   &apos;add lustre.testpool.lustre-OST0000_UUID&apos; Wed Mar 30 11:02:28 2022-Wed Mar 30 11:02:49 2022
#46 (136)SKIP pool add 0:lustre-clilov  1:lustre  2:testpool  3:lustre-OST0000_UUID  
#48 (224)SKIP END   marker  31 (flags=0x06, v2.15.0.0) lustre-clilov   &apos;add lustre.testpool.lustre-OST0000_UUID&apos; Wed Mar 30 11:02:28 2022-Wed Mar 30 11:02:49 2022
#49 (224)marker  33 (flags=0x01, v2.15.0.0) lustre-clilov   &apos;add lustre.testpool.lustre-OST0001_UUID&apos; Wed Mar 30 11:02:30 2022-
#50 (136)pool add 0:lustre-clilov  1:lustre  2:testpool  3:lustre-OST0001_UUID  
#51 (224)END   marker  33 (flags=0x02, v2.15.0.0) lustre-clilov   &apos;add lustre.testpool.lustre-OST0001_UUID&apos; Wed Mar 30 11:02:30 2022-
#52 (224)marker  35 (flags=0x01, v2.15.0.0) lustre-clilov   &apos;rem lustre.testpool.lustre-OST0000_UUID&apos; Wed Mar 30 11:02:49 2022-
#53 (136)pool remove 0:lustre-clilov  1:lustre  2:testpool  3:lustre-OST0000_UUID  
#54 (224)END   marker  35 (flags=0x02, v2.15.0.0) lustre-clilov   &apos;rem lustre.testpool.lustre-OST0000_UUID&apos; Wed Mar 30 11:02:49 2022-
#55 (224)marker  37 (flags=0x01, v2.15.0.0) lustre-clilov   &apos;del lustre.testpool&apos; Wed Mar 30 11:02:51 2022-
#56 (112)pool destroy 0:lustre-clilov  1:lustre  2:testpool  
#57 (224)END   marker  37 (flags=0x02, v2.15.0.0) lustre-clilov   &apos;del lustre.testpool&apos; Wed Mar 30 11:02:51 2022-
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;I will look into this issue.&lt;/p&gt;</description>
                <environment></environment>
        <key id="69495">LU-15706</key>
            <summary>&quot;lctl pool_destroy&quot; can&apos;t work correctly due to &quot;SKIP&quot; records</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="emoly.liu">Emoly Liu</assignee>
                                    <reporter username="emoly.liu">Emoly Liu</reporter>
                        <labels>
                    </labels>
                <created>Wed, 30 Mar 2022 07:45:05 +0000</created>
                <updated>Fri, 20 Oct 2023 11:49:42 +0000</updated>
                            <resolved>Mon, 27 Jun 2022 15:52:36 +0000</resolved>
                                                    <fixVersion>Lustre 2.16.0</fixVersion>
                    <fixVersion>Lustre 2.15.2</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="330611" author="gerrit" created="Wed, 30 Mar 2022 12:41:17 +0000"  >&lt;p&gt;&quot;Emoly Liu &amp;lt;emoly@whamcloud.com&amp;gt;&quot; uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/46951&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/46951&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-15706&quot; title=&quot;&amp;quot;lctl pool_destroy&amp;quot; can&amp;#39;t work correctly due to &amp;quot;SKIP&amp;quot; records&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-15706&quot;&gt;&lt;del&gt;LU-15706&lt;/del&gt;&lt;/a&gt; lctl: deal with &quot;SKIP&quot; pool record correctly&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 67aca5483a7718c75b82b9fb20c3ad1fccd4b700&lt;/p&gt;</comment>
                            <comment id="330784" author="emoly.liu" created="Fri, 1 Apr 2022 03:26:31 +0000"  >&lt;p&gt;During Maloo testing and my local testing, I also noticed the following issue: if jt_llog_print_iter() needs multiple iterations of its for loop to fetch the llog records, the &quot;boundary&quot; record is easily mishandled by llog_print_cb()-&amp;gt;class_config_yaml_output(), being skipped or not skipped wrongly.&#160;&lt;/p&gt;

&lt;p&gt;For example, in the failure at &lt;a href=&quot;https://testing.whamcloud.com/test_logs/ca4871c9-d298-4a2a-99f6-7449aa6fb803/show_text&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_logs/ca4871c9-d298-4a2a-99f6-7449aa6fb803/show_text&lt;/a&gt; , it says &quot;trevis-39vm5: OST lustre-OST0006_UUID is not part of the &apos;lustre&apos; fs.&quot;. I can reproduce this issue locally, and I found the reason as follows:&lt;br/&gt;
The original llog records output by debugfs are:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;#23 (128)attach    0:lustre-OST0000-osc  1:osc  2:lustre-clilov_UUID
#24 (144)setup     0:lustre-OST0000-osc  1:lustre-OST0000_UUID  2:192.168.0.118@tcp
#25 (128)lov_modify_tgts add 0:lustre-clilov  1:lustre-OST0000_UUID  2:0  3:1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;But, the record output by class_config_yaml_output() shows 24th and 25th records were skipped wrongly, that is why OST0000 can&apos;t be found.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;- { index: 22, event: add_uuid, nid: 192.168.0.118@tcp(0x20000c0a80076), node: 192.168.0.118@tcp }
- { index: 23, event: attach, device: lustre-OST0000-osc, type: osc, UUID: lustre-clilov_UUID }
======BOUNDARY======
- { index: 31, event: add_uuid, nid: 192.168.0.118@tcp(0x20000c0a80076), node: 192.168.0.118@tcp }
- { index: 32, event: attach, device: lustre-OST0001-osc, type: osc, UUID: lustre-clilov_UUID }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;Also, ost-pools.sh test23a failed due to &quot;Pool lustre.testpool not empty, please remove all members pool_destroy: Directory not empty &quot;, because the 1412th record was not skipped correctly.&lt;/p&gt;

&lt;p&gt;The original llog records output by debugfs are:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;#1412 (136)SKIP pool add 0:lustre-clilov  1:lustre  2:testpool  3:lustre-OST0003_UUID
#1413 (224)SKIP END   marker 939 (flags=0x06, v2.15.0.0) lustre-clilov   &apos;add lustre.testpool.lustre-OST0003_UUID&apos; Fri Apr  1 08:47:01 2022-Fri Apr  1 08:47:02 2022
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;But class_config_yaml_output() didn&apos;t add SKIP to this record properly, so that OST0003 was considered still in testpool wrongly.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;=====BOUNDARY=====
- { index: 1412, event: add_pool, device: lustre-clilov, fsname: lustre, pool: testpool, ost: lustre-OST0003_UUID }
- { index: 1428, event: remove_pool, device: lustre-clilov, fsname: lustre, pool: testpool, ost: lustre-OST0001_UUID }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;I will look into the following part of code in class_config_yaml_output() and make a fix.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;        if (lcfg-&amp;gt;lcfg_command == LCFG_MARKER) {
                struct cfg_marker *marker = lustre_cfg_buf(lcfg, 1);

                lustre_swab_cfg_marker(marker, swab,
                                       LUSTRE_CFG_BUFLEN(lcfg, 1));
                if (marker-&amp;gt;cm_flags &amp;amp; CM_START) {
                        *cfg_flags = CFG_F_MARKER;
                        if (marker-&amp;gt;cm_flags &amp;amp; CM_SKIP)
                                *cfg_flags = CFG_F_SKIP;
                } else if (marker-&amp;gt;cm_flags &amp;amp; CM_END) {
                        *cfg_flags = 0;
                }
                if (likely(!raw))
                        return 0;
        }

        /* entries outside marker are skipped */
        if (!(*cfg_flags &amp;amp; CFG_F_MARKER) &amp;amp;&amp;amp; !raw)
                return 0;

        /* inside skipped marker */
        if (*cfg_flags &amp;amp; CFG_F_SKIP &amp;amp;&amp;amp; !raw)
                return 0;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="331184" author="emoly.liu" created="Wed, 6 Apr 2022 10:04:28 +0000"  >&lt;p&gt;The root cause of this issue is that if the first several records in one ioctl request are ones &quot;protected&quot; by a marker, they won&apos;t know their &quot;SKIP or not&quot; flag, and will then be mis-labeled in class_config_yaml_output().&lt;/p&gt;

&lt;p&gt;I&apos;m trying to remember the flag in llog_print_cb() to fix this issue.&lt;/p&gt;</comment>
                            <comment id="338853" author="gerrit" created="Mon, 27 Jun 2022 04:53:01 +0000"  >&lt;p&gt;&quot;Oleg Drokin &amp;lt;green@whamcloud.com&amp;gt;&quot; merged in patch &lt;a href=&quot;https://review.whamcloud.com/46951/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/46951/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-15706&quot; title=&quot;&amp;quot;lctl pool_destroy&amp;quot; can&amp;#39;t work correctly due to &amp;quot;SKIP&amp;quot; records&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-15706&quot;&gt;&lt;del&gt;LU-15706&lt;/del&gt;&lt;/a&gt; llog: deal with &quot;SKIP&quot; pool llog records correctly&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 474f2670d63b66a77ee3acb72b18bc7b5afbec84&lt;/p&gt;</comment>
                            <comment id="338911" author="pjones" created="Mon, 27 Jun 2022 15:52:36 +0000"  >&lt;p&gt;Landed for 2.16&lt;/p&gt;</comment>
                            <comment id="354040" author="gerrit" created="Thu, 24 Nov 2022 07:16:19 +0000"  >&lt;p&gt;&quot;Li Dongyang &amp;lt;dongyangli@ddn.com&amp;gt;&quot; uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/49230&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/49230&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-15706&quot; title=&quot;&amp;quot;lctl pool_destroy&amp;quot; can&amp;#39;t work correctly due to &amp;quot;SKIP&amp;quot; records&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-15706&quot;&gt;&lt;del&gt;LU-15706&lt;/del&gt;&lt;/a&gt; llog: deal with &quot;SKIP&quot; pool llog records correctly&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_15&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 8f3127f0e20992a630838db5f65daf4b008c5dc3&lt;/p&gt;</comment>
                            <comment id="355234" author="gerrit" created="Tue, 6 Dec 2022 02:54:27 +0000"  >&lt;p&gt;&quot;Oleg Drokin &amp;lt;green@whamcloud.com&amp;gt;&quot; merged in patch &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/49230/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/49230/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-15706&quot; title=&quot;&amp;quot;lctl pool_destroy&amp;quot; can&amp;#39;t work correctly due to &amp;quot;SKIP&amp;quot; records&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-15706&quot;&gt;&lt;del&gt;LU-15706&lt;/del&gt;&lt;/a&gt; llog: deal with &quot;SKIP&quot; pool llog records correctly&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_15&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 70accf10fac5032e185d981b0a0193aad9980371&lt;/p&gt;</comment>
                            <comment id="390045" author="gerrit" created="Fri, 20 Oct 2023 11:49:42 +0000"  >&lt;p&gt;&quot;Etienne AUJAMES &amp;lt;eaujames@ddn.com&amp;gt;&quot; uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/c/fs/lustre-release/+/52773&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/c/fs/lustre-release/+/52773&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-15706&quot; title=&quot;&amp;quot;lctl pool_destroy&amp;quot; can&amp;#39;t work correctly due to &amp;quot;SKIP&amp;quot; records&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-15706&quot;&gt;&lt;del&gt;LU-15706&lt;/del&gt;&lt;/a&gt; llog: deal with &quot;SKIP&quot; pool llog records correctly&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 30a7ee61140655d2dac1fd43ade538a71b384787&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="66773">LU-15142</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="73314">LU-16324</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i02m13:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>