<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:41:47 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4334] With ZFS can only declare a single mgsnode for MDT or OST</title>
                <link>https://jira.whamcloud.com/browse/LU-4334</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;When trying to declare two mgsnodes for HA only one entry is ever accepted when using backfstype=zfs. All below examples fail, if the --mgsnode=nid:nid syntax worked then all might be okay.&lt;/p&gt;

&lt;p&gt;mkfs.lustre --reformat --fsname=RSF1 --ost --index=2 --mgsnode=10.82.0.9@tcp1 --mgsnode=10.82.0.9@tcp1:10.82.0.10@tcp1 --servicenode=10.82.0.11@tcp1:10.82.0.12@tcp1 --backfstype=zfs OST2/ost&lt;/p&gt;

&lt;p&gt;mkfs.lustre --reformat --fsname=RSF1 --ost --index=2 --mgsnode=10.82.0.9@tcp1:10.82.0.10@tcp1 --servicenode=10.82.0.11@tcp1:10.82.0.12@tcp1 --backfstype=zfs OST2/ost&lt;/p&gt;</description>
                <environment>SL6.4</environment>
        <key id="22291">LU-4334</key>
            <summary>With ZFS can only declare a single mgsnode for MDT or OST</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="utopiabound">Nathaniel Clark</assignee>
                                    <reporter username="ekolb">Eric Kolb</reporter>
                        <labels>
                            <label>llnl</label>
                            <label>prz</label>
                            <label>zfs</label>
                    </labels>
                <created>Mon, 2 Dec 2013 18:44:28 +0000</created>
                <updated>Mon, 3 Oct 2016 22:50:42 +0000</updated>
                            <resolved>Thu, 11 Sep 2014 19:33:15 +0000</resolved>
                                    <version>Lustre 2.4.1</version>
                    <version>Lustre 2.6.0</version>
                    <version>Lustre 2.5.2</version>
                                    <fixVersion>Lustre 2.7.0</fixVersion>
                    <fixVersion>Lustre 2.5.4</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>14</watches>
                                                                            <comments>
                            <comment id="72630" author="ekolb" created="Mon, 2 Dec 2013 18:49:23 +0000"  >&lt;p&gt;Perhaps the below sequence displays the issue more clearly.&lt;/p&gt;

&lt;p&gt;$ tunefs.lustre  OST2/ost&lt;br/&gt;
checking for existing Lustre data: found&lt;/p&gt;

&lt;p&gt;   Read previous values:&lt;br/&gt;
Target:     RSF1-OST0002&lt;br/&gt;
Index:      2&lt;br/&gt;
Lustre FS:  RSF1&lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1002&lt;br/&gt;
              (OST no_primnode )&lt;br/&gt;
Persistent mount opts: &lt;br/&gt;
Parameters: failover.node=10.82.0.11@tcp1:10.82.0.12@tcp1 mgsnode=10.82.0.9@tcp1&lt;/p&gt;


&lt;p&gt;   Permanent disk data:&lt;br/&gt;
Target:     RSF1-OST0002&lt;br/&gt;
Index:      2&lt;br/&gt;
Lustre FS:  RSF1&lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1002&lt;br/&gt;
              (OST no_primnode )&lt;br/&gt;
Persistent mount opts: &lt;br/&gt;
Parameters: failover.node=10.82.0.11@tcp1:10.82.0.12@tcp1 mgsnode=10.82.0.9@tcp1&lt;/p&gt;

&lt;p&gt;Writing OST2/ost properties&lt;br/&gt;
  lustre:version=1&lt;br/&gt;
  lustre:flags=4098&lt;br/&gt;
  lustre:index=2&lt;br/&gt;
  lustre:fsname=RSF1&lt;br/&gt;
  lustre:svname=RSF1-OST0002&lt;br/&gt;
  lustre:failover.node=10.82.0.11@tcp1:10.82.0.12@tcp1&lt;br/&gt;
  lustre:mgsnode=10.82.0.9@tcp1&lt;/p&gt;

&lt;p&gt;$ tunefs.lustre --mgsnode=10.82.0.9@tcp1 --mgsnode=10.82.0.10@tcp1 OST2/ost&lt;br/&gt;
checking for existing Lustre data: found&lt;/p&gt;

&lt;p&gt;   Read previous values:&lt;br/&gt;
Target:     RSF1-OST0002&lt;br/&gt;
Index:      2&lt;br/&gt;
Lustre FS:  RSF1&lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1002&lt;br/&gt;
              (OST no_primnode )&lt;br/&gt;
Persistent mount opts: &lt;br/&gt;
Parameters: failover.node=10.82.0.11@tcp1:10.82.0.12@tcp1 mgsnode=10.82.0.9@tcp1&lt;/p&gt;


&lt;p&gt;   Permanent disk data:&lt;br/&gt;
Target:     RSF1-OST0002&lt;br/&gt;
Index:      2&lt;br/&gt;
Lustre FS:  RSF1&lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1002&lt;br/&gt;
              (OST no_primnode )&lt;br/&gt;
Persistent mount opts: &lt;br/&gt;
Parameters: failover.node=10.82.0.11@tcp1:10.82.0.12@tcp1 mgsnode=10.82.0.9@tcp1 mgsnode=10.82.0.10@tcp1&lt;/p&gt;

&lt;p&gt;Writing OST2/ost properties&lt;br/&gt;
  lustre:version=1&lt;br/&gt;
  lustre:flags=4098&lt;br/&gt;
  lustre:index=2&lt;br/&gt;
  lustre:fsname=RSF1&lt;br/&gt;
  lustre:svname=RSF1-OST0002&lt;br/&gt;
  lustre:failover.node=10.82.0.11@tcp1:10.82.0.12@tcp1&lt;br/&gt;
  lustre:mgsnode=10.82.0.9@tcp1&lt;br/&gt;
  lustre:mgsnode=10.82.0.10@tcp1&lt;/p&gt;

&lt;p&gt;$ tunefs.lustre  OST2/ost&lt;br/&gt;
checking for existing Lustre data: found&lt;/p&gt;

&lt;p&gt;   Read previous values:&lt;br/&gt;
Target:     RSF1-OST0002&lt;br/&gt;
Index:      2&lt;br/&gt;
Lustre FS:  RSF1&lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1002&lt;br/&gt;
              (OST no_primnode )&lt;br/&gt;
Persistent mount opts: &lt;br/&gt;
Parameters: failover.node=10.82.0.11@tcp1:10.82.0.12@tcp1 mgsnode=10.82.0.10@tcp1&lt;/p&gt;


&lt;p&gt;   Permanent disk data:&lt;br/&gt;
Target:     RSF1-OST0002&lt;br/&gt;
Index:      2&lt;br/&gt;
Lustre FS:  RSF1&lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1002&lt;br/&gt;
              (OST no_primnode )&lt;br/&gt;
Persistent mount opts: &lt;br/&gt;
Parameters: failover.node=10.82.0.11@tcp1:10.82.0.12@tcp1 mgsnode=10.82.0.10@tcp1&lt;/p&gt;

&lt;p&gt;Writing OST2/ost properties&lt;br/&gt;
  lustre:version=1&lt;br/&gt;
  lustre:flags=4098&lt;br/&gt;
  lustre:index=2&lt;br/&gt;
  lustre:fsname=RSF1&lt;br/&gt;
  lustre:svname=RSF1-OST0002&lt;br/&gt;
  lustre:failover.node=10.82.0.11@tcp1:10.82.0.12@tcp1&lt;br/&gt;
  lustre:mgsnode=10.82.0.10@tcp1&lt;/p&gt;</comment>
                            <comment id="73957" author="jslandry" created="Fri, 20 Dec 2013 18:42:37 +0000"  >&lt;p&gt;Hi, this syntax works.  --mgsnode=node1:node2&lt;/p&gt;


&lt;ol&gt;
	&lt;li&gt;tunefs.lustre lustre1-ost4/ost0&lt;br/&gt;
checking for existing Lustre data: found&lt;/li&gt;
&lt;/ol&gt;


&lt;p&gt;   Read previous values:&lt;br/&gt;
Target:     lustre1-OST0004&lt;br/&gt;
Index:      4&lt;br/&gt;
Lustre FS:  lustre1 &lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1022  &lt;br/&gt;
              (OST first_time no_primnode )&lt;br/&gt;
Persistent mount opts:&lt;br/&gt;
Parameters: mgsnode=10.225.8.3@o2ib failover.node=10.225.4.4@o2ib&lt;/p&gt;


&lt;p&gt;   Permanent disk data:&lt;br/&gt;
Target:     lustre1:OST0004&lt;br/&gt;
Index:      4&lt;br/&gt;
Lustre FS:  lustre1 &lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1022  &lt;br/&gt;
              (OST first_time no_primnode )&lt;br/&gt;
Persistent mount opts:&lt;br/&gt;
Parameters: mgsnode=10.225.8.3@o2ib failover.node=10.225.4.4@o2ib&lt;/p&gt;

&lt;p&gt;Writing lustre1-ost4/ost0 properties&lt;br/&gt;
  lustre:version=1  &lt;br/&gt;
  lustre:flags=4130 &lt;br/&gt;
  lustre:index=4&lt;br/&gt;
  lustre:fsname=lustre1&lt;br/&gt;
  lustre:svname=lustre1:OST0004&lt;br/&gt;
  lustre:mgsnode=10.225.8.3@o2ib&lt;br/&gt;
  lustre:failover.node=10.225.4.4@o2ib&lt;/p&gt;



&lt;ol&gt;
	&lt;li&gt;tunefs.lustre --mgsnode=mds1-225@o2ib:mds2-225@o2ib lustre1-ost4/ost0&lt;br/&gt;
checking for existing Lustre data: found&lt;/li&gt;
&lt;/ol&gt;


&lt;p&gt;   Read previous values:&lt;br/&gt;
Target:     lustre1-OST0004&lt;br/&gt;
Index:      4&lt;br/&gt;
Lustre FS:  lustre1 &lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1022  &lt;br/&gt;
              (OST first_time no_primnode )&lt;br/&gt;
Persistent mount opts:&lt;br/&gt;
Parameters: mgsnode=10.225.8.3@o2ib failover.node=10.225.4.4@o2ib&lt;/p&gt;


&lt;p&gt;   Permanent disk data:&lt;br/&gt;
Target:     lustre1:OST0004&lt;br/&gt;
Index:      4&lt;br/&gt;
Lustre FS:  lustre1 &lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1022  &lt;br/&gt;
              (OST first_time no_primnode )&lt;br/&gt;
Persistent mount opts:&lt;br/&gt;
Parameters: mgsnode=10.225.8.3@o2ib failover.node=10.225.4.4@o2ib mgsnode=10.225.8.2@o2ib:10.225.8.3@o2ib&lt;/p&gt;

&lt;p&gt;Writing lustre1-ost4/ost0 properties&lt;br/&gt;
  lustre:version=1  &lt;br/&gt;
  lustre:flags=4130 &lt;br/&gt;
  lustre:index=4&lt;br/&gt;
  lustre:fsname=lustre1&lt;br/&gt;
  lustre:svname=lustre1:OST0004&lt;br/&gt;
  lustre:mgsnode=10.225.8.3@o2ib&lt;br/&gt;
  lustre:failover.node=10.225.4.4@o2ib&lt;br/&gt;
  lustre:mgsnode=10.225.8.2@o2ib:10.225.8.3@o2ib&lt;/p&gt;



&lt;ol&gt;
	&lt;li&gt;tunefs.lustre lustre1-ost4/ost0&lt;br/&gt;
checking for existing Lustre data: found&lt;/li&gt;
&lt;/ol&gt;


&lt;p&gt;   Read previous values:&lt;br/&gt;
Target:     lustre1-OST0004&lt;br/&gt;
Index:      4&lt;br/&gt;
Lustre FS:  lustre1 &lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1022  &lt;br/&gt;
              (OST first_time no_primnode )&lt;br/&gt;
Persistent mount opts:&lt;br/&gt;
Parameters: mgsnode=10.225.8.2@o2ib:10.225.8.3@o2ib failover.node=10.225.4.4@o2ib&lt;/p&gt;


&lt;p&gt;   Permanent disk data:&lt;br/&gt;
Target:     lustre1:OST0004&lt;br/&gt;
Index:      4&lt;br/&gt;
Lustre FS:  lustre1 &lt;br/&gt;
Mount type: zfs&lt;br/&gt;
Flags:      0x1022  &lt;br/&gt;
              (OST first_time no_primnode )&lt;br/&gt;
Persistent mount opts:&lt;br/&gt;
Parameters: mgsnode=10.225.8.2@o2ib:10.225.8.3@o2ib failover.node=10.225.4.4@o2ib&lt;/p&gt;

&lt;p&gt;Writing lustre1-ost4/ost0 properties&lt;br/&gt;
  lustre:version=1  &lt;br/&gt;
  lustre:flags=4130 &lt;br/&gt;
  lustre:index=4&lt;br/&gt;
  lustre:fsname=lustre1&lt;br/&gt;
  lustre:svname=lustre1:OST0004&lt;br/&gt;
  lustre:mgsnode=10.225.8.2@o2ib:10.225.8.3@o2ib&lt;br/&gt;
  lustre:failover.node=10.225.4.4@o2ib&lt;/p&gt;</comment>
                            <comment id="74042" author="ekolb" created="Mon, 23 Dec 2013 17:43:21 +0000"  >&lt;p&gt;Hello,&lt;/p&gt;

&lt;p&gt;Yes the --mgsnode=nid:nid setting can be applied to the MDTs and OSTs but the fail-over does not occur. The Lustre components seem only to use the first nid in the specified list and upon fail-over of the MGS they will not use the second nid specified. &lt;/p&gt;</comment>
                            <comment id="82461" author="utopiabound" created="Fri, 25 Apr 2014 12:19:43 +0000"  >&lt;p&gt;It looks like there are two separate issues here:&lt;/p&gt;

&lt;p&gt;1) listing failover or mgsnode in the form --mgsnode=NID1 --mgsnode=NID2 or --mgsnode=NID1,NID2 will result in only NID2 being recorded.&lt;/p&gt;

&lt;p&gt;This seems to be due to how metadata is stored on zfs, that property names are unique, thus setting it twice will just overwrite the first with the second.&lt;/p&gt;

&lt;p&gt;2) listing nids in the form --mgsnode=NID1:NID2 will result in only NID1 being used&lt;/p&gt;</comment>
                            <comment id="86047" author="morrone" created="Fri, 6 Jun 2014 20:43:34 +0000"  >&lt;p&gt;As part of fixing this issue, we need to make certain that the relevant OSD documentation is updated to clearly define the APIs and expectations for the OSD developer.&lt;/p&gt;</comment>
                            <comment id="89290" author="utopiabound" created="Wed, 16 Jul 2014 20:48:27 +0000"  >&lt;p&gt;It seems like the right idea would be to store NID information in a single ZFS property &amp;lt;server1ip1&amp;gt;@tcp,&amp;lt;server1ip2&amp;gt;@tcp:&amp;lt;server2ip1&amp;gt;@tcp,&amp;lt;server2ip2&amp;gt;@tcp similar to how it can be input on the command line.&lt;/p&gt;

&lt;p&gt;This will apply to mgsnode, failnode, and servicenode.&lt;/p&gt;</comment>
                            <comment id="89622" author="utopiabound" created="Mon, 21 Jul 2014 14:51:40 +0000"  >&lt;p&gt;This will fix setting multiple mgsnode properties on ZFS.&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/11161&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/11161&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;b&gt;Workaround for older systems:&lt;/b&gt;&lt;/p&gt;

&lt;p&gt;Instead of&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;tunefs.lustre --mgsnode=192.168.139.10@tcp --mgsnode=192.168.139.70@tcp mdt/mdt1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Use the following:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;zfs set lustre:mgsnode=192.168.139.10@tcp:192.168.139.70@tcp mdt/mdt1 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="93803" author="pjones" created="Thu, 11 Sep 2014 19:33:15 +0000"  >&lt;p&gt;Landed for 2.7&lt;/p&gt;</comment>
                            <comment id="96206" author="chris" created="Mon, 13 Oct 2014 12:27:43 +0000"  >&lt;p&gt;Does&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;zfs set lustre:mgsnode=192.168.139.10@tcp:192.168.139.70@tcp mdt/mdt1 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;imply that &apos;:&apos; is the standard separator for zfs properties, for example in the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4749&quot; title=&quot;ZFS-backed OST  mkfs.lustre --servicenode does not correctly add failover_nids&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4749&quot;&gt;&lt;del&gt;LU-4749&lt;/del&gt;&lt;/a&gt; is failnode split the same way.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="23591">LU-4749</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="37695">LU-8311</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                    <customfield id="customfield_10030" key="com.atlassian.jira.plugin.system.customfieldtypes:labels">
                        <customfieldname>Epic/Theme</customfieldname>
                        <customfieldvalues>
                                        <label>zfs</label>
    
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwae7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>11857</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10020"><![CDATA[1]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>