<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:51:21 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5421] MGS and MDT with dual ib port</title>
                <link>https://jira.whamcloud.com/browse/LU-5421</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Dear Team,&lt;/p&gt;

&lt;p&gt;We are looking for HA in terms of IO node, IB port failure.&lt;/p&gt;

&lt;p&gt;In our setup&lt;br/&gt;
IO1  Role MGS, MDT and OST&lt;br/&gt;
IO2  Role MGS, MDT and OST&lt;/p&gt;

&lt;p&gt;2 IB switch independently connected with IO1 and IO2 server.&lt;/p&gt;

&lt;p&gt;IO1-ib0 192.168.2.101&lt;br/&gt;
IO1-ib1 192.168.2.102&lt;br/&gt;
IO2-ib0 192.168.3.101&lt;br/&gt;
I02-ib1  192.168.3.102&lt;/p&gt;

&lt;p&gt;LNET options lnet networks=o2ib0(ib0),o2ib1(ib1)&lt;br/&gt;
Lustre setup&lt;br/&gt;
Prebuilt server rpm&lt;br/&gt;
IO1 and IO2 servers would be on both core networks, o2ib0 and o2ib1&lt;/p&gt;

&lt;p&gt; ssh IO2 &quot;mkfs.lustre --reformat --fsname=lustre --mdt --mgs --index=0 --failnode=192.168.3.101@o2ib1 --failnode=192.168.2.102@o2ib0 --failnode=192.168.3.102@o2ib1 /dev/drbd0&quot;&lt;/p&gt;


&lt;p&gt;ssh IO2 &quot;mkfs.lustre --reformat --fsname=lustre --ost --index=0 \&lt;br/&gt;
							--mgsnode=192.168.2.101@o2ib0 \&lt;br/&gt;
							--mgsnode=192.168.3.101@o2ib1 \&lt;br/&gt;
							--mgsnode=192.168.2.102@o2ib0 \&lt;br/&gt;
							--mgsnode=192.168.3.102@o2ib1 \&lt;br/&gt;
							--failnode=192.168.2.101@o2ib0 \&lt;br/&gt;
							--failnode=192.168.3.101@o2ib1 \&lt;br/&gt;
							/dev/mapper/ost-0&quot;&lt;br/&gt;
ssh IO2 &quot;mount -t lustre /dev/mapper/ost-0 /OST0&quot;&lt;br/&gt;
ssh IO2 &quot;mkfs.lustre --reformat  --fsname=lustre --ost --index=1 \&lt;br/&gt;
							--mgsnode=192.168.2.101@o2ib0 \&lt;br/&gt;
							--mgsnode=192.168.3.101@o2ib1 \&lt;br/&gt;
							--mgsnode=192.168.2.102@o2ib0 \&lt;br/&gt;
							--mgsnode=192.168.3.102@o2ib1 \&lt;br/&gt;
							--failnode=192.168.2.101@o2ib0 \&lt;br/&gt;
							--failnode=192.168.3.101@o2ib1 \&lt;br/&gt;
							/dev/mapper/ost-1&quot;&lt;br/&gt;
ssh IO2 &quot;mount -t lustre /dev/mapper/ost-1 /OST1&quot;&lt;/p&gt;

&lt;p&gt;ssh IO2 &quot;mkfs.lustre --reformat  --fsname=lustre --ost --index=2 \&lt;br/&gt;
							--mgsnode=192.168.2.101@o2ib0 \&lt;br/&gt;
							--mgsnode=192.168.3.101@o2ib1 \&lt;br/&gt;
							--mgsnode=192.168.2.102@o2ib0 \&lt;br/&gt;
							--mgsnode=192.168.3.102@o2ib1 \&lt;br/&gt;
							--failnode=192.168.2.101@o2ib0 \&lt;br/&gt;
							--failnode=192.168.3.101@o2ib1 \&lt;br/&gt;
							/dev/mapper/ost-2&quot;&lt;/p&gt;

&lt;p&gt;ssh IO2 &quot;mount -t lustre /dev/mapper/ost-2 /OST2&quot;&lt;/p&gt;


&lt;ol&gt;
	&lt;li&gt;
	&lt;ol&gt;
		&lt;li&gt;
		&lt;ol&gt;
			&lt;li&gt;
			&lt;ol&gt;
				&lt;li&gt;The Failover will be IO2&lt;br/&gt;
ssh IO1 &quot;mkfs.lustre --reformat  --fsname=lustre --ost --index=3 \&lt;br/&gt;
							--mgsnode=192.168.2.101@o2ib0 \&lt;br/&gt;
							--mgsnode=192.168.3.101@o2ib1 \&lt;br/&gt;
							--mgsnode=192.168.2.102@o2ib0 \&lt;br/&gt;
							--mgsnode=192.168.3.102@o2ib1 \&lt;br/&gt;
							--failnode=192.168.2.102@o2ib0 \&lt;br/&gt;
							--failnode=192.168.3.102@o2ib1 \&lt;br/&gt;
							/dev/mapper/ost-3&quot;&lt;br/&gt;
ssh IO1 &quot;mount -t lustre /dev/mapper/ost-3 /OST3&quot;&lt;/li&gt;
			&lt;/ol&gt;
			&lt;/li&gt;
		&lt;/ol&gt;
		&lt;/li&gt;
	&lt;/ol&gt;
	&lt;/li&gt;
&lt;/ol&gt;


&lt;p&gt;ssh IO1 &quot;mkfs.lustre --reformat  --fsname=lustre --ost --index=4 \&lt;br/&gt;
							--mgsnode=192.168.2.101@o2ib0 \&lt;br/&gt;
							--mgsnode=192.168.3.101@o2ib1 \&lt;br/&gt;
							--mgsnode=192.168.2.102@o2ib0 \&lt;br/&gt;
							--mgsnode=192.168.3.102@o2ib1 \&lt;br/&gt;
							--failnode=192.168.2.102@o2ib0 \&lt;br/&gt;
							--failnode=192.168.3.102@o2ib1 \&lt;br/&gt;
							/dev/mapper/ost-4&quot;&lt;br/&gt;
ssh IO1 &quot;mount -t lustre /dev/mapper/ost-4 /OST4&quot;&lt;/p&gt;

&lt;p&gt;Is the above configuration is correct &lt;/p&gt;

&lt;p&gt;Thank You&lt;br/&gt;
Atul Yadav&lt;/p&gt;</description>
                <environment>Lustre 2.5.2, Cent OS 6.5, Dual IB port.&lt;br/&gt;
DRBD, Heartbeat</environment>
        <key id="25765">LU-5421</key>
            <summary>MGS and MDT with dual ib port</summary>
                <type id="3" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11318&amp;avatarType=issuetype">Task</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="1" iconUrl="https://jira.whamcloud.com/images/icons/statuses/open.png" description="The issue is open and ready for the assignee to start work on it.">Open</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="atulyadavtech">Atul Yadav</reporter>
                        <labels>
                    </labels>
                <created>Sun, 27 Jul 2014 06:12:17 +0000</created>
                <updated>Mon, 28 Jul 2014 19:28:27 +0000</updated>
                                            <version>Lustre 2.5.2</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>2</watches>
                                                                            <comments>
                            <comment id="90126" author="atulyadavtech" created="Sun, 27 Jul 2014 06:21:58 +0000"  >&lt;p&gt;After executing below command, we are getting error &lt;br/&gt;
++++++++++++++++++++++++++++++++++++++&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@IO1 ~&amp;#93;&lt;/span&gt;# modprobe -v lustre&lt;br/&gt;
insmod /lib/modules/2.6.32-431.17.1.el6_lustre.x86_64/extra/kernel/net/lustre/libcfs.ko&lt;br/&gt;
insmod /lib/modules/2.6.32-431.17.1.el6_lustre.x86_64/extra/kernel/fs/lustre/lvfs.ko&lt;br/&gt;
insmod /lib/modules/2.6.32-431.17.1.el6_lustre.x86_64/extra/kernel/net/lustre/lnet.ko networks=o2ib0(ib0),o2ib1(ib1)&lt;br/&gt;
insmod /lib/modules/2.6.32-431.17.1.el6_lustre.x86_64/extra/kernel/fs/lustre/obdclass.ko&lt;br/&gt;
insmod /lib/modules/2.6.32-431.17.1.el6_lustre.x86_64/extra/kernel/fs/lustre/ptlrpc.ko&lt;br/&gt;
insmod /lib/modules/2.6.32-431.17.1.el6_lustre.x86_64/extra/kernel/fs/lustre/fld.ko&lt;br/&gt;
insmod /lib/modules/2.6.32-431.17.1.el6_lustre.x86_64/extra/kernel/fs/lustre/fid.ko&lt;br/&gt;
insmod /lib/modules/2.6.32-431.17.1.el6_lustre.x86_64/extra/kernel/fs/lustre/mdc.ko&lt;br/&gt;
insmod /lib/modules/2.6.32-431.17.1.el6_lustre.x86_64/extra/kernel/fs/lustre/osc.ko&lt;br/&gt;
insmod /lib/modules/2.6.32-431.17.1.el6_lustre.x86_64/extra/kernel/fs/lustre/lov.ko&lt;br/&gt;
insmod /lib/modules/2.6.32-431.17.1.el6_lustre.x86_64/extra/kernel/fs/lustre/lustre.ko&lt;/p&gt;


&lt;p&gt; mkfs.lustre --reformat --fsname=lustre --mdt --mgs --index=0 --failnode=192.168.3.101@o2ib1 --failnode=192.168.2.102@o2ib0 --failnode=192.168.3.102@o2ib1 /dev/drbd0&lt;/p&gt;

&lt;p&gt;   Permanent disk data:&lt;br/&gt;
Target:     lustre:MDT0000&lt;br/&gt;
Index:      0&lt;br/&gt;
Lustre FS:  lustre&lt;br/&gt;
Mount type: ldiskfs&lt;br/&gt;
Flags:      0x65&lt;br/&gt;
              (MDT MGS first_time update )&lt;br/&gt;
Persistent mount opts: user_xattr,errors=remount-ro&lt;br/&gt;
Parameters: failover.node=192.168.3.101@o2ib1 failover.node=192.168.2.102@o2ib failover.node=192.168.3.102@o2ib1&lt;/p&gt;

&lt;p&gt;device size = 380916MB&lt;br/&gt;
formatting backing filesystem ldiskfs on /dev/drbd0&lt;br/&gt;
        target name  lustre:MDT0000&lt;br/&gt;
        4k blocks     97514583&lt;br/&gt;
        options        -J size=400 -I 512 -i 2048 -q -O dirdata,uninit_bg,^extents,mmp,dir_nlink,quota,huge_file,flex_bg -E lazy_journal_init -F&lt;br/&gt;
mkfs_cmd = mke2fs -j -b 4096 -L lustre:MDT0000  -J size=400 -I 512 -i 2048 -q -O dirdata,uninit_bg,^extents,mmp,dir_nlink,quota,huge_file,flex_bg -E lazy_journal_init -F /dev/drbd0 97514583&lt;br/&gt;
Writing CONFIGS/mountdata&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@IO1 ~&amp;#93;&lt;/span&gt;# mount -v -t lustre /dev/drbd0 /MDT&lt;br/&gt;
arg&lt;span class=&quot;error&quot;&gt;&amp;#91;0&amp;#93;&lt;/span&gt; = /sbin/mount.lustre&lt;br/&gt;
arg&lt;span class=&quot;error&quot;&gt;&amp;#91;1&amp;#93;&lt;/span&gt; = -v&lt;br/&gt;
arg&lt;span class=&quot;error&quot;&gt;&amp;#91;2&amp;#93;&lt;/span&gt; = -o&lt;br/&gt;
arg&lt;span class=&quot;error&quot;&gt;&amp;#91;3&amp;#93;&lt;/span&gt; = rw&lt;br/&gt;
arg&lt;span class=&quot;error&quot;&gt;&amp;#91;4&amp;#93;&lt;/span&gt; = /dev/drbd0&lt;br/&gt;
arg&lt;span class=&quot;error&quot;&gt;&amp;#91;5&amp;#93;&lt;/span&gt; = /MDT&lt;br/&gt;
source = /dev/drbd0 (/dev/drbd0), target = /MDT&lt;br/&gt;
options = rw&lt;br/&gt;
checking for existing Lustre data: found&lt;br/&gt;
Reading CONFIGS/mountdata&lt;br/&gt;
Writing CONFIGS/mountdata&lt;br/&gt;
mounting device /dev/drbd0 at /MDT, flags=0x1000000 options=osd=osd-ldiskfs,user_xattr,errors=remount-ro,mgs,virgin,update,param=failover.node=192.168.3.101@o2ib1,param=failover.node=192.168.2.102@o2ib,param=failover.node=192.168.3.102@o2ib1,svname=lustre-MDT0000,device=/dev/drbd0&lt;br/&gt;
mount.lustre: cannot parse scheduler options for &apos;/sys/block/drbd0/queue/scheduler&apos;&lt;br/&gt;
mount.lustre: mount /dev/drbd0 at /MDT failed: Cannot assign requested address retries left: 0&lt;br/&gt;
mount.lustre: mount /dev/drbd0 at /MDT failed: Cannot assign requested address&lt;/p&gt;



&lt;p&gt;+++++++++++++++++++++++++++++++++&lt;/p&gt;

&lt;p&gt;Thank YOU&lt;br/&gt;
Atul Yadav&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                    <customfield id="customfield_10030" key="com.atlassian.jira.plugin.system.customfieldtypes:labels">
                        <customfieldname>Epic/Theme</customfieldname>
                        <customfieldvalues>
                                        <label>DUAl-ib-port</label>
            <label>Lustre-2.5.2</label>
    
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwsef:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>15077</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                </customfields>
    </item>
</channel>
</rss>