<?xml version="1.0" encoding="UTF-8"?>
<!--
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:21:35 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2008] After hardware reboot (using pm) the node cannot be accessed</title>
                <link>https://jira.whamcloud.com/browse/LU-2008</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for bobijam &amp;lt;bobijam@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/02a8d976-05c6-11e2-b6a7-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/02a8d976-05c6-11e2-b6a7-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_0b failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;test failed to respond and timed out&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Info required for matching: replay-ost-single 0b&lt;/p&gt;

&lt;p&gt;11:42:23:== replay-ost-single test 0b: empty replay =========================================================== 11:42:21 (1348425741)&lt;br/&gt;
11:42:34:Failing ost1 on node client-25vm4&lt;br/&gt;
11:42:34:CMD: client-25vm4 lctl dl&lt;br/&gt;
11:42:34:CMD: client-25vm4 lctl dl&lt;br/&gt;
11:42:34:CMD: client-25vm4 lctl dl&lt;br/&gt;
11:42:34:CMD: client-25vm4 lctl dl&lt;br/&gt;
11:42:34:CMD: client-25vm4 lctl dl&lt;br/&gt;
11:42:34:CMD: client-25vm4 lctl dl&lt;br/&gt;
11:42:34:CMD: client-25vm4 lctl dl&lt;br/&gt;
11:42:34:+ pm -h powerman --off client-25vm4&lt;br/&gt;
11:42:34:pm: warning: server version (2.3.5) != client (2.3.12)&lt;br/&gt;
11:42:34:Command completed successfully&lt;br/&gt;
11:42:34:affected facets:&lt;br/&gt;
11:42:34:+ pm -h powerman --on client-25vm4&lt;br/&gt;
11:42:34:pm: warning: server version (2.3.5) != client (2.3.12)&lt;br/&gt;
11:42:46:Command completed successfully&lt;br/&gt;
11:42:46:CMD: hostname &lt;br/&gt;
11:42:46:pdsh@client-25vm1: gethostbyname(&quot;hostname&quot;) failed&lt;br/&gt;
11:42:46:CMD: hostname &lt;br/&gt;
11:42:46:pdsh@client-25vm1: gethostbyname(&quot;hostname&quot;) failed&lt;br/&gt;
11:42:57:CMD: hostname &lt;br/&gt;
11:42:57:pdsh@client-25vm1: gethostbyname(&quot;hostname&quot;) failed&lt;br/&gt;
11:42:57:CMD: hostname &lt;/p&gt;
</description>
                <environment></environment>
        <key id="16082">LU-2008</key>
            <summary>After hardware reboot (using pm) the node cannot be accessed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="yujian">Jian Yu</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>HB</label>
                    </labels>
                <created>Sun, 23 Sep 2012 20:24:11 +0000</created>
                <updated>Tue, 15 Oct 2013 13:52:48 +0000</updated>
                            <resolved>Mon, 8 Apr 2013 15:53:46 +0000</resolved>
                                    <version>Lustre 2.3.0</version>
                    <version>Lustre 2.4.0</version>
                                    <fixVersion>Lustre 2.4.0</fixVersion>
                    <fixVersion>Lustre 2.4.1</fixVersion>
                    <fixVersion>Lustre 2.5.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>10</watches>
                                                                            <comments>
                            <comment id="45401" author="bobijam" created="Sun, 23 Sep 2012 20:29:22 +0000"  >&lt;p&gt;Chris,&lt;/p&gt;

&lt;p&gt;Does it related to TT-868, or another issue? The test failed over an OSS (client-25vm4) and lost it forever.&lt;/p&gt;

&lt;p&gt;test parameter are: &lt;/p&gt;

&lt;p&gt;Test-Parameters: fortestonly envdefinitions=SLOW=yes clientcount=4 osscount=2 mdscount=2 austeroptions=-R failover=true useiscsi=true testlist=replay-ost-single&lt;/p&gt;</comment>
                            <comment id="45510" author="jlevi" created="Tue, 25 Sep 2012 08:38:13 +0000"  >&lt;p&gt;This is blocking the work on &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1966&quot; title=&quot;Test failure on test suite replay-ost-single, subtest test_6: Destroys weren&amp;#39;t done in 5 sec&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1966&quot;&gt;&lt;del&gt;LU-1966&lt;/del&gt;&lt;/a&gt; which is also a 2.3 blocker.&lt;/p&gt;</comment>
                            <comment id="45627" author="pjones" created="Thu, 27 Sep 2012 10:00:55 +0000"  >&lt;p&gt;Dropping priority as this is not preventing any testing from happening&lt;/p&gt;</comment>
                            <comment id="50246" author="sarah" created="Thu, 10 Jan 2013 02:07:17 +0000"  >&lt;p&gt;hit this issue again during 2.4 testing:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sessions/6e837946-4809-11e2-8cdc-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sessions/6e837946-4809-11e2-8cdc-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="50280" author="keith" created="Thu, 10 Jan 2013 13:58:37 +0000"  >&lt;p&gt;Well I saw &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2599&quot; title=&quot;Failure on test suite recovery-random-scale test_fail_client_mds: gethostbyname failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2599&quot;&gt;&lt;del&gt;LU-2599&lt;/del&gt;&lt;/a&gt; and that it was duped here. &lt;/p&gt;

&lt;p&gt;from &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/8b557c90-4809-11e2-8cdc-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/8b557c90-4809-11e2-8cdc-52540035b04c&lt;/a&gt; &lt;br/&gt;
recovery-random-scale&lt;/p&gt;

&lt;p&gt;I took a quick look at the test failure and two things really pop out at me.&lt;/p&gt;

&lt;p&gt;&quot;Duration: 	86400s &quot;  My calculator tell me that is 24 hours. &lt;/p&gt;

&lt;p&gt;&quot;Failure Rate: 78.00% of last 100 executions &lt;span class=&quot;error&quot;&gt;&amp;#91;all branches&amp;#93;&lt;/span&gt; &quot;  I am learning to not trust this Metric but as testing is taking days right now this could be part of the issue.&lt;/p&gt;


&lt;p&gt;From the syslog of the test session:&lt;/p&gt;

&lt;p&gt;The Start:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Dec 15 02:39:30 fat-intel-3vm5 kernel: Lustre: DEBUG MARKER: /usr/sbin/lctl mark Starting failover on mds1
Dec 15 02:39:35 fat-intel-3vm5 xinetd[1587]: EXIT: shell status=0 pid=7402 duration=5(sec)
Dec 15 02:40:33 fat-intel-3vm5 xinetd[1587]: START: shell pid=7430 from=::ffff:10.10.4.86
Dec 15 02:40:33 fat-intel-3vm5 rshd[7431]: root@fat-intel-3vm1.lab.whamcloud.com as root: cmd=&apos;(PATH=$PATH:/usr/lib64/lustre/utils:/usr/lib64/lustre/tests:/sbin:/usr/sbin; cd /usr/lib64/lustre/tests; LUSTRE=&quot;/usr/lib64/lustre&quot;  USE_OFD=yes MGSFSTYPE=ldiskfs MDSFSTYPE=ldiskfs OSTFSTYPE=ldiskfs FSTYPE=ldiskfs sh -c &quot;/usr/sbin/lctl mark Starting failover on mds1&quot;);echo XXRETCODE:$?&apos;
Dec 15 02:40:33 fat-intel-3vm5 kernel: Lustre: DEBUG MARKER: Starting failover on mds1
Dec 15 02:40:33 fat-intel-3vm5 xinetd[1587]: EXIT: shell status=0 pid=7430 duration=0(sec)
Dec 15 02:41:20 fat-intel-3vm5 kernel: LustreError: 7107:0:(vvp_io.c:1075:vvp_io_commit_write()) Write page 465052 of inode ffff8800765bd638 failed -28
Dec 15 02:43:50 fat-intel-3vm5 kernel: LustreError: 7459:0:(vvp_io.c:1075:vvp_io_commit_write()) Write page 465147 of inode ffff8800765bd638 failed -28
Dec 15 02:43:51 fat-intel-3vm5 kernel: LustreError: 7471:0:(vvp_io.c:1075:vvp_io_commit_write()) Write page 0 of inode ffff8800765bd638 failed -28
Dec 15 02:46:20 fat-intel-3vm5 kernel: LustreError: 7475:0:(vvp_io.c:1075:vvp_io_commit_write()) Write page 464979 of inode ffff8800765bd638 failed -28
Dec 15 02:48:50 fat-intel-3vm5 kernel: LustreError: 7487:0:(vvp_io.c:1075:vvp_io_commit_write()) Write page 465103 of inode ffff8800765bd638 failed -28
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;A day or so of write page failed errors... &lt;/p&gt;

&lt;p&gt;The end: &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Dec 16 02:33:30 fat-intel-3vm5 kernel: LustreError: 6617:0:(vvp_io.c:1075:vvp_io_commit_write()) Write page 464781 of inode ffff8800765bd638 failed -28
Dec 16 02:33:30 fat-intel-3vm5 kernel: LustreError: 6617:0:(vvp_io.c:1075:vvp_io_commit_write()) Skipped 5 previous similar messages
Dec 16 02:39:32 fat-intel-3vm5 xinetd[1587]: START: shell pid=6666 from=::ffff:10.10.4.86
Dec 16 02:39:32 fat-intel-3vm5 rshd[6667]: autotest@fat-intel-3vm1.lab.whamcloud.com as root: cmd=&apos;/home/autotest/.autotest/dynamic_bash/70041011263340&apos;
Dec 16 02:39:34 fat-intel-3vm5 kernel: SysRq : Show State
Dec 16 02:39:34 fat-intel-3vm5 kernel:  task                        PC stack   pid father 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;


&lt;p&gt;I am going to dig into this a bit more but something seems really broken here. &lt;/p&gt;</comment>
                            <comment id="50282" author="keith" created="Thu, 10 Jan 2013 14:40:30 +0000"  >&lt;p&gt;Seems to be a script var issues:&lt;/p&gt;

&lt;p&gt;from The test output (just part of it)&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;FAIL CLIENT fat-intel-3vm6...
+ pm -h powerman --off fat-intel-3vm6
pm: warning: server version (2.3.5) != client (2.3.12)
Command completed successfully
Starting failover on mds1
CMD: fat-intel-3vm3 /usr/sbin/lctl dl
Failing  on 
+ pm -h powerman --off
Usage: pm [action] [targets]
-1,--on targets      Power on targets
-0,--off targets     Power off targets
-c,--cycle targets   Power cycle targets
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;So we fail Client fat-intel-3vm6 just fine.&lt;/p&gt;

&lt;p&gt;Then we start on the MDS &quot;Starting failover on mds1&quot;&lt;/p&gt;

&lt;p&gt;We get &quot;Failing on &quot;  There is nothing here. &lt;/p&gt;

&lt;p&gt;pm is confused and sending out it usage info because we don&apos;t pass in a host. &lt;/p&gt;

&lt;p&gt;From lustre/tests/recovery-random-scale.sh calles&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
log &quot;Starting failover on $serverfacet&quot;
facet_failover &quot;$serverfacet&quot; || exit 1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt; 

&lt;p&gt;In test-framework.sh facet_failover&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;        for ((index=0; index&amp;lt;$total; index++)); do
                facet=$(echo ${affecteds[index]} | tr -s &quot; &quot; | cut -d&quot;,&quot; -f 1)
                local host=$(facet_active_host $facet)
                echo &quot;Failing ${affecteds[index]} on $host&quot;
                shutdown_facet $facet
        done
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;$host is not set and ${affecteds&lt;span class=&quot;error&quot;&gt;&amp;#91;index&amp;#93;&lt;/span&gt;} is empty.&lt;/p&gt;

&lt;p&gt;Something in the framework is not quite right and I don&apos;t think I am quite the right person to dig all that out.&lt;/p&gt;

&lt;p&gt;I will put a sanity check into shutdown_facet() to keep errors like this from wasting a day of cycles.&lt;/p&gt;</comment>
                            <comment id="50318" author="keith" created="Thu, 10 Jan 2013 22:32:49 +0000"  >&lt;p&gt;Well with the lack of return codes and argument it makes is reasonably complicated to properly detect the error and skip timeout.  &lt;/p&gt;

&lt;p&gt;I noticed there were widespread timout issues with the total run.  7 tests goto timeout. &lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;http://jira.whamcloud.com/browse/TT-1016&quot; class=&quot;external-link&quot; rel=&quot;nofollow&quot;&gt;http://jira.whamcloud.com/browse/TT-1016&lt;/a&gt;  Seems very related. &lt;/p&gt;

&lt;p&gt;The root issue may be occurring at a reasonably high rate and it is eating up lots of cycles on the test cluster. &lt;/p&gt;

&lt;p&gt;When things get off the rails like this we need to have a good way of bailing out. &lt;/p&gt;</comment>
                            <comment id="51606" author="chris" created="Fri, 1 Feb 2013 04:12:17 +0000"  >&lt;p&gt;Below is the configure used by one of the runs. Looks out to me but someone else might like to review and work out what is wrong.&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;#!/bin/bash
#Auto Generated By Whamcloud Autotest
#Key Exports
export mgs_HOST=fat-intel-3vm3
export mds_HOST=fat-intel-3vm3
export MGSDEV=/dev/lvm-MDS/P1
export MDSDEV=/dev/lvm-MDS/P1
export mds1_HOST=fat-intel-3vm3
export MDSDEV1=/dev/lvm-MDS/P1
export MDSCOUNT=1
export MDSSIZE=10485760
export MGSSIZE=10485760
export MDSFSTYPE=ldiskfs
export MGSFSTYPE=ldiskfs
export mdsfailover_HOST=fat-intel-3vm7
export mds1failover_HOST=fat-intel-3vm7
export MGSNID=fat-intel-3vm3:fat-intel-3vm7
export FAILURE_MODE=HARD
export POWER_DOWN=&lt;span class=&quot;code-quote&quot;&gt;&quot;pm -h powerman --off&quot;&lt;/span&gt;
export POWER_UP=&lt;span class=&quot;code-quote&quot;&gt;&quot;pm -h powerman --on&quot;&lt;/span&gt;
export ost_HOST=fat-intel-3vm4
export ostfailover_HOST=fat-intel-3vm8
export ost1_HOST=fat-intel-3vm4
export OSTDEV1=/dev/lvm-OSS/P1
export ost1failover_HOST=fat-intel-3vm8
export ost2_HOST=fat-intel-3vm4
export OSTDEV2=/dev/lvm-OSS/P2
export ost2failover_HOST=fat-intel-3vm8
export ost3_HOST=fat-intel-3vm4
export OSTDEV3=/dev/lvm-OSS/P3
export ost3failover_HOST=fat-intel-3vm8
export ost4_HOST=fat-intel-3vm4
export OSTDEV4=/dev/lvm-OSS/P4
export ost4failover_HOST=fat-intel-3vm8
export ost5_HOST=fat-intel-3vm4
export OSTDEV5=/dev/lvm-OSS/P5
export ost5failover_HOST=fat-intel-3vm8
export ost6_HOST=fat-intel-3vm4
export OSTDEV6=/dev/lvm-OSS/P6
export ost6failover_HOST=fat-intel-3vm8
export ost7_HOST=fat-intel-3vm4
export OSTDEV7=/dev/lvm-OSS/P7
export ost7failover_HOST=fat-intel-3vm8
# some setup &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; conf-sanity test 24a, 24b, 33a
export fs2mds_DEV=/dev/lvm-MDS/S1
export fs2ost_DEV=/dev/lvm-OSS/S1
export fs3ost_DEV=/dev/lvm-OSS/S2
export RCLIENTS=&lt;span class=&quot;code-quote&quot;&gt;&quot;fat-intel-3vm6 fat-intel-3vm5&quot;&lt;/span&gt;
export OSTCOUNT=7
export NETTYPE=tcp
export OSTSIZE=2097152
export OSTFSTYPE=ldiskfs
export FSTYPE=ldiskfs
export SHARED_DIRECTORY=/home/autotest/.autotest/shared_dir/2012-12-15/011204-70041009461540
export SLOW=yes
# Adding contents of /home/autotest/autotest/mecturk-standalone.sh
VERBOSE=&lt;span class=&quot;code-keyword&quot;&gt;true&lt;/span&gt;

# Entries above here come are created by configurecluster.rb
# Entries below here come from mecturk.h
FSNAME=lustre

TMP=${TMP:-/tmp}

DAEMONSIZE=${DAEMONSIZE:-500}

MDSOPT=${MDSOPT:-&quot;&quot;}
MGSOPT=${MGSOPT:-&quot;&quot;}

# sgpdd-survey requires these to be set. They apprarently have no side affect.
SGPDD_YES=&lt;span class=&quot;code-keyword&quot;&gt;true&lt;/span&gt;
REFORMAT=&lt;span class=&quot;code-keyword&quot;&gt;true&lt;/span&gt;

# some bits &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; liblustre tcp connecttions
export LNET_ACCEPT_PORT=7988
export ACCEPTOR_PORT=7988

OSTOPT=${OSTOPT:-&quot;&quot;}

STRIPE_BYTES=${STRIPE_BYTES:-1048576}
STRIPES_PER_OBJ=${STRIPES_PER_OBJ:-0}
SINGLEMDS=${SINGLEMDS:-&lt;span class=&quot;code-quote&quot;&gt;&quot;mds1&quot;&lt;/span&gt;}
TIMEOUT=${TIMEOUT:-20}
PTLDEBUG=${PTLDEBUG:-0x33f0404}
DEBUG_SIZE=${DEBUG_SIZE:-32}
SUBSYSTEM=${SUBSYSTEM:- 0xffb7e3ff}

MKFSOPT=&quot;&quot;
MOUNTOPT=&quot;&quot;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$MDSJOURNALSIZE&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MKFSOPT=$MKFSOPT&lt;span class=&quot;code-quote&quot;&gt;&quot; -J size=$MDSJOURNALSIZE&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$MDSISIZE&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MKFSOPT=$MKFSOPT&lt;span class=&quot;code-quote&quot;&gt;&quot; -i $MDSISIZE&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$MKFSOPT&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MKFSOPT=&lt;span class=&quot;code-quote&quot;&gt;&quot;--mkfsoptions=\\\&quot;&lt;/span&gt;$MKFSOPT\\\&quot;&quot;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$MDSCAPA&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MKFSOPT=&lt;span class=&quot;code-quote&quot;&gt;&quot;--param mdt.capa=$MDSCAPA&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;$MDSFSTYPE&quot;&lt;/span&gt; = &lt;span class=&quot;code-quote&quot;&gt;&quot;ldiskfs&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MDSOPT=$MDSOPT&lt;span class=&quot;code-quote&quot;&gt;&quot; --mountfsoptions=errors=remount-ro,iopen_nopriv,user_xattr,acl&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$mdsfailover_HOST&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MDSOPT=$MDSOPT&lt;span class=&quot;code-quote&quot;&gt;&quot; --failnode=`h2$NETTYPE $mdsfailover_HOST`&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$STRIPE_BYTES&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MOUNTOPT=$MOUNTOPT&lt;span class=&quot;code-quote&quot;&gt;&quot; --param lov.stripesize=$STRIPE_BYTES&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$STRIPES_PER_OBJ&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MOUNTOPT=$MOUNTOPT&lt;span class=&quot;code-quote&quot;&gt;&quot; --param lov.stripecount=$STRIPES_PER_OBJ&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$L_GETIDENTITY&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MOUNTOPT=$MOUNTOPT&lt;span class=&quot;code-quote&quot;&gt;&quot; --param mdt.identity_upcall=$L_GETIDENTITY&quot;&lt;/span&gt;
# Check &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; wide stripping
[ $OSTCOUNT -gt 160 ] &amp;amp;&amp;amp;
    MDSOPT=$MDSOPT&lt;span class=&quot;code-quote&quot;&gt;&quot; --mkfsoptions=-O large_xattr -J size=4096&quot;&lt;/span&gt;

MDS_MKFS_OPTS=&lt;span class=&quot;code-quote&quot;&gt;&quot;--mdt --fsname=$FSNAME $MKFSOPT $MDSOPT&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;$MDSFSTYPE&quot;&lt;/span&gt; = &lt;span class=&quot;code-quote&quot;&gt;&quot;ldiskfs&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MDS_MKFS_OPTS=$MDS_MKFS_OPTS&lt;span class=&quot;code-quote&quot;&gt;&quot; --param sys.timeout=$TIMEOUT --device-size=$MDSSIZE&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;$MDSFSTYPE&quot;&lt;/span&gt; = &lt;span class=&quot;code-quote&quot;&gt;&quot;zfs&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MDS_MKFS_OPTS=$MDS_MKFS_OPTS&lt;span class=&quot;code-quote&quot;&gt;&quot; --vdev-size=$MDSSIZE&quot;&lt;/span&gt;

&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; combined_mgs_mds ; then
    [ &lt;span class=&quot;code-quote&quot;&gt;&quot;$MDSCOUNT&quot;&lt;/span&gt; = &lt;span class=&quot;code-quote&quot;&gt;&quot;1&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
        MDS_MKFS_OPTS=&lt;span class=&quot;code-quote&quot;&gt;&quot;--mgs $MDS_MKFS_OPTS&quot;&lt;/span&gt;
&lt;span class=&quot;code-keyword&quot;&gt;else&lt;/span&gt;
    MDS_MKFS_OPTS=&lt;span class=&quot;code-quote&quot;&gt;&quot;--mgsnode=$MGSNID $MDS_MKFS_OPTS&quot;&lt;/span&gt;
    [ &lt;span class=&quot;code-quote&quot;&gt;&quot;$MGSFSTYPE&quot;&lt;/span&gt; = &lt;span class=&quot;code-quote&quot;&gt;&quot;ldiskfs&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
        MGS_MKFS_OPTS=&lt;span class=&quot;code-quote&quot;&gt;&quot;--mgs --device-size=$MGSSIZE&quot;&lt;/span&gt;
    [ &lt;span class=&quot;code-quote&quot;&gt;&quot;$MGSFSTYPE&quot;&lt;/span&gt; = &lt;span class=&quot;code-quote&quot;&gt;&quot;zfs&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
        MGS_MKFS_OPTS=&lt;span class=&quot;code-quote&quot;&gt;&quot;--mgs --vdev-size=$MGSSIZE&quot;&lt;/span&gt;
fi

&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; [ &lt;span class=&quot;code-quote&quot;&gt;&quot;$MDSDEV1&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;$MGSDEV&quot;&lt;/span&gt; ]; then
    &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; [ &lt;span class=&quot;code-quote&quot;&gt;&quot;$MGSFSTYPE&quot;&lt;/span&gt; == &lt;span class=&quot;code-quote&quot;&gt;&quot;ldiskfs&quot;&lt;/span&gt; ]; then
        MGS_MOUNT_OPTS=${MGS_MOUNT_OPTS:-&lt;span class=&quot;code-quote&quot;&gt;&quot;-o loop&quot;&lt;/span&gt;}
    &lt;span class=&quot;code-keyword&quot;&gt;else&lt;/span&gt;
        MGS_MOUNT_OPTS=${MGS_MOUNT_OPTS:-&quot;&quot;}
    fi
&lt;span class=&quot;code-keyword&quot;&gt;else&lt;/span&gt;
    MGS_MOUNT_OPTS=${MGS_MOUNT_OPTS:-$MDS_MOUNT_OPTS}
fi

MKFSOPT=&quot;&quot;
MOUNTOPT=&quot;&quot;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$OSTJOURNALSIZE&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MKFSOPT=$MKFSOPT&lt;span class=&quot;code-quote&quot;&gt;&quot; -J size=$OSTJOURNALSIZE&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$MKFSOPT&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MKFSOPT=&lt;span class=&quot;code-quote&quot;&gt;&quot;--mkfsoptions=\\\&quot;&lt;/span&gt;$MKFSOPT\\\&quot;&quot;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$OSSCAPA&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    MKFSOPT=&lt;span class=&quot;code-quote&quot;&gt;&quot;--param ost.capa=$OSSCAPA&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$ostfailover_HOST&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    OSTOPT=$OSTOPT&lt;span class=&quot;code-quote&quot;&gt;&quot; --failnode=`h2$NETTYPE $ostfailover_HOST`&quot;&lt;/span&gt;

OST_MKFS_OPTS=&lt;span class=&quot;code-quote&quot;&gt;&quot;--ost --fsname=$FSNAME --mgsnode=$MGSNID $MKFSOPT $OSTOPT&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;$OSTFSTYPE&quot;&lt;/span&gt; = &lt;span class=&quot;code-quote&quot;&gt;&quot;ldiskfs&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    OST_MKFS_OPTS=$OST_MKFS_OPTS&lt;span class=&quot;code-quote&quot;&gt;&quot; --param sys.timeout=$TIMEOUT --device-size=$OSTSIZE&quot;&lt;/span&gt;
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;$OSTFSTYPE&quot;&lt;/span&gt; = &lt;span class=&quot;code-quote&quot;&gt;&quot;zfs&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
    OST_MKFS_OPTS=$OST_MKFS_OPTS&lt;span class=&quot;code-quote&quot;&gt;&quot; --vdev-size=$OSTSIZE&quot;&lt;/span&gt;

MDS_MOUNT_OPTS=${MDS_MOUNT_OPTS:-&lt;span class=&quot;code-quote&quot;&gt;&quot;-o user_xattr,acl&quot;&lt;/span&gt;}
OST_MOUNT_OPTS=${OST_MOUNT_OPTS:-&quot;&quot;}

# TT-430
SERVER_FAILOVER_PERIOD=$((60 * 15))

#RUNAS_ID=840000017
#client
MOUNT=${MOUNT:-/mnt/${FSNAME}}
MOUNT1=${MOUNT1:-$MOUNT}
MOUNT2=${MOUNT2:-${MOUNT}2}
MOUNTOPT=${MOUNTOPT:-&lt;span class=&quot;code-quote&quot;&gt;&quot;-o user_xattr,acl,flock&quot;&lt;/span&gt;}
[ &lt;span class=&quot;code-quote&quot;&gt;&quot;x$RMTCLIENT&quot;&lt;/span&gt; != &lt;span class=&quot;code-quote&quot;&gt;&quot;x&quot;&lt;/span&gt; ] &amp;amp;&amp;amp;
        MOUNTOPT=$MOUNTOPT&lt;span class=&quot;code-quote&quot;&gt;&quot;,remote_client&quot;&lt;/span&gt;
DIR=${DIR:-$MOUNT}
DIR1=${DIR:-$MOUNT1}
DIR2=${DIR2:-$MOUNT2}

&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; [ $UID -ne 0 ]; then
        log &lt;span class=&quot;code-quote&quot;&gt;&quot;running as non-root uid $UID&quot;&lt;/span&gt;
        RUNAS_ID=&lt;span class=&quot;code-quote&quot;&gt;&quot;$UID&quot;&lt;/span&gt;
        RUNAS_GID=`id -g $USER`
        RUNAS=&quot;&quot;
&lt;span class=&quot;code-keyword&quot;&gt;else&lt;/span&gt;
        RUNAS_ID=${RUNAS_ID:-500}
        RUNAS_GID=${RUNAS_GID:-$RUNAS_ID}
        RUNAS=${RUNAS:-&lt;span class=&quot;code-quote&quot;&gt;&quot;runas -u $RUNAS_ID&quot;&lt;/span&gt;}
fi

PDSH=&lt;span class=&quot;code-quote&quot;&gt;&quot;pdsh -t 120 -S -Rrsh -w&quot;&lt;/span&gt;
#PDSH=&lt;span class=&quot;code-quote&quot;&gt;&quot;pdsh -t 120 -S -Rmrsh -w&quot;&lt;/span&gt;
export RSYNC_RSH=rsh
FAILURE_MODE=${FAILURE_MODE:-SOFT} # or HARD
POWER_DOWN=${POWER_DOWN:-&lt;span class=&quot;code-quote&quot;&gt;&quot;powerman --off&quot;&lt;/span&gt;}
POWER_UP=${POWER_UP:-&lt;span class=&quot;code-quote&quot;&gt;&quot;powerman --on&quot;&lt;/span&gt;}
SLOW=${SLOW:-no}
FAIL_ON_ERROR=${FAIL_ON_ERROR:-&lt;span class=&quot;code-keyword&quot;&gt;true&lt;/span&gt;}

# error: conf_param: No such device&quot; issue in every test suite logs
# sanity-quota test_32 hash_lqs_cur_bits isnt set properly
QUOTA_TYPE=${QUOTA_TYPE:-&lt;span class=&quot;code-quote&quot;&gt;&quot;ug3&quot;&lt;/span&gt;}
QUOTA_USERS=${QUOTA_USERS:-&lt;span class=&quot;code-quote&quot;&gt;&quot;quota_usr quota_2usr sanityusr sanityusr1&quot;&lt;/span&gt;}
LQUOTAOPTS=${LQUOTAOPTS:-&lt;span class=&quot;code-quote&quot;&gt;&quot;hash_lqs_cur_bits=3&quot;&lt;/span&gt;}

# SKIP: parallel-scale test_compilebench compilebench not found
# SKIP: parallel-scale test_connectathon connectathon dir not found
# ------
cbench_DIR=/usr/bin
cnt_DIR=/opt/connectathon

MPIRUN=$(which mpirun 2&amp;gt;/dev/&lt;span class=&quot;code-keyword&quot;&gt;null&lt;/span&gt;) || &lt;span class=&quot;code-keyword&quot;&gt;true&lt;/span&gt;
MPIRUN_OPTIONS=&lt;span class=&quot;code-quote&quot;&gt;&quot;-mca boot ssh&quot;&lt;/span&gt;
MPI_USER=${MPI_USER:-mpiuser}
SINGLECLIENT=$(hostname)
#cbench_DIR=/data/src/benchmarks/compilebench.hg
#cnt_DIR=/data/src/benchmarks/cthon04

# For multiple clients testing, we need use the cfg/ncli.sh config file, and
# only need specify the &lt;span class=&quot;code-quote&quot;&gt;&quot;RCLIENTS&quot;&lt;/span&gt; variable. The &lt;span class=&quot;code-quote&quot;&gt;&quot;CLIENTS&quot;&lt;/span&gt; and &lt;span class=&quot;code-quote&quot;&gt;&quot;CLIENTCOUNT&quot;&lt;/span&gt;
# variables are defined in init_clients_lists(), which is called from cfg/ncli.sh.
# So, &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; we add the contents of cfg/ncli.sh into autotest_config.sh, we would not
# need specify &lt;span class=&quot;code-quote&quot;&gt;&quot;CLIENTS&quot;&lt;/span&gt; and &lt;span class=&quot;code-quote&quot;&gt;&quot;CLIENTCOUNT&quot;&lt;/span&gt;, and the above two issues (#3 and #4) would also be fixed.
# Start of contents of cfg/ncli.sh
CLIENT1=${CLIENT1:-`hostname`}
SINGLECLIENT=$CLIENT1
RCLIENTS=${RCLIENTS:-&quot;&quot;}

init_clients_lists

[ -n &lt;span class=&quot;code-quote&quot;&gt;&quot;$RCLIENTS&quot;&lt;/span&gt; -a &lt;span class=&quot;code-quote&quot;&gt;&quot;$PDSH&quot;&lt;/span&gt; = &lt;span class=&quot;code-quote&quot;&gt;&quot;no_dsh&quot;&lt;/span&gt; ] &amp;amp;&amp;amp; \
                error &lt;span class=&quot;code-quote&quot;&gt;&quot;tests &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; remote clients $RCLIENTS needs pdsh != do_dsh &quot;&lt;/span&gt; || &lt;span class=&quot;code-keyword&quot;&gt;true&lt;/span&gt;

[ -n &lt;span class=&quot;code-quote&quot;&gt;&quot;$FUNCTIONS&quot;&lt;/span&gt; ] &amp;amp;&amp;amp; . $FUNCTIONS || &lt;span class=&quot;code-keyword&quot;&gt;true&lt;/span&gt;

# &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; recovery scale tests
# &lt;span class=&quot;code-keyword&quot;&gt;default&lt;/span&gt; boulder cluster iozone location
export PATH=/opt/iozone/bin:$PATH

LOADS=${LOADS:-&lt;span class=&quot;code-quote&quot;&gt;&quot;dd tar dbench iozone&quot;&lt;/span&gt;}
&lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; i in $LOADS; &lt;span class=&quot;code-keyword&quot;&gt;do&lt;/span&gt;
    [ -f $LUSTRE/tests/run_${i}.sh ] || \
        error &lt;span class=&quot;code-quote&quot;&gt;&quot;incorrect load: $i&quot;&lt;/span&gt;
done
CLIENT_LOADS=($LOADS)
# End of contents of cfg/ncli.sh
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="51607" author="chris" created="Fri, 1 Feb 2013 04:13:04 +0000"  >&lt;p&gt;Has anyone set this test up manually and run it?&lt;/p&gt;</comment>
                            <comment id="54347" author="pjones" created="Tue, 19 Mar 2013 05:43:20 +0000"  >&lt;p&gt;Yu Jian will investigate the problems in this area&lt;/p&gt;</comment>
                            <comment id="54572" author="yujian" created="Thu, 21 Mar 2013 16:00:23 +0000"  >&lt;p&gt;There is a common issue in lustre-initialization-1 reports in the above test sessions. After formatting all of the server targets, mounting them hit the following issues:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;11:37:12:Setup mgs, mdt, osts
11:37:12:CMD: client-25vm3 mkdir -p /mnt/mds1
11:37:12:CMD: client-25vm3 test -b /dev/lvm-MDS/P1
11:37:12:Starting mds1: -o user_xattr,acl  /dev/lvm-MDS/P1 /mnt/mds1
11:37:12:CMD: client-25vm3 mkdir -p /mnt/mds1; mount -t lustre -o user_xattr,acl  		                   /dev/lvm-MDS/P1 /mnt/mds1
11:37:43:   e2label: MMP: device currently active while trying to open /dev/dm-0
11:37:43:   MMP error info: last update: Sun Sep 23 11:37:37 2012
11:37:43:    node: client-25vm3.lab.whamcloud.com device: dm-0
11:37:43:CMD: client-25vm3 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/openmpi/1.4-gcc/bin:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin: NAME=autotest_config sh rpc.sh set_default_debug \&quot;0x33f0404\&quot; \&quot; 0xffb7e3ff\&quot; 32 
11:37:43:CMD: client-25vm3 e2label /dev/lvm-MDS/P1 2&amp;gt;/dev/null
11:37:43:Started lustre:MDT0000
11:37:43:CMD: client-25vm4 mkdir -p /mnt/ost1
11:37:43:CMD: client-25vm4 test -b /dev/lvm-OSS/P1
11:37:43:Starting ost1:   /dev/lvm-OSS/P1 /mnt/ost1
11:37:43:CMD: client-25vm4 mkdir -p /mnt/ost1; mount -t lustre   		                   /dev/lvm-OSS/P1 /mnt/ost1
11:38:14:   e2label: MMP: device currently active while trying to open /dev/dm-0
11:38:14:   MMP error info: last update: Sun Sep 23 11:38:09 2012
11:38:15:    node: client-25vm4.lab.whamcloud.com device: dm-0
11:38:15:CMD: client-25vm4 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/openmpi/1.4-gcc/bin:/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin: NAME=autotest_config sh rpc.sh set_default_debug \&quot;0x33f0404\&quot; \&quot; 0xffb7e3ff\&quot; 32 
11:38:15:CMD: client-25vm4 e2label /dev/lvm-OSS/P1 2&amp;gt;/dev/null
11:38:15:Started lustre:OST0000
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The labels of server target devices were &quot;lustre:MDT0000&quot;, &quot;lustre:OST0000&quot;, etc., instead of &quot;lustre-MDT0000&quot;, &quot;lustre-OST0000&quot;, which caused facet_up() always return false, and then affected_facets() always returned empty under HARD failure mode.&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2415&quot; title=&quot;recovery-mds-scale test_failover_mds: lustre:MDT0000/recovery_status found no match&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2415&quot;&gt;&lt;del&gt;LU-2415&lt;/del&gt;&lt;/a&gt; also has the same issue.&lt;/p&gt;</comment>
                            <comment id="54918" author="yujian" created="Wed, 27 Mar 2013 15:12:23 +0000"  >&lt;p&gt;This is a Lustre issue on master branch. Mounting an ldiskfs server target with MMP feature enabled will fail at ldiskfs_label_lustre() which uses e2label:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@fat-amd-2 ~]# mkfs.lustre --mgsnode=client-1@tcp:client-3@tcp --fsname=lustre --ost --index=0 --failnode=fat-amd-3@tcp --param=sys.timeout=20 --backfstype=ldiskfs --device-size=16000000 --quiet --reformat /dev/disk/by-id/scsi-1IET_00020001

   Permanent disk data:
Target:     lustre:OST0000
Index:      0
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x62
              (OST first_time update )
Persistent mount opts: errors=remount-ro
Parameters: mgsnode=10.10.4.1@tcp:10.10.4.3@tcp failover.node=10.10.4.134@tcp sys.timeout=20

[root@fat-amd-2 ~]# e2label /dev/disk/by-id/scsi-1IET_00020001
lustre:OST0000

[root@fat-amd-2 ~]# mkdir -p /mnt/ost1; mount -t lustre /dev/disk/by-id/scsi-1IET_00020001 /mnt/ost1
   e2label: MMP: device currently active while trying to open /dev/sdf
   MMP error info: last update: Wed Mar 27 07:29:05 2013
    node: fat-amd-2 device: sdf

[root@fat-amd-2 ~]# e2label /dev/disk/by-id/scsi-1IET_00020001
lustre:OST0000
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="54993" author="yujian" created="Thu, 28 Mar 2013 09:54:48 +0000"  >&lt;p&gt;The issue was introduced by &lt;a href=&quot;http://review.whamcloud.com/3611&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/3611&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;Patch for master branch is in &lt;a href=&quot;http://review.whamcloud.com/5867&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/5867&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="55758" author="pjones" created="Mon, 8 Apr 2013 15:53:46 +0000"  >&lt;p&gt;Landed for 2.4&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="16828">LU-2415</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                                        </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzv3mn:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>4110</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>