<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:52:14 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5526] recovery-mds-scale test failover_mds: dd: No space left on device</title>
                <link>https://jira.whamcloud.com/browse/LU-5526</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;While running recovery-mds-scale test failover_mds (MDS failed over 1 time), client load on one of the clients failed as follows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;2014-08-16 20:44:59: dd run starting
+ mkdir -p /mnt/lustre/d0.dd-shadow-41vm5.shadow.whamcloud.com
+ /usr/bin/lfs setstripe -c -1 /mnt/lustre/d0.dd-shadow-41vm5.shadow.whamcloud.com
+ cd /mnt/lustre/d0.dd-shadow-41vm5.shadow.whamcloud.com
+ sync
++ /usr/bin/lfs df /mnt/lustre/d0.dd-shadow-41vm5.shadow.whamcloud.com
++ awk &apos;/filesystem summary:/ {print $5}&apos;
+ FREE_SPACE=14195328
+ BLKS=1596974
+ echoerr &apos;Total free disk space is 14195328, 4k blocks to dd is 1596974&apos;
+ echo &apos;Total free disk space is 14195328, 4k blocks to dd is 1596974&apos;
Total free disk space is 14195328, 4k blocks to dd is 1596974
+ load_pid=3715
+ wait 3715
+ dd bs=4k count=1596974 status=noxfer if=/dev/zero of=/mnt/lustre/d0.dd-shadow-41vm5.shadow.whamcloud.com/dd-file
dd: writing `/mnt/lustre/d0.dd-shadow-41vm5.shadow.whamcloud.com/dd-file&apos;: No space left on device
1213957+0 records in
1213956+0 records out
+ &apos;[&apos; 1 -eq 0 &apos;]&apos;
++ date &apos;+%F %H:%M:%S&apos;
+ echoerr &apos;2014-08-16 20:52:06: dd failed&apos;
+ echo &apos;2014-08-16 20:52:06: dd failed&apos;
2014-08-16 20:52:06: dd failed
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
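
&lt;p&gt;(The sizing formula itself is not shown in the trace, but the figures are consistent with the dd client load script writing 90% of half of the free space reported by &apos;lfs df&apos; in 1 KB blocks: 14195328 * 9 / 10 / 2 / 4 = 1596974 blocks of 4 KB, which matches BLKS above. This is an inference from the logged numbers, not a quote from the script.)&lt;/p&gt;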

&lt;p&gt;Console log on the client (error -28 is -ENOSPC, &quot;No space left on device&quot;):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LustreError: 3715:0:(vvp_io.c:1081:vvp_io_commit_write()) Write page 1213956 of inode ffff88007ccedb78 failed -28
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/acafc288-26a6-11e4-84f2-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/acafc288-26a6-11e4-84f2-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;This failure was previously reported in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3326&quot; title=&quot;recovery-mds-scale test_failover_ost: tar: Cannot open: No space left on device&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3326&quot;&gt;&lt;del&gt;LU-3326&lt;/del&gt;&lt;/a&gt;. However, Lustre b2_5 build #80 already contained patch &lt;a href=&quot;http://review.whamcloud.com/11425&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/11425&lt;/a&gt;, so a further fix is needed to resolve the failure.&lt;/p&gt;</description>
                <environment>Lustre Build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_5/80/&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_5/80/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.5/x86_64&lt;br/&gt;
Test Group: failover&lt;br/&gt;
FSTYPE=zfs</environment>
        <key id="26094">LU-5526</key>
            <summary>recovery-mds-scale test failover_mds: dd: No space left on device</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="hongchao.zhang">Hongchao Zhang</assignee>
                                    <reporter username="yujian">Jian Yu</reporter>
                        <labels>
                            <label>22pl</label>
                            <label>mq414</label>
                            <label>p4hc</label>
                            <label>zfs</label>
                    </labels>
                <created>Wed, 20 Aug 2014 23:34:30 +0000</created>
                <updated>Wed, 5 Aug 2020 21:48:42 +0000</updated>
                            <resolved>Wed, 5 Aug 2020 21:48:42 +0000</resolved>
                                    <version>Lustre 2.7.0</version>
                    <version>Lustre 2.5.3</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>11</watches>
                                                                            <comments>
                            <comment id="92123" author="hongchao.zhang" created="Thu, 21 Aug 2014 11:31:32 +0000"  >&lt;p&gt;the patch has been updated and tracked at &lt;a href=&quot;http://review.whamcloud.com/#/c/8671/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/8671/&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="92180" author="yujian" created="Thu, 21 Aug 2014 21:31:09 +0000"  >&lt;p&gt;The failure occurred consistently on Lustre b2_5 branch with FSTYPE=zfs: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/15e2479e-2904-11e4-9362-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/15e2479e-2904-11e4-9362-5254006e85c2&lt;/a&gt;&lt;br/&gt;
The FSTYPE=ldiskfs failover test session is blocked by &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5483&quot; title=&quot;recovery-mds-scale test failover_mds: oom failure on client&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5483&quot;&gt;LU-5483&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;Hi Hongchao,&lt;br/&gt;
While creating the patch for the Lustre b2_5 branch, could you please add the following test parameters to verify the change in an FSTYPE=zfs failover test session?&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Test-Parameters: alwaysuploadlogs \
envdefinitions=SLOW=yes,ENABLE_QUOTA=yes,REQFAIL=100,DURATION=28800 \
mdtfilesystemtype=zfs mdsfilesystemtype=zfs ostfilesystemtype=zfs \
clientdistro=el6 ossdistro=el6 mdsdistro=el6 clientarch=x86_64 \
ossarch=x86_64 mdsarch=x86_64 clientcount=4 osscount=2 mdscount=2 \
mdssizegb=10 ostsizegb=10 austeroptions=-R failover=true iscsi=1 \
testlist=recovery-mds-scale
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="92901" author="yujian" created="Sun, 31 Aug 2014 20:48:23 +0000"  >&lt;p&gt;Lustre Build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_5/86/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_5/86/&lt;/a&gt; (2.5.3 RC1)&lt;br/&gt;
FSTYPE=zfs&lt;/p&gt;

&lt;p&gt;recovery-mds-scale test hit the same failure: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/2f3a6d92-30f8-11e4-a455-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/2f3a6d92-30f8-11e4-a455-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="93875" author="sarah" created="Fri, 12 Sep 2014 18:07:05 +0000"  >&lt;p&gt;Hit same error in lustre-master build # 2639 zfs failover test:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/2e7e0c1a-37c4-11e4-a2a6-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/2e7e0c1a-37c4-11e4-a2a6-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="95288" author="hongchao.zhang" created="Tue, 30 Sep 2014 14:03:52 +0000"  >&lt;p&gt;the patch is tracked at &lt;a href=&quot;http://review.whamcloud.com/#/c/12126/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/12126/&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="97917" author="yujian" created="Thu, 30 Oct 2014 06:41:06 +0000"  >&lt;p&gt;More instances on Lustre b2_5 branch with FSTYPE=zfs:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/66474710-5fb0-11e4-a865-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/66474710-5fb0-11e4-a865-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/10035164-7a72-11e4-be53-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/10035164-7a72-11e4-be53-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="105465" author="sarah" created="Tue, 3 Feb 2015 01:28:32 +0000"  >&lt;p&gt;this also affect lustre-master build # 2835 replay-single&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/42a5bd28-a81c-11e4-93dd-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/42a5bd28-a81c-11e4-93dd-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="106000" author="hongchao.zhang" created="Fri, 6 Feb 2015 07:06:31 +0000"  >&lt;p&gt;this issue is caused by the slow creation at OST after the recovery of MDT, which caused &quot;osp_precreate_reserve&quot; failed with &quot;-ETIMEDOUT&quot;,&lt;br/&gt;
and LOD return &quot;-ENOSPC&quot; in this case.&lt;/p&gt;

&lt;p&gt;I tried doubling the wait time in &quot;osp_precreate_reserve&quot;, but it still failed, so I have created another patch that waits for the creation until it completes or returns an error.&lt;br/&gt;
The patch is still tracked at &lt;a href=&quot;http://review.whamcloud.com/#/c/12126/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/12126/&lt;/a&gt;&lt;/p&gt;</comment>
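<!--
A minimal toy sketch (in C) of the approach described in the comment above:
instead of converting a precreate timeout into -ENOSPC, keep waiting until
object creation either completes or reports a hard error.  All names below
(toy_osp, toy_precreate_reserve, ...) are hypothetical; this is not the
actual lustre/osp/osp_precreate.c code.

#include <errno.h>
#include <stdio.h>

struct toy_osp {
	int precreated;   /* objects ready to be reserved */
	int pre_status;   /* last status reported by the precreate thread */
};

/* stand-in for one round of progress by the OST precreate thread */
static void toy_precreate_step(struct toy_osp *d)
{
	d->precreated += 1;   /* pretend one more object was created */
}

/* Old behavior: give up with -ETIMEDOUT after a fixed wait, which the MDS
 * stack then surfaces to the client as -ENOSPC.  Sketched new behavior:
 * keep waiting until objects appear or a real error is reported. */
static int toy_precreate_reserve(struct toy_osp *d)
{
	for (;;) {
		if (d->precreated > 0) {
			d->precreated -= 1;
			return 0;              /* reservation succeeded */
		}
		if (d->pre_status != 0 && d->pre_status != -ETIMEDOUT)
			return d->pre_status;  /* hard error: give up */
		toy_precreate_step(d);         /* block for progress instead of expiring */
	}
}

int main(void)
{
	struct toy_osp d = { .precreated = 0, .pre_status = 0 };
	printf("reserve rc = %d\n", toy_precreate_reserve(&d));
	return 0;
}
-->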
                            <comment id="108358" author="hongchao.zhang" created="Mon, 2 Mar 2015 03:05:44 +0000"  >&lt;p&gt;the requests sent to OST after MDS failover spent too much time to complete, &lt;a href=&quot;https://testing.hpdd.intel.com/test_logs/a9f3b136-bfe8-11e4-b6c3-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_logs/a9f3b136-bfe8-11e4-b6c3-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: 2810:0:(client.c:1949:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1425098817/real 1425098820]  req@ffff88006c4bc9c0 x1494324416086096/t0(0) o38-&amp;gt;lustre-MDT0000-lwp-MDT0000@10.2.4.167@tcp:12/10 lens 400/544 e 0 to 1 dl 1425098822 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1
Lustre: lustre-MDT0000: Will be in recovery for at least 1:00, or until 2 clients reconnect
Lustre: lustre-MDT0000: Recovery over after 0:01, of 2 clients 2 recovered and 0 were evicted.
LustreError: 3165:0:(osp_precreate.c:1393:osp_precreate_reserve()) wait recovery to complete!
LustreError: 2811:0:(client.c:1866:ptlrpc_check_set()) @@@ Request cost too much time,37 [sent 1425098819/real 1425098819], current 1425098856  req@ffff88006c4bccc0 x1494324416086132/t4294967824(4294967824) o2-&amp;gt;lustre-OST0001-osc-MDT0000@10.2.4.168@tcp:28/4 lens 560/400 e 2 to 0 dl 1425098877 ref 2 fl Interpret:R/0/0 rc 0/0
LustreError: 3165:0:(osp_precreate.c:1384:osp_precreate_reserve()) expired on waiting for precreated objects
LustreError: 2812:0:(client.c:1866:ptlrpc_check_set()) @@@ Request cost too much time,50 [sent 1425098822/real 1425098822], current 1425098872  req@ffff88006fccb980 x1494324416086168/t4294967885(4294967885) o2-&amp;gt;lustre-OST0000-osc-MDT0000@10.2.4.168@tcp:28/4 lens 560/400 e 1 to 0 dl 1425098893 ref 2 fl Interpret:R/0/0 rc 0/0
LustreError: 2812:0:(client.c:1866:ptlrpc_check_set()) Skipped 2 previous similar messages
LustreError: 2811:0:(client.c:1866:ptlrpc_check_set()) @@@ Request cost too much time,57 [sent 1425098819/real 1425098819], current 1425098876  req@ffff88006ea53380 x1494324416086152/t4294967855(4294967855) o2-&amp;gt;lustre-OST0001-osc-MDT0000@10.2.4.168@tcp:28/4 lens 560/400 e 3 to 0 dl 1425098902 ref 2 fl Interpret:R/0/0 rc 0/0

...


LustreError: 3165:0:(osp_precreate.c:1384:osp_precreate_reserve()) expired on waiting for precreated objects
LustreError: 2812:0:(client.c:1866:ptlrpc_check_set()) @@@ Request cost too much time,77 [sent 1425098822/real 1425098822], current 1425098899  req@ffff88007d574680 x1494324416086196/t4294967962(4294967962) o2-&amp;gt;lustre-OST0000-osc-MDT0000@10.2.4.168@tcp:28/4 lens 560/400 e 2 to 0 dl 1425098918 ref 2 fl Interpret:R/0/0 rc 0/0
LNet: Service thread pid 3165 completed after 61.52s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
LustreError: 2812:0:(client.c:1866:ptlrpc_check_set()) Skipped 5 previous similar messages
LustreError: 3165:0:(osp_precreate.c:1384:osp_precreate_reserve()) expired on waiting for precreated objects
Lustre: wanted 1, found 0
LustreError: 3165:0:(osp_precreate.c:1384:osp_precreate_reserve()) expired on waiting for precreated objects
Lustre: wanted 1, found 0
LustreError: 2839:0:(osp_precreate.c:1384:osp_precreate_reserve()) expired on waiting for precreated objects
Lustre: wanted 1, found 0
LustreError: 2839:0:(osp_precreate.c:1384:osp_precreate_reserve()) expired on waiting for precreated objects
Lustre: wanted 1, found 0
LustreError: 2841:0:(osp_precreate.c:1384:osp_precreate_reserve()) expired on waiting for precreated objects
Lustre: wanted 1, found 0
Lustre: wanted 1, found 0
LustreError: 2840:0:(osp_precreate.c:1384:osp_precreate_reserve()) expired on waiting for precreated objects
LustreError: 2840:0:(osp_precreate.c:1384:osp_precreate_reserve()) Skipped 1 previous similar message
Lustre: wanted 1, found 0
Lustre: wanted 1, found 0
Lustre: Skipped 1 previous similar message
LustreError: 2867:0:(client.c:1866:ptlrpc_check_set()) @@@ Request cost too much time,289 [sent 1425098846/real 1425098846], current 1425099135  req@ffff88006c7fb080 x1494324416086280/t0(0) o5-&amp;gt;lustre-OST0001-osc-MDT0000@10.2.4.168@tcp:28/4 lens 432/400 e 9 to 0 dl 1425099148 ref 2 fl Interpret:RN/0/0 rc 0/0
LustreError: 3168:0:(osp_precreate.c:1384:osp_precreate_reserve()) expired on waiting for precreated objects
LustreError: 3168:0:(osp_precreate.c:1384:osp_precreate_reserve()) Skipped 3 previous similar messages
LustreError: 2865:0:(client.c:1866:ptlrpc_check_set()) @@@ Request cost too much time,345 [sent 1425098899/real 1425098899], current 1425099244  req@ffff88006fccb680 x1494324416086820/t0(0) o5-&amp;gt;lustre-OST0000-osc-MDT0000@10.2.4.168@tcp:28/4 lens 432/400 e 5 to 0 dl 1425099251 ref 2 fl Interpret:RN/0/0 rc 0/0
Lustre: wanted 1, found 0
Lustre: Skipped 1 previous similar message
LustreError: 2865:0:(client.c:1866:ptlrpc_check_set()) @@@ Request cost too much time,26 [sent 1425099257/real 1425099257], current 1425099283  req@ffff88006bba46c0 x1494324416088404/t0(0) o5-&amp;gt;lustre-OST0000-osc-MDT0000@10.2.4.168@tcp:28/4 lens 432/400 e 0 to 0 dl 1425099701 ref 2 fl Interpret:RN/0/0 rc 0/0
LustreError: 2865:0:(client.c:1866:ptlrpc_check_set()) Skipped 1 previous similar message
LustreError: 2812:0:(client.c:1866:ptlrpc_check_set()) @@@ Request cost too much time,26 [sent 1425099383/real 1425099383], current 1425099409  req@ffff88006bca3980 x1494324416091456/t4294972478(4294972478) o2-&amp;gt;lustre-OST0000-osc-MDT0000@10.2.4.168@tcp:28/4 lens 560/400 e 0 to 0 dl 1425099827 ref 2 fl Interpret:R/4/0 rc 0/0
LustreError: 2812:0:(client.c:1866:ptlrpc_check_set()) Skipped 1 previous similar message
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="116375" author="gerrit" created="Tue, 26 May 2015 13:59:29 +0000"  >&lt;p&gt;Hongchao Zhang (hongchao.zhang@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/14934&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/14934&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5526&quot; title=&quot;recovery-mds-scale test failover_mds: dd: No space left on device&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5526&quot;&gt;&lt;del&gt;LU-5526&lt;/del&gt;&lt;/a&gt; llite: retry object creation&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: cde90a2316328562d4c95babe09ab3276f18fdc2&lt;/p&gt;</comment>
                            <comment id="117264" author="gerrit" created="Wed, 3 Jun 2015 12:47:27 +0000"  >&lt;p&gt;Hongchao Zhang (hongchao.zhang@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/15122&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/15122&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5526&quot; title=&quot;recovery-mds-scale test failover_mds: dd: No space left on device&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5526&quot;&gt;&lt;del&gt;LU-5526&lt;/del&gt;&lt;/a&gt; llite: retry object creation&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: ddd9319367f2373dc32c491e3ff2f1fd7ea2d617&lt;/p&gt;</comment>
                            <comment id="136037" author="standan" created="Fri, 11 Dec 2015 16:19:28 +0000"  >&lt;p&gt;master, build# 3264, 2.7.64 tag&lt;br/&gt;
Hard Failover: EL7 Server/Client&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/56d3054e-9e20-11e5-91b0-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/56d3054e-9e20-11e5-91b0-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="139389" author="standan" created="Wed, 20 Jan 2016 01:38:27 +0000"  >&lt;p&gt;Another instance found for hardfailover: EL6.7 Server/Client - ZFS&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/df3903c8-bbd7-11e5-8506-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/df3903c8-bbd7-11e5-8506-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="141705" author="standan" created="Tue, 9 Feb 2016 23:56:23 +0000"  >&lt;p&gt;Another instance found for hardfailover : EL7 Server/Client - ZFS, tag 2.7.66, master build 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/f0dd9616-ca6e-11e5-9609-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/f0dd9616-ca6e-11e5-9609-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="18911">LU-3326</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="24004">LU-4846</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="29640">LU-6493</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="28511">LU-6200</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="32681">LU-7309</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="33005">LU-7387</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwu7b:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>15384</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>