<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:04:06 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6883] replay-single test 73a hang and timeout: umount</title>
                <link>https://jira.whamcloud.com/browse/LU-6883</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;replay-single test 73a is hanging on umount and times out. Logs for these failures all take place during review-dne-part-2 and are at:&lt;br/&gt;
2015-07-17 16:05:47 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/0c6e08a4-2ce8-11e5-804c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/0c6e08a4-2ce8-11e5-804c-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-07-17 20:34:00 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/9ce10940-2d10-11e5-b883-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/9ce10940-2d10-11e5-b883-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-07-18 11:21:11 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/de25482e-2d8a-11e5-a112-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/de25482e-2d8a-11e5-a112-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-07-18 12:32:17 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/cf00fd84-2d8f-11e5-b883-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/cf00fd84-2d8f-11e5-b883-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-07-18 21:50:59 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/7975eb6c-2dea-11e5-a0dd-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/7975eb6c-2dea-11e5-a0dd-5254006e85c2&lt;/a&gt;&lt;/p&gt;


&lt;p&gt;On the client console, we have an error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;14:37:16:Lustre: DEBUG MARKER: == replay-single test 73a: open(O_CREAT), unlink, replay, reconnect before open replay, close == 13:36:56 (1437226616)
14:37:16:Lustre: DEBUG MARKER: mcreate /mnt/lustre/fsa-$(hostname); rm /mnt/lustre/fsa-$(hostname)
14:37:16:Lustre: DEBUG MARKER: if [ -d /mnt/lustre2 ]; then mcreate /mnt/lustre2/fsa-$(hostname); rm /mnt/lustre2/fsa-$(hostname); fi
14:37:16:Lustre: DEBUG MARKER: local REPLAY BARRIER on lustre-MDT0000
14:37:16:LustreError: 11-0: lustre-MDT0000-mdc-ffff880037d86400: operation obd_ping to node 10.1.5.72@tcp failed: rc = -107
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;


&lt;p&gt;Trouble seems to be on the MDS1:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;13:37:27:Lustre: DEBUG MARKER: == replay-single test 73a: open(O_CREAT), unlink, replay, reconnect before open replay, close == 13:36:56 (1437226616)
13:37:27:Lustre: DEBUG MARKER: sync; sync; sync
13:37:27:Lustre: DEBUG MARKER: /usr/sbin/lctl --device lustre-MDT0000 notransno
13:37:27:Lustre: DEBUG MARKER: /usr/sbin/lctl --device lustre-MDT0000 readonly
13:37:27:LustreError: 14068:0:(osd_handler.c:1380:osd_ro()) *** setting lustre-MDT0000 read-only ***
13:37:27:LustreError: 14068:0:(osd_handler.c:1380:osd_ro()) Skipped 2 previous similar messages
13:37:27:Turning device dm-0 (0xfd00000) read-only
13:37:27:Lustre: DEBUG MARKER: /usr/sbin/lctl mark mds1 REPLAY BARRIER on lustre-MDT0000
13:37:27:Lustre: DEBUG MARKER: mds1 REPLAY BARRIER on lustre-MDT0000
13:37:27:Lustre: DEBUG MARKER: lctl set_param fail_loc=0x80000302
13:37:27:Lustre: DEBUG MARKER: grep -c /mnt/mds1&apos; &apos; /proc/mounts
13:37:27:Lustre: DEBUG MARKER: umount -d /mnt/mds1
13:37:27:Lustre: Failing over lustre-MDT0000
13:37:27:Lustre: Skipped 6 previous similar messages
13:37:27:Lustre: lustre-MDT0000: Not available for connect from 10.1.5.64@tcp (stopping)
13:37:27:Lustre: Skipped 7 previous similar messages
13:37:27:Lustre: lustre-MDT0000: Not available for connect from 10.1.5.71@tcp (stopping)
13:37:27:Lustre: Skipped 5 previous similar messages
13:37:27:LustreError: 2870:0:(client.c:1144:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff880058e879c0 x1507036455088328/t0(0) o13-&amp;gt;lustre-OST0001-osc-MDT0000@10.1.5.71@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
13:37:27:LustreError: 2870:0:(client.c:1144:ptlrpc_import_delay_req()) Skipped 7 previous similar messages
13:37:27:LustreError: 14256:0:(client.c:1144:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff8800592c46c0 x1507036455088364/t0(0) o1000-&amp;gt;lustre-MDT0001-osp-MDT0000@10.1.5.64@tcp:24/4 lens 248/16608 e 0 to 0 dl 0 ref 2 fl Rpc:/0/ffffffff rc 0/-1
13:37:27:LustreError: 14256:0:(client.c:1144:ptlrpc_import_delay_req()) Skipped 7 previous similar messages
13:37:27:LustreError: 14256:0:(osp_object.c:586:osp_attr_get()) lustre-MDT0001-osp-MDT0000:osp_attr_get update error [0x240000403:0x1:0x0]: rc = -5
13:37:27:LustreError: 14256:0:(osp_object.c:586:osp_attr_get()) Skipped 2 previous similar messages
13:37:27:LustreError: 14256:0:(llog.c:180:llog_cancel_rec()) lustre-MDT0001-osp-MDT0000: fail to write header for llog #0x1:1073742851#00000000: rc = -5
13:37:27:Lustre: lustre-MDT0000: Not available for connect from 10.1.5.69@tcp (stopping)
13:37:27:Lustre: Skipped 7 previous similar messages
13:37:27:Lustre: lustre-MDT0000: Not available for connect from 10.1.5.64@tcp (stopping)
13:37:27:Lustre: Skipped 1 previous similar message
13:37:27:Lustre: lustre-MDT0000: Not available for connect from 10.1.5.64@tcp (stopping)
13:37:27:Lustre: Skipped 15 previous similar messages
14:37:08:********** Timeout by autotest system **********
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;From the MDS syslog:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Jul 18 13:36:58 shadow-26vm12 mrshd[14211]: root@shadow-26vm10.shadow.whamcloud.com as root: cmd=&apos;/usr/sbin/lctl mark &quot;umount -d /mnt/mds1&quot;;echo XXRETCODE:$?&apos;
Jul 18 13:36:58 shadow-26vm12 kernel: Lustre: DEBUG MARKER: umount -d /mnt/mds1
Jul 18 13:36:58 shadow-26vm12 xinetd[1492]: EXIT: mshell status=0 pid=14210 duration=0(sec)
Jul 18 13:36:58 shadow-26vm12 xinetd[1492]: START: mshell pid=14233 from=::ffff:10.1.5.70
Jul 18 13:36:58 shadow-26vm12 mrshd[14234]: root@shadow-26vm10.shadow.whamcloud.com as root: cmd=&apos;(PATH=$PATH:/usr/lib64/lustre/utils:/usr/lib64/lustre/tests:/sbin:/usr/sbin; cd /usr/lib64/lustre/tests; LUSTRE=&quot;/usr/lib64/lustre&quot; sh -c &quot;umount -d /mnt/mds1&quot;);echo XXRETCODE:$?&apos;
Jul 18 13:36:58 shadow-26vm12 kernel: Lustre: Failing over lustre-MDT0000
Jul 18 13:36:58 shadow-26vm12 kernel: Lustre: Skipped 6 previous similar messages
Jul 18 13:37:02 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.64@tcp (stopping)
Jul 18 13:37:02 shadow-26vm12 kernel: Lustre: Skipped 7 previous similar messages
Jul 18 13:37:02 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.71@tcp (stopping)
Jul 18 13:37:02 shadow-26vm12 kernel: Lustre: Skipped 5 previous similar messages
Jul 18 13:37:03 shadow-26vm12 kernel: LustreError: 2870:0:(client.c:1144:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff880058e879c0 x1507036455088328/t0(0) o13-&amp;gt;lustre-OST0001-osc-MDT0000@10.1.5.71@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
Jul 18 13:37:03 shadow-26vm12 kernel: LustreError: 2870:0:(client.c:1144:ptlrpc_import_delay_req()) Skipped 7 previous similar messages
Jul 18 13:37:04 shadow-26vm12 kernel: LustreError: 14256:0:(client.c:1144:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff8800592c46c0 x1507036455088364/t0(0) o1000-&amp;gt;lustre-MDT0001-osp-MDT0000@10.1.5.64@tcp:24/4 lens 248/16608 e 0 to 0 dl 0 ref 2 fl Rpc:/0/ffffffff rc 0/-1
Jul 18 13:37:04 shadow-26vm12 kernel: LustreError: 14256:0:(client.c:1144:ptlrpc_import_delay_req()) Skipped 7 previous similar messages
Jul 18 13:37:04 shadow-26vm12 kernel: LustreError: 14256:0:(osp_object.c:586:osp_attr_get()) lustre-MDT0001-osp-MDT0000:osp_attr_get update error [0x240000403:0x1:0x0]: rc = -5
Jul 18 13:37:04 shadow-26vm12 kernel: LustreError: 14256:0:(osp_object.c:586:osp_attr_get()) Skipped 2 previous similar messages
Jul 18 13:37:04 shadow-26vm12 kernel: LustreError: 14256:0:(llog.c:180:llog_cancel_rec()) lustre-MDT0001-osp-MDT0000: fail to write header for llog #0x1:1073742851#00000000: rc = -5
Jul 18 13:37:05 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.69@tcp (stopping)
Jul 18 13:37:05 shadow-26vm12 kernel: Lustre: Skipped 7 previous similar messages
Jul 18 13:37:12 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.64@tcp (stopping)
Jul 18 13:37:12 shadow-26vm12 kernel: Lustre: Skipped 1 previous similar message
Jul 18 13:37:17 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.64@tcp (stopping)
Jul 18 13:37:17 shadow-26vm12 kernel: Lustre: Skipped 15 previous similar messages
Jul 18 13:37:18 shadow-26vm12 kernel: Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 8 seconds. The obd refcount = 5. Is it stuck?
Jul 18 13:37:25 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.69@tcp (stopping)
Jul 18 13:37:25 shadow-26vm12 kernel: Lustre: Skipped 29 previous similar messages
Jul 18 13:37:34 shadow-26vm12 kernel: Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 16 seconds. The obd refcount = 5. Is it stuck?
Jul 18 13:37:42 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.64@tcp (stopping)
Jul 18 13:37:42 shadow-26vm12 kernel: Lustre: Skipped 49 previous similar messages
Jul 18 13:37:45 shadow-26vm12 ntpd[2234]: can&apos;t open /var/log/ntpstats/loopstats.20150718: Permission denied
Jul 18 13:38:06 shadow-26vm12 kernel: Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 32 seconds. The obd refcount = 5. Is it stuck?
Jul 18 13:38:15 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.69@tcp (stopping)
Jul 18 13:38:15 shadow-26vm12 kernel: Lustre: Skipped 109 previous similar messages
Jul 18 13:39:10 shadow-26vm12 kernel: Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 64 seconds. The obd refcount = 5. Is it stuck?
Jul 18 13:39:20 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.69@tcp (stopping)
Jul 18 13:39:20 shadow-26vm12 kernel: Lustre: Skipped 207 previous similar messages
Jul 18 13:39:53 shadow-26vm12 ntpd[2234]: can&apos;t open /var/log/ntpstats/loopstats.20150718: Permission denied
Jul 18 13:41:18 shadow-26vm12 kernel: Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 128 seconds. The obd refcount = 5. Is it stuck?
Jul 18 13:41:30 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.69@tcp (stopping)
Jul 18 13:41:30 shadow-26vm12 kernel: Lustre: Skipped 415 previous similar messages
Jul 18 13:45:34 shadow-26vm12 kernel: Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 256 seconds. The obd refcount = 5. Is it stuck?
Jul 18 13:45:47 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.64@tcp (stopping)
Jul 18 13:45:47 shadow-26vm12 kernel: Lustre: Skipped 817 previous similar messages
Jul 18 13:54:06 shadow-26vm12 kernel: Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 512 seconds. The obd refcount = 5. Is it stuck?
Jul 18 13:54:20 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.69@tcp (stopping)
Jul 18 13:54:20 shadow-26vm12 kernel: Lustre: Skipped 1645 previous similar messages
Jul 18 14:04:20 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.70@tcp (stopping)
Jul 18 14:04:20 shadow-26vm12 kernel: Lustre: Skipped 1920 previous similar messages
Jul 18 14:11:10 shadow-26vm12 kernel: Lustre: lustre-MDT0000 is waiting for obd_unlinked_exports more than 1024 seconds. The obd refcount = 5. Is it stuck?
Jul 18 14:12:53 shadow-26vm12 ntpd[2234]: can&apos;t open /var/log/ntpstats/loopstats.20150718: Permission denied
Jul 18 14:14:22 shadow-26vm12 kernel: Lustre: lustre-MDT0000: Not available for connect from 10.1.5.64@tcp (stopping)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
</description>
                <environment>review-dne-part-2 in autotest</environment>
        <key id="31147">LU-6883</key>
            <summary>replay-single test 73a hang and timeout: umount</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="laisiyao">Lai Siyao</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                    </labels>
                <created>Mon, 20 Jul 2015 18:21:52 +0000</created>
                <updated>Wed, 26 Aug 2015 07:47:10 +0000</updated>
                            <resolved>Tue, 25 Aug 2015 21:17:30 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>9</watches>
                                                                            <comments>
                            <comment id="121829" author="jay" created="Tue, 21 Jul 2015 16:22:57 +0000"  >&lt;p&gt;Another occurrence: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/08854c48-2ce8-11e5-804c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/08854c48-2ce8-11e5-804c-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="121864" author="jamesanunez" created="Tue, 21 Jul 2015 19:20:09 +0000"  >&lt;p&gt;More failures for master during review-dne-part-2:&lt;br/&gt;
2015-07-20 08:16:36 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/7f9b89a2-2f02-11e5-92dd-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/7f9b89a2-2f02-11e5-92dd-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-07-21 01:53:32 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/052b8fe6-2fa7-11e5-97d6-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/052b8fe6-2fa7-11e5-97d6-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-07-21 04:00:17 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/63068e3c-2fa6-11e5-ad00-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/63068e3c-2fa6-11e5-ad00-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-07-23 20:25:45 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/328ddfe4-31c7-11e5-84cf-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/328ddfe4-31c7-11e5-84cf-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="122547" author="bogl" created="Wed, 29 Jul 2015 15:12:39 +0000"  >&lt;p&gt;another:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/46c38a6e-3586-11e5-be21-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/46c38a6e-3586-11e5-be21-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="122690" author="yujian" created="Thu, 30 Jul 2015 16:01:16 +0000"  >&lt;p&gt;More failure instance on master branch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d3ae5ba6-3659-11e5-84a9-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d3ae5ba6-3659-11e5-84a9-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="122712" author="jamesanunez" created="Thu, 30 Jul 2015 16:36:41 +0000"  >&lt;p&gt;A couple more:&lt;br/&gt;
2015-07-29 09:11:10 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/a14860c0-361e-11e5-b91d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/a14860c0-361e-11e5-b91d-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-07-29 15:16:00 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/2a79fe4c-364e-11e5-830b-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/2a79fe4c-364e-11e5-830b-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="122737" author="pjones" created="Thu, 30 Jul 2015 17:32:01 +0000"  >&lt;p&gt;Lai&lt;/p&gt;

&lt;p&gt;Could you please look into this failure? This is causing several review failures per day atm&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="123615" author="adilger" created="Fri, 7 Aug 2015 18:18:18 +0000"  >&lt;p&gt;Lai, any update on this issue?&lt;/p&gt;</comment>
                            <comment id="123804" author="laisiyao" created="Tue, 11 Aug 2015 01:40:07 +0000"  >&lt;p&gt;I couldn&apos;t reproduce in local test system, and because this is a timeout issue, the debug log doesn&apos;t contain any useful information. I&apos;ve asked Di to help look into it.&lt;/p&gt;</comment>
                            <comment id="125055" author="jgmitter" created="Tue, 25 Aug 2015 17:44:54 +0000"  >&lt;p&gt;Hi Di,&lt;br/&gt;
Have you had a chance at all to look into this issue?  Any input you may have?  We are not seeing this particular failure in testing for several weeks.&lt;br/&gt;
Thanks.&lt;br/&gt;
Joe&lt;/p&gt;</comment>
                            <comment id="125122" author="di.wang" created="Tue, 25 Aug 2015 21:15:35 +0000"  >&lt;p&gt;Hmm, there are not enough information for me there to know the exact reason. But it looks like this failures is caused by something in replay-single.sh 70b, and probably the recent fixes from &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6904&quot; title=&quot;linkea prepare in mdt_reint_rename cause deadlock in 24 hours failover&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6904&quot;&gt;&lt;del&gt;LU-6904&lt;/del&gt;&lt;/a&gt; and &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6924&quot; title=&quot;remote regular file are missing after recovery.&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6924&quot;&gt;&lt;del&gt;LU-6924&lt;/del&gt;&lt;/a&gt; already fix this issue. Let&apos;s close this issue for now if there are no failures for several weeks, and reopen it if we see it again. Thanks&lt;/p&gt;</comment>
                            <comment id="125125" author="pjones" created="Tue, 25 Aug 2015 21:17:30 +0000"  >&lt;p&gt;Thanks Di!&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="31219">LU-6904</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="31262">LU-6924</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxikn:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>