<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:10:51 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-7662] lfsck don&apos;t complete</title>
                <link>https://jira.whamcloud.com/browse/LU-7662</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Error happened during lfsck run of soak FS using build &apos;20160108&apos;. (see &lt;a href=&quot;https://wiki.hpdd.intel.com/display/Releases/Soak+Testing+on+Lola#SoakTestingonLola-20160108&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://wiki.hpdd.intel.com/display/Releases/Soak+Testing+on+Lola#SoakTestingonLola-20160108&lt;/a&gt;) &lt;br/&gt;
DNE is enabled. &lt;/p&gt;

&lt;ul&gt;
	&lt;li&gt;&lt;tt&gt;lfsck&lt;/tt&gt; started on MDS hosting mdt-0:
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@lola-8 ~]# date; lctl lfsck_start -M soaked-MDT0000 -s 1000 -t all -A ; date
Wed Jan 13 04:42:28 PST 2016
Started LFSCK on the device soaked-MDT0000: scrub layout namespace
Wed Jan 13 04:42:28 PST 2016
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&lt;b&gt;No&lt;/b&gt; soak test was running&lt;/p&gt;&lt;/li&gt;
	&lt;li&gt;lfsck_namespace don&apos;t complete phase &lt;em&gt;scanning-phase2&lt;/em&gt;&lt;/li&gt;
	&lt;li&gt;MDSes &lt;tt&gt;lola-9,11&lt;/tt&gt; showed an increasing number of blocked &lt;tt&gt;mdt_out*&lt;/tt&gt; - threads&lt;/li&gt;
	&lt;li&gt;Triggering stack trace lead kernel panic on &lt;tt&gt;lola-11&lt;/tt&gt; (2016-01-13-08:15:22)&lt;/li&gt;
	&lt;li&gt;All MDSes show only minimal utilization of system resources&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;Attached files:&lt;/p&gt;
&lt;ul&gt;
	&lt;li&gt;console, messages files of lola-9,11; containing stack trace information&lt;/li&gt;
	&lt;li&gt;vmcore-dmesg.txt of lola-11&lt;/li&gt;
	&lt;li&gt;&lt;tt&gt;lfsck&lt;/tt&gt; status information of all MDTs&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;Crash file location see next comment.&lt;/p&gt;</description>
                <environment>lola&lt;br/&gt;
build: master, 2.7.64-81-g6fc8da4, 6fc8da41f2ff5156639e89f379adcdbb73ac8567</environment>
        <key id="34098">LU-7662</key>
            <summary>lfsck don&apos;t complete</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="yong.fan">nasf</assignee>
                                    <reporter username="heckes">Frank Heckes</reporter>
                        <labels>
                            <label>soak</label>
                    </labels>
                <created>Wed, 13 Jan 2016 21:13:52 +0000</created>
                <updated>Fri, 29 Jan 2016 16:14:46 +0000</updated>
                            <resolved>Fri, 29 Jan 2016 15:48:04 +0000</resolved>
                                                    <fixVersion>Lustre 2.8.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="138852" author="heckes" created="Wed, 13 Jan 2016 21:20:56 +0000"  >&lt;p&gt;Crash file has been saved to &lt;tt&gt;lola-1:/scratch/crashdumps/lu-7662/lola-11-127.0.0.1-2016-01-13-08:15:22&lt;/tt&gt;&lt;/p&gt;</comment>
                            <comment id="138920" author="heckes" created="Thu, 14 Jan 2016 15:44:18 +0000"  >&lt;p&gt;I tried to stop &lt;tt&gt;lfsck&lt;/tt&gt; by running &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@lola-8 ~]#  lctl lfsck_stop -M soaked-MDT0000 -A
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;on node hosting mdt-0. &lt;br/&gt;
Operation is stall (for hours):&lt;/p&gt;
 &lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;----------------
lola-8
----------------
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root      30223  0.1  0.0      0     0 ?        S    Jan13   2:32 [lfsck]
root      30226  0.0  0.0      0     0 ?        S    Jan13   1:04 [lfsck_namespace]
root      30227  0.1  0.0      0     0 ?        S    Jan13   2:48 [lfsck]
root      30230  0.0  0.0      0     0 ?        S    Jan13   1:30 [lfsck_namespace]
root      42585  0.0  0.0  11140   684 pts/0    S+   04:52   0:00 lctl lfsck_stop -M soaked-MDT0000 -A
----------------
lola-9
----------------
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root      15157  0.1  0.0      0     0 ?        S    Jan13   2:33 [lfsck]
root      15158  0.1  0.0      0     0 ?        S    Jan13   2:31 [lfsck]
root      15163  0.0  0.0      0     0 ?        S    Jan13   1:11 [lfsck_namespace]
root      15164  0.0  0.0      0     0 ?        S    Jan13   0:53 [lfsck_namespace]
----------------
lola-10
----------------
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root      19219  0.1  0.0      0     0 ?        S    Jan13   2:30 [lfsck]
root      19220  0.1  0.0      0     0 ?        S    Jan13   2:40 [lfsck]
root      19225  0.0  0.0      0     0 ?        S    Jan13   0:59 [lfsck_namespace]
root      19226  0.0  0.0      0     0 ?        S    Jan13   0:57 [lfsck_namespace]
----------------
lola-11
----------------
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root       6170  0.0  0.0      0     0 ?        S    Jan13   0:00 [lfsck]
root       6172  0.0  0.0      0     0 ?        S    Jan13   0:09 [lfsck_namespace]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Huge number of mdt_out - threads are blocked on &lt;tt&gt;lola-9,11&lt;/tt&gt;:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@lola-16 soaked]# pdsh -g mds &apos;uptime&apos; | dshbak -c
----------------
lola-8
----------------
 07:47:57 up 3 days,  6:18,  1 user,  load average: 1.47, 1.36, 1.33
----------------
lola-9
----------------
 07:47:57 up 4 days, 20:49,  0 users,  load average: 198.46, 198.44, 198.39
----------------
lola-10
----------------
 07:47:57 up 5 days,  7:52,  0 users,  load average: 0.52, 0.44, 0.37
----------------
lola-11
----------------
 07:47:57 up 23:25,  1 user,  load average: 195.07, 195.23, 194.51

---------------------------------------------------------------------

[root@lola-11 crash]# uptime
 07:38:43 up 23:15,  1 user,  load average: 195.39, 194.76, 193.83
[root@lola-11 crash]# ps aux | grep &apos;D &apos; | grep -v grep | wc -l
196
[root@lola-11 crash]# ps aux | grep &apos;D &apos; | grep -v grep | grep &apos;mdt_out&apos; | wc -l
196
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Typical dump file contains (see attached debug log) a continous stream of CONN -&amp;gt; DISCONN and vice versa messages.&lt;/p&gt;</comment>
                            <comment id="140009" author="yong.fan" created="Tue, 26 Jan 2016 10:07:48 +0000"  >&lt;p&gt;This ticket contains server issues. Before the LFSCK, there were a lot of DNE tests on the lola cluster, and it is unknown the system consistency.&lt;/p&gt;

&lt;p&gt;According to the log lfsck-info.txt.bz2, there were some lfsck failures, but without further logs, we do not know what happened (related LFSCK debug logs have been overwritten).&lt;/p&gt;

&lt;p&gt;In the log console-lola-9.log.bz2, the namespace LFSCK was in double scan to verify some remote directory&apos;s linkEA. For that, the namespace LFSCK needs to locate the remote parent via OUT RPC, such RPC is uninterruptible. If the remote MDT did not handle such RPC (or the RPC handler was blocked on remote MDT), then lfsck_stop would hung there. That is why the &quot;lfsck_stop&quot; did not work. It is known trouble, and has been resolved by the patches &lt;a href=&quot;http://review.whamcloud.com/17032/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17032/&lt;/a&gt; and &lt;a href=&quot;http://review.whamcloud.com/#/c/18082/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/18082/&lt;/a&gt;. These two patches allow the LFSCK to be stopped even if related RPC are blocked.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;lfsck_namespa S 0000000000000009     0 15163      2 0x00000080^M
 ffff880336279540 0000000000000046 0000000000000000 000000005696abdc^M
 0000000000000000 0000000000000000 00013ca15062f3e9 ffffffffa0a77805^M
 ffff880700000018 0000000114be3200 ffff88033d7ec5f8 ffff880336279fd8^M
Call Trace:^M
 [&amp;lt;ffffffffa0a77805&amp;gt;] ? ptl_send_rpc+0x685/0xea0 [ptlrpc]^M
 [&amp;lt;ffffffff8152b222&amp;gt;] schedule_timeout+0x192/0x2e0^M
 [&amp;lt;ffffffff81087540&amp;gt;] ? process_timeout+0x0/0x10^M
 [&amp;lt;ffffffffa0a70cd9&amp;gt;] ptlrpc_set_wait+0x319/0xa20 [ptlrpc]^M
 [&amp;lt;ffffffff81064c00&amp;gt;] ? default_wake_function+0x0/0x20^M
 [&amp;lt;ffffffffa0a7d385&amp;gt;] ? lustre_msg_set_jobid+0xf5/0x130 [ptlrpc]^M
 [&amp;lt;ffffffffa0a71461&amp;gt;] ptlrpc_queue_wait+0x81/0x220 [ptlrpc]^M
 [&amp;lt;ffffffffa138e101&amp;gt;] osp_remote_sync+0x121/0x190 [osp]^M
 [&amp;lt;ffffffffa1370cd8&amp;gt;] osp_attr_get+0x428/0x6e0 [osp]^M
 [&amp;lt;ffffffffa13726f7&amp;gt;] osp_object_init+0x1c7/0x330 [osp]^M
 [&amp;lt;ffffffffa0853648&amp;gt;] lu_object_alloc+0xd8/0x320 [obdclass]^M
 [&amp;lt;ffffffffa0854a31&amp;gt;] lu_object_find_try+0x151/0x260 [obdclass]^M
 [&amp;lt;ffffffffa0854bf1&amp;gt;] lu_object_find_at+0xb1/0xe0 [obdclass]^M
 [&amp;lt;ffffffffa0cf8b73&amp;gt;] ? fld_server_lookup+0x53/0x330 [fld]^M
 [&amp;lt;ffffffffa0854c5f&amp;gt;] lu_object_find_slice+0x1f/0x80 [obdclass]^M
 [&amp;lt;ffffffffa1101220&amp;gt;] lfsck_namespace_dsd_single+0x200/0xd50 [lfsck]^M
 [&amp;lt;ffffffffa1106406&amp;gt;] lfsck_namespace_double_scan_dir+0x6d6/0xe40 [lfsck]^M
 [&amp;lt;ffffffffa1106ec4&amp;gt;] lfsck_namespace_double_scan_one+0x354/0x1330 [lfsck]^M
 [&amp;lt;ffffffffa0854bf1&amp;gt;] ? lu_object_find_at+0xb1/0xe0 [obdclass]^M
 [&amp;lt;ffffffffa110845d&amp;gt;] lfsck_namespace_double_scan_one_trace_file+0x5bd/0x8d0 [lfsck]^M
 [&amp;lt;ffffffffa110c643&amp;gt;] lfsck_namespace_assistant_handler_p2+0x3b3/0x1830 [lfsck]^M
 [&amp;lt;ffffffff81087540&amp;gt;] ? process_timeout+0x0/0x10^M
 [&amp;lt;ffffffffa10ed913&amp;gt;] lfsck_assistant_engine+0x1633/0x2010 [lfsck]^M
 [&amp;lt;ffffffff81064c00&amp;gt;] ? default_wake_function+0x0/0x20^M
 [&amp;lt;ffffffffa10ec2e0&amp;gt;] ? lfsck_assistant_engine+0x0/0x2010 [lfsck]^M
 [&amp;lt;ffffffff8109e78e&amp;gt;] kthread+0x9e/0xc0^M
 [&amp;lt;ffffffff8100c28a&amp;gt;] child_rip+0xa/0x20^M
 [&amp;lt;ffffffff8109e6f0&amp;gt;] ? kthread+0x0/0xc0^M
 [&amp;lt;ffffffff8100c280&amp;gt;] ? child_rip+0x0/0x20^M
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Another problem is that there were a lot of &quot;mdt_outxxx&quot; thread hang on the MDT as the shown stack:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[&amp;lt;ffffffffa0866b7d&amp;gt;] lu_object_find_at+0x3d/0xe0 [obdclass]
[&amp;lt;ffffffff81064c00&amp;gt;] ? default_wake_function+0x0/0x20
[&amp;lt;ffffffffa0867a9c&amp;gt;] dt_locate_at+0x1c/0xa0 [obdclass]
[&amp;lt;ffffffffa0b01e2e&amp;gt;] out_handle+0x105e/0x19a0 [ptlrpc]
[&amp;lt;ffffffff8105872d&amp;gt;] ? check_preempt_curr+0x6d/0x90
[&amp;lt;ffffffff8152b83e&amp;gt;] ? mutex_lock+0x1e/0x50
[&amp;lt;ffffffffa0af132a&amp;gt;] ? req_can_reconstruct+0x6a/0x120 [ptlrpc]
[&amp;lt;ffffffffa0af8bbc&amp;gt;] tgt_request_handle+0x8ec/0x1470 [ptlrpc]
[&amp;lt;ffffffffa0aa0231&amp;gt;] ptlrpc_main+0xe41/0x1910 [ptlrpc]
[&amp;lt;ffffffff8152a39e&amp;gt;] ? thread_return+0x4e/0x7d0
[&amp;lt;ffffffffa0a9f3f0&amp;gt;] ? ptlrpc_main+0x0/0x1910 [ptlrpc]
[&amp;lt;ffffffff8109e78e&amp;gt;] kthread+0x9e/0xc0
[&amp;lt;ffffffff8100c28a&amp;gt;] child_rip+0xa/0x20
[&amp;lt;ffffffff8109e6f0&amp;gt;] ? kthread+0x0/0xc0
[&amp;lt;ffffffff8100c280&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The log shows that all the &quot;mdt_outxxx&quot; threads were blocked at the lu_object_find_at(). Related OUT RPCs were triggered by the namespace LFSCK on remote MDT for locating remote parent object. It must because that the parent object was in cache and marked as dying. But it is unknown (no logs) who was destroying the parent object and why it was not purged out of cache after the destroying.&lt;/p&gt;

&lt;p&gt;Di, is there any known issue about destroying object under DNE mode?&lt;/p&gt;

&lt;p&gt;One thing to be suspected is &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7680&quot; title=&quot;mdd_migrate_update_name() doesn&amp;#39;t put object on the orphan list&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7680&quot;&gt;&lt;del&gt;LU-7680&lt;/del&gt;&lt;/a&gt;. So please try with the following three patches for further verification. Thanks!&lt;br/&gt;
1) &lt;a href=&quot;http://review.whamcloud.com/17032/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17032/&lt;/a&gt;&lt;br/&gt;
2) &lt;a href=&quot;http://review.whamcloud.com/#/c/18082/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/18082/&lt;/a&gt;&lt;br/&gt;
3) &lt;a href=&quot;http://review.whamcloud.com/#/c/18032/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/18032/&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="140010" author="heckes" created="Tue, 26 Jan 2016 10:21:06 +0000"  >&lt;p&gt;Ok. I&apos;ll prepare a build together with Di&apos;s latest DNE patches contained in &lt;a href=&quot;http://review.whamcloud.com/#/c/16838/39&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/16838/39&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="140025" author="heckes" created="Tue, 26 Jan 2016 13:49:32 +0000"  >&lt;p&gt;Hm, the first patch can&apos;t be applied:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[soakbuilder@lhn lustre-release]$ for i in /scratch/rpms/20160126/patches/*.patch; do git am $i; done
Applying: LU-0000 dne: dne llog fixes
warning: lustre/tests/conf-sanity.sh has type 100755, expected 100644
Applying: LU-6684 lfsck: stop lfsck even if some servers offline
error: patch failed: lustre/include/lustre_net.h:605
error: lustre/include/lustre_net.h: patch does not apply
error: patch failed: lustre/include/obd_support.h:557
error: lustre/include/obd_support.h: patch does not apply
error: patch failed: lustre/lfsck/lfsck_engine.c:1577
error: lustre/lfsck/lfsck_engine.c: patch does not apply
error: patch failed: lustre/lfsck/lfsck_internal.h:817
error: lustre/lfsck/lfsck_internal.h: patch does not apply
error: patch failed: lustre/lfsck/lfsck_layout.c:3248
error: lustre/lfsck/lfsck_layout.c: patch does not apply
error: patch failed: lustre/lfsck/lfsck_lib.c:31
error: lustre/lfsck/lfsck_lib.c: patch does not apply
error: patch failed: lustre/lfsck/lfsck_namespace.c:3931
error: lustre/lfsck/lfsck_namespace.c: patch does not apply
error: patch failed: lustre/obdclass/obd_mount_server.c:477
error: lustre/obdclass/obd_mount_server.c: patch does not apply
error: patch failed: lustre/osp/osp_trans.c:454
error: lustre/osp/osp_trans.c: patch does not apply
error: patch failed: lustre/ptlrpc/client.c:1661
error: lustre/ptlrpc/client.c: patch does not apply
error: patch failed: lustre/tests/sanity-lfsck.sh:4291
error: lustre/tests/sanity-lfsck.sh: patch does not apply
Patch failed at 0001 LU-6684 lfsck: stop lfsck even if some servers offline
When you have resolved this problem run &quot;git am --resolved&quot;.
If you would prefer to skip this patch, instead run &quot;git am --skip&quot;.
To restore the original branch and stop patching run &quot;git am --abort&quot;.
previous rebase directory /home/soakbuilder/repos/lustre-release/.git/rebase-apply still exists but mbox given.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Patch details:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[soakbuilder@lhn lustre-release]$ ls -1 /scratch/rpms/20160126/patches/
001-LU-0000_dne_dne_llog_fixes-PatchSet39.patch
002-LU-6684_lfsck_stop_lfsck_even_if_some_servers_offline-PatchSet6.patch
003-LU-6684_lfsck_set_the_lfsck_notify_as_interruptable-PatchSet3.patch
004-LU-7680_mdd_put_migrated_object_on_the_orphan_list
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Status of master branch used to create sub-branch :&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[soakbuilder@lhn lustre-release]$ git describe ; git log | head -1
2.7.65-38-g607f691
commit 607f6919ea67b101796630d4b55649a12ea0e859
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="140026" author="yong.fan" created="Tue, 26 Jan 2016 13:54:38 +0000"  >&lt;p&gt;The patch &lt;a href=&quot;http://review.whamcloud.com/17032/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17032/&lt;/a&gt; has already been landed to the latest master branch. If you are working on the latest master, then please apply the 2nd and 3rd patches directly.&lt;/p&gt;</comment>
                            <comment id="140285" author="jgmitter" created="Wed, 27 Jan 2016 20:56:04 +0000"  >&lt;p&gt;In triage today, it was reported that further work on the patch is needed after experiencing more failures.  nasf is actively looking at it.  &lt;/p&gt;</comment>
                            <comment id="140490" author="yong.fan" created="Fri, 29 Jan 2016 05:39:52 +0000"  >&lt;p&gt;The patch &lt;a href=&quot;http://review.whamcloud.com/#/c/18082/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/18082/&lt;/a&gt; has been verified on lola today, works. But there is something can be improved.&lt;/p&gt;</comment>
                            <comment id="140543" author="yong.fan" created="Fri, 29 Jan 2016 15:45:22 +0000"  >&lt;p&gt;The patch &lt;a href=&quot;http://review.whamcloud.com/#/c/18082/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/18082/&lt;/a&gt; have been improved to handle lola trouble more properly.&lt;/p&gt;</comment>
                            <comment id="140544" author="yong.fan" created="Fri, 29 Jan 2016 15:47:37 +0000"  >&lt;p&gt;It is another failure instance of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6684&quot; title=&quot;lctl lfsck_stop hangs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6684&quot;&gt;&lt;del&gt;LU-6684&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="140545" author="simmonsja" created="Fri, 29 Jan 2016 16:13:28 +0000"  >&lt;p&gt;Since this ticket, which was a blocker, is a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6684&quot; title=&quot;lctl lfsck_stop hangs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6684&quot;&gt;&lt;del&gt;LU-6684&lt;/del&gt;&lt;/a&gt; shouldn&apos;t &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6684&quot; title=&quot;lctl lfsck_stop hangs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6684&quot;&gt;&lt;del&gt;LU-6684&lt;/del&gt;&lt;/a&gt; be marked as a blocker then&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="30488">LU-6684</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="20107" name="console-lola-11.log.bz2" size="92451" author="heckes" created="Wed, 13 Jan 2016 21:28:17 +0000"/>
                            <attachment id="20106" name="console-lola-9.log.bz2" size="71954" author="heckes" created="Wed, 13 Jan 2016 21:28:17 +0000"/>
                            <attachment id="20108" name="lfsck-info.txt.bz2" size="2788" author="heckes" created="Wed, 13 Jan 2016 21:28:17 +0000"/>
                            <attachment id="20135" name="lu-7662-lola-11-1452785464.17420-lustre-log" size="175090" author="heckes" created="Thu, 14 Jan 2016 15:46:45 +0000"/>
                            <attachment id="20110" name="messages-lola-11.log.bz2" size="35673" author="heckes" created="Wed, 13 Jan 2016 21:28:17 +0000"/>
                            <attachment id="20109" name="messages-lola-9.log.bz2" size="47141" author="heckes" created="Wed, 13 Jan 2016 21:28:17 +0000"/>
                            <attachment id="20111" name="vmcore-dmesg.txt.bz2" size="33612" author="heckes" created="Wed, 13 Jan 2016 21:28:17 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxy47:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>