<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:51:37 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-12328] FLR mirroring on 2.12.1-1 not usable if OST is down</title>
                <link>https://jira.whamcloud.com/browse/LU-12328</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;See below for stripe details on the file &quot;mirror10&quot;. If OST idx 1 is unmounted and made unavailable, performance drops down to 1/10th of expected performance. The client has to timeout on OST idx1 before it tries to read from OST idx 7. This happens for each 1MB block as that is the block size being used resulting in very poor performance.&#160;&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;$ lfs getstripe mirror10
mirror10
 lcm_layout_gen: 5
 lcm_mirror_count: 2
 lcm_entry_count: 2
 lcme_id: 65537
 lcme_mirror_id: 1
 lcme_flags: init
 lcme_extent.e_start: 0
 lcme_extent.e_end: EOF
 lmm_stripe_count: 1
 lmm_stripe_size: 1048576
 lmm_pattern: raid0
 lmm_layout_gen: 0
 lmm_stripe_offset: 1
 lmm_pool: 01
 lmm_objects:
 - 0: { l_ost_idx: 1, l_fid: [0x100010000:0x280a8:0x0] }

lcme_id: 131074
 lcme_mirror_id: 2
 lcme_flags: init
 lcme_extent.e_start: 0
 lcme_extent.e_end: EOF
 lmm_stripe_count: 1
 lmm_stripe_size: 1048576
 lmm_pattern: raid0
 lmm_layout_gen: 0
 lmm_stripe_offset: 7
 lmm_pool: 02
 lmm_objects:
 - 0: { l_ost_idx: 7, l_fid: [0x100070000:0x28066:0x0] }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>RHEL 7.6</environment>
        <key id="55719">LU-12328</key>
            <summary>FLR mirroring on 2.12.1-1 not usable if OST is down</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bobijam">Zhenyu Xu</assignee>
                                    <reporter username="raot">Joe Frith</reporter>
                        <labels>
                    </labels>
                <created>Wed, 22 May 2019 19:24:11 +0000</created>
                <updated>Mon, 25 Nov 2019 21:18:42 +0000</updated>
                            <resolved>Fri, 18 Oct 2019 01:09:18 +0000</resolved>
                                    <version>Lustre 2.12.1</version>
                                    <fixVersion>Lustre 2.13.0</fixVersion>
                    <fixVersion>Lustre 2.12.4</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="247626" author="bobijam" created="Fri, 24 May 2019 07:55:10 +0000"  >&lt;p&gt;When the client read path encountered read error, it would retry another mirror, and do nothing to mark the mirror/replica unavailable.  We could mark the mirror&apos;s unavailability in the memory so that until the inode is dropped from cache, read will try avoid reading the unavailable mirror.&lt;/p&gt;</comment>
                            <comment id="247660" author="gerrit" created="Fri, 24 May 2019 18:10:11 +0000"  >&lt;p&gt;Bobi Jam (bobijam@hotmail.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/34952&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34952&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12328&quot; title=&quot;FLR mirroring on 2.12.1-1 not usable if OST is down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12328&quot;&gt;&lt;del&gt;LU-12328&lt;/del&gt;&lt;/a&gt; flr: mark avoiding mirror in read&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 6e9139178be3a37f60e39aaabb87871788e9844a&lt;/p&gt;</comment>
                            <comment id="247661" author="pfarrell" created="Fri, 24 May 2019 18:19:57 +0000"  >&lt;p&gt;Jinshan pointed out that in the simple OST offline case, he doesn&apos;t think the client should keep timing out every request:&lt;br/&gt;
&quot;hmm.. This definitely is not expected. As long as ost 1 is down, it should be returned immediately from OSC layer and tries to read the 2nd mirror that is located on ost 7. For the following blocks, it should not even try ost1 but go to 7 directly.&lt;br/&gt;
&#160;&lt;br/&gt;
Would you please collect Lustre log and send&#160;it to me? You can collect logs on client side as follows:&lt;br/&gt;
0. create mirrored file&lt;br/&gt;
1. lctl set_param debug=-1 &amp;amp;&amp;amp; lctl clear&lt;br/&gt;
2. lctl mark &quot;======= start ========&quot;&lt;br/&gt;
3. read the file&lt;br/&gt;
4. lctl dk &amp;gt; log.txt&lt;br/&gt;
&#160;&lt;br/&gt;
and send me the log.txt file. If you can reproduce this problem consistently, please use a small file so that it would be easier to check the log.&lt;br/&gt;
&#160;&lt;br/&gt;
Jinshan&quot;&lt;/p&gt;

&lt;p&gt;We should probably still have a concept of a mirror being unhealthy for when something is intermittent but not fully offline.&lt;/p&gt;</comment>
                            <comment id="247663" author="adilger" created="Fri, 24 May 2019 19:05:49 +0000"  >&lt;p&gt;I guess the question is whether FLR expects the underlying OSC to fail the RPC quickly if the OST is offline, so that the upper layers do not need to track this themselves?&lt;/p&gt;</comment>
                            <comment id="247664" author="adilger" created="Fri, 24 May 2019 19:31:12 +0000"  >&lt;p&gt;Tejas, can you please also attach your log files here.&lt;/p&gt;</comment>
                            <comment id="247676" author="raot" created="Sat, 25 May 2019 00:14:59 +0000"  >&lt;p&gt;I have attached the log file.&#160;&#160;&lt;/p&gt;</comment>
                            <comment id="247682" author="bobijam" created="Sat, 25 May 2019 03:52:30 +0000"  >&lt;p&gt;From the log I can see that for every 1MB read, the lov_io&apos;s lis_mirror_index is cleared so that the read try start reading from mirror 0 again.&lt;/p&gt;

&lt;p&gt;In every ll_file_aio_read() iteration, the lu_env is a new one so that lov_io::lis_mirror_index does not have its old value I think.&lt;/p&gt;</comment>
                            <comment id="247709" author="adilger" created="Sat, 25 May 2019 06:21:57 +0000"  >&lt;p&gt;Alex&apos;s patch to move &lt;tt&gt;lu_env&lt;/tt&gt; into a global hash indexed by the process &lt;em&gt;could&lt;/em&gt; help this case, so that &lt;tt&gt;lu_env&lt;/tt&gt; stays around over multiple syscalls. However, there would be lifetime issues for how long we need to keep the &lt;tt&gt;lu_env&lt;/tt&gt; after a syscall completes, since we would not be notified when a process exits...  &lt;/p&gt;

&lt;p&gt;We &lt;em&gt;could&lt;/em&gt; tie the state to the open file handle instead of the lu_env, which would be an improvement (file handles disappear when the process exits), but keeping it in the file layout (in memory) as suggested on the patch would be even better. This state affects all threads accessing that file, so there is no reason to keep it in a per process structure like lu_env or file descriptor and have to re-discover it for each thread (or syscall!) accessing that file. &lt;/p&gt;

&lt;p&gt;My top preference is that we track the state on the OSC itself, since this is really global to all files stored on the OST.  That is essentially what the &quot;active&quot; state is. &lt;/p&gt;</comment>
                            <comment id="247736" author="jinshan" created="Sun, 26 May 2019 04:04:10 +0000"  >&lt;p&gt;I realized this piece of code has been removed from review:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt; 247                 /* move replica index to the next one */
 248                 spin_lock(&amp;amp;comp-&amp;gt;lo_write_lock);
 249                 if (index == comp-&amp;gt;lo_preferred_replica) {
 250                         do {
 251                                 index = (index + 1) % comp-&amp;gt;lo_entry_count;
 252                                 if (comp-&amp;gt;lo_entries[index].lle_valid)
 253                                         break;
 254                         } while (index != comp-&amp;gt;lo_preferred_replica);
 255
 256                         /* reset preferred replica so that other threads can
 257                          * take advantage of our retries. */
 258                         comp-&amp;gt;lo_preferred_replica = index;
 259                 } else {
 260                         /* preferred index was moved by other thread */
 261                         index = comp-&amp;gt;lo_preferred_replica;
 262                 }
 263                 spin_unlock(&amp;amp;comp-&amp;gt;lo_write_lock);
 264
 265                 CDEBUG(D_VFSTRACE, DFID &quot; move replica from %d to %d, &quot;
 266                        &quot;have retried: %d\n&quot;, PFID(lu_object_fid(lov2lu(obj))),
 267                        lio-&amp;gt;lis_replica_index, index, io-&amp;gt;ci_ndelay_tried);
 268
 269                 lio-&amp;gt;lis_replica_index = index;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This piece of code just tracks the last successful mirror. Right now, since lo_preferred_mirror is also used by write, we should add a new field called lo_last_success_mirror or similar to track the last success mirror.&lt;/p&gt;

&lt;p&gt;Tying the last success mirror to open file seems to have a problem that different opening files would use different mirror therefore the same piece of data would be cached more than once.&lt;/p&gt;

&lt;p&gt;Meanwhile, we should revise the commit:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;commit 5a6ceb664f07812c351786c1043da71ff5027f8c
Author: Alex Zhuravlev &amp;lt;alexey.zhuravlev@intel.com&amp;gt;
Date:   Mon Sep 28 16:50:15 2015 +0300

    LU-7236 ptlrpc: idle connections can disconnect
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;to honor rq_no_delay in the RPC&lt;/p&gt;</comment>
                            <comment id="247822" author="adilger" created="Tue, 28 May 2019 04:31:22 +0000"  >&lt;blockquote&gt;
&lt;p&gt;I realized this piece of code has been removed from review:&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;JInshan, I don&apos;t see any record of that code on master.  Was it maybe only in a patch on the FLR branch, or only in your local checkout?&lt;/p&gt;

&lt;p&gt;Alex, any thoughts on how to fix the issue Jinshan describes?&lt;/p&gt;</comment>
                            <comment id="248025" author="raot" created="Thu, 30 May 2019 00:22:02 +0000"  >&lt;p&gt;This is a major issue for us as we cannot continue with maintenance unless this issue is fixed.&#160;&lt;/p&gt;

&lt;p&gt;Will this be included in the next release 2.12.3? Any time-frame on this?&lt;/p&gt;</comment>
                            <comment id="248034" author="bzzz" created="Thu, 30 May 2019 04:03:11 +0000"  >&lt;p&gt;&amp;gt; to honor rq_no_delay in the RPC&lt;br/&gt;
thinking how to deal with that, but the important thing is that the semantics has changed - if the connection is idle, then you have to wait, some time to try to reconnect?&lt;br/&gt;
for example, all the connections can be idle, do you expect an error in this case?&lt;/p&gt;

</comment>
                            <comment id="248038" author="adilger" created="Thu, 30 May 2019 08:10:42 +0000"  >&lt;p&gt;Alex, ideally there would be a two-stage approach for FLR.  For reads it would try whichever OST is preferred.  If the OSC is offline then it could be skipped initially, and the read go to the other mirror copies if the OSCs are online.  If none are online, then it should wait on the preferred OSC.  For writes, the MDS selects which replica should be used, so the client will have to wait until the OSC is connected again.&lt;/p&gt;</comment>
                            <comment id="248039" author="bzzz" created="Thu, 30 May 2019 08:13:22 +0000"  >&lt;p&gt;that logic would prevent balancing?&lt;/p&gt;</comment>
                            <comment id="248086" author="jinshan" created="Fri, 31 May 2019 00:33:56 +0000"  >&lt;blockquote&gt;&lt;p&gt;JInshan, I don&apos;t see any record of that code on master. Was it maybe only in a patch on the FLR branch, or only in your local checkout?&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Yes, I found this piece of code from my local branch. That version of patch was an earlier version of implementation. Notice that the name was &apos;replica&apos; at that time. I believe it should be in one of abandoned patch.&lt;/p&gt;

&lt;p&gt;If the connection is in IDLE state, probably it should return immediately if the RPC has `rq_no_delay` set, and I tend to think it also should kick off the reconnection asynchronously.&lt;/p&gt;

&lt;p&gt;In the current implementation of FLR, it iterates all mirrors until it finds one available to read. If none is available, it will wait for 10ms and restart trying. Hopefully, it would find that one mirror that becomes available to read if it kicks off reconnect earlier.&lt;/p&gt;</comment>
                            <comment id="248264" author="bzzz" created="Mon, 3 Jun 2019 13:54:49 +0000"  >&lt;p&gt;in terms of latency it makes sense to use first/any available target. in terms of throughput it makes sense to balance I/O among the targets. thus I guess the code should be able to detect the point where IO becomes &quot;massive&quot; for specific object and then use idling connections, but not sooner?&lt;br/&gt;
would hitting max-RPC-in-flight be another way to detect when balancing makes sense?&lt;br/&gt;
yet another thought is that it doesn&apos;t make sense to allocate/prepare RPC against idling connection unless we really want to use it - i.e. rq_no_delay isn&apos;t really the best interface for this kind of logic. and even when we want to enable that connection (due to balancing), we don&apos;t want to block with rq_no_delay, but proceed with FULL one and initiate idling one?&lt;/p&gt;</comment>
                            <comment id="248283" author="jinshan" created="Mon, 3 Jun 2019 18:17:20 +0000"  >&lt;p&gt;Throughput from a single node has never been a goal for FLR, so that current logic is to find an available mirror and stick with that one.&lt;/p&gt;

&lt;p&gt;And yes, I think we should not block `rq_no_delay`.&lt;/p&gt;

&lt;p&gt;Let&apos;s put the IDLE connection away a little bit first - if I remember it correctly, the user is actually experiencing a problem that the connection is in DISCON, should we fix it first?&lt;/p&gt;</comment>
                            <comment id="248306" author="adilger" created="Mon, 3 Jun 2019 23:07:14 +0000"  >&lt;p&gt;Alex, I definitely have some ideas on client-side read policy, in order to maximize global throughput vs. single-client throughput, from LDEV-436:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;In particular, it would be good if reading the same data from a file will normally read from the same OST/replica so that this can be handled from the cache, rather than using a random replica and forcing disk reads on multiple OSTs. Not only does this avoid disk activity, but it also avoids the case of a single client doing reads from multiple OSTs and needing to get DLM locks from each one.&lt;/p&gt;

&lt;p&gt;If the file is getting very large (e.g. multi-GB), it makes sense to spread the read workload across multiple OSTs in some deterministic manner (e.g. replica count and file offset) so that there are multiple OSTs active on the file, and at least several of the replicas active if many clients are reading at different offsets from the same file.&lt;/p&gt;

&lt;p&gt;If there are large numbers of replicas for a single file, then the clients should spread the read workload across all of them (e.g. based on client NID), on the assumption that a user creates 10+ replicas of a file to increase the read bandwidth.&lt;/p&gt;&lt;/blockquote&gt;</comment>
                            <comment id="248380" author="bzzz" created="Tue, 4 Jun 2019 14:47:11 +0000"  >&lt;p&gt;I&apos;m not against changing semantics of rq_no_delay, but it should be noticed that another users (like lfs df) would need to be changed then where the callers wants to try to connect at least once before giving up.&lt;br/&gt;
given extra costs of preparing RPC I think a better/cheaper interface to check connection status is needed.&lt;/p&gt;</comment>
                            <comment id="248810" author="gerrit" created="Sat, 8 Jun 2019 05:35:49 +0000"  >&lt;p&gt;Jinshan Xiong (jinshan.xiong@gmail.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/35111&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/35111&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12328&quot; title=&quot;FLR mirroring on 2.12.1-1 not usable if OST is down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12328&quot;&gt;&lt;del&gt;LU-12328&lt;/del&gt;&lt;/a&gt; flr: preserve last read mirror&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 7f9985832de7699f06fdef2916a280a3666ca7cf&lt;/p&gt;</comment>
                            <comment id="249969" author="raot" created="Tue, 25 Jun 2019 13:52:37 +0000"  >&lt;p&gt;Will this patch be included in&#160;Lustre 2.12.3? We are delaying maintenance because of this issue.&#160;&lt;/p&gt;</comment>
                            <comment id="250044" author="adilger" created="Wed, 26 Jun 2019 06:06:34 +0000"  >&lt;p&gt;As yet the patch has not landed on master, so that would need to happen before it can land to b2_12.&lt;/p&gt;</comment>
                            <comment id="250806" author="gerrit" created="Sun, 7 Jul 2019 15:15:59 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/35111/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/35111/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12328&quot; title=&quot;FLR mirroring on 2.12.1-1 not usable if OST is down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12328&quot;&gt;&lt;del&gt;LU-12328&lt;/del&gt;&lt;/a&gt; flr: preserve last read mirror&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 810f2a5fef577b4f0f6a58ab234cf29afd96c748&lt;/p&gt;</comment>
                            <comment id="250913" author="gerrit" created="Tue, 9 Jul 2019 17:27:45 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/35450&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/35450&lt;/a&gt;&lt;br/&gt;
Subject: Revert &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12328&quot; title=&quot;FLR mirroring on 2.12.1-1 not usable if OST is down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12328&quot;&gt;&lt;del&gt;LU-12328&lt;/del&gt;&lt;/a&gt; flr: preserve last read mirror&quot;&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: e4788166435a05e5fe39107ebbcb167e13a74bcc&lt;/p&gt;</comment>
                            <comment id="250914" author="gerrit" created="Tue, 9 Jul 2019 17:28:00 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/35450/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/35450/&lt;/a&gt;&lt;br/&gt;
Subject: Revert &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12328&quot; title=&quot;FLR mirroring on 2.12.1-1 not usable if OST is down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12328&quot;&gt;&lt;del&gt;LU-12328&lt;/del&gt;&lt;/a&gt; flr: preserve last read mirror&quot;&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 0a8750628d9a87f686b917c88e42093a52a78ae3&lt;/p&gt;</comment>
                            <comment id="253962" author="raot" created="Fri, 30 Aug 2019 20:45:37 +0000"  >&lt;p&gt;Did the patch get reverted after being included in the master?&#160;&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;We are still hoping that this issue gets resolved so we can go ahead with the maintenance.&#160;&lt;/p&gt;</comment>
                            <comment id="253966" author="adilger" created="Sat, 31 Aug 2019 00:15:54 +0000"  >&lt;p&gt; The patch was reverted because it was causing frequent crashes in testing (&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12525&quot; title=&quot;sanity-flr test 200 and others asertion in osc_page_delete&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12525&quot;&gt;&lt;del&gt;LU-12525&lt;/del&gt;&lt;/a&gt;).&lt;/p&gt;

&lt;p&gt;The original patch &lt;a href=&quot;https://review.whamcloud.com/34952&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34952&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12328&quot; title=&quot;FLR mirroring on 2.12.1-1 not usable if OST is down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12328&quot;&gt;&lt;del&gt;LU-12328&lt;/del&gt;&lt;/a&gt; flr: avoid reading unhealthy mirror&lt;/tt&gt;&quot; should fix the original problem, but it needs to be refreshed again.&lt;/p&gt;</comment>
                            <comment id="254834" author="raot" created="Mon, 16 Sep 2019 23:34:22 +0000"  >&lt;p&gt;Any chance this will get included in the 2.12.3? We are stuck and cannot migrate due to this issue.&#160;&lt;/p&gt;</comment>
                            <comment id="256094" author="adilger" created="Tue, 8 Oct 2019 20:54:06 +0000"  >&lt;p&gt;Tejas, I don&apos;t think this patch will make it into 2.12.3, because it hasn&apos;t yet landed to master.&lt;/p&gt;

&lt;p&gt;However, I think the current version of the patch is in good enough shape for you to test.  It would be useful if you could give the latest patch a try and let us know if this is working for you.&lt;/p&gt;</comment>
                            <comment id="256239" author="raot" created="Fri, 11 Oct 2019 14:38:47 +0000"  >&lt;p&gt;I did a quick test and the patch seems to work as expected, read performance is as expected with an unhealthy mirror. I did not however run comprehensive tests.&#160;&#160;&lt;/p&gt;</comment>
                            <comment id="256602" author="gerrit" created="Fri, 18 Oct 2019 01:06:49 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/34952/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/34952/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12328&quot; title=&quot;FLR mirroring on 2.12.1-1 not usable if OST is down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12328&quot;&gt;&lt;del&gt;LU-12328&lt;/del&gt;&lt;/a&gt; flr: avoid reading unhealthy mirror&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 39da3c06275e04e2a6e7f055cb27ee9dff1ea576&lt;/p&gt;</comment>
                            <comment id="256606" author="pjones" created="Fri, 18 Oct 2019 01:09:18 +0000"  >&lt;p&gt;Landed for 2.13&lt;/p&gt;</comment>
                            <comment id="256857" author="gerrit" created="Tue, 22 Oct 2019 17:41:29 +0000"  >&lt;p&gt;Minh Diep (mdiep@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/36550&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36550&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12328&quot; title=&quot;FLR mirroring on 2.12.1-1 not usable if OST is down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12328&quot;&gt;&lt;del&gt;LU-12328&lt;/del&gt;&lt;/a&gt; flr: avoid reading unhealthy mirror&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 95b0b3d9aa1d0de120e788eef96d4a1f42ff9d6c&lt;/p&gt;</comment>
                            <comment id="258618" author="gerrit" created="Thu, 21 Nov 2019 07:35:25 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/36550/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36550/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12328&quot; title=&quot;FLR mirroring on 2.12.1-1 not usable if OST is down&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12328&quot;&gt;&lt;del&gt;LU-12328&lt;/del&gt;&lt;/a&gt; flr: avoid reading unhealthy mirror&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 02affb11d4162f23eadef7e0ed15982e11005a41&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="56296">LU-12525</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="32398">LU-7236</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="32642" name="log.zip" size="558651" author="raot" created="Sat, 25 May 2019 00:14:18 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00grz:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>