<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:06:49 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-14098] LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK</title>
                <link>https://jira.whamcloud.com/browse/LU-14098</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Getting the following errors in messages log on MDS:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
Oct 30 20:30:37 bmds1 kernel: LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Oct 30 20:30:38 bmds1 kernel: LustreError: 19498:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Oct 30 20:30:38 bmds1 kernel: LustreError: 19498:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 2 previous similar messages
Oct 30 20:30:39 bmds1 kernel: LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Oct 30 20:30:39 bmds1 kernel: LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 2 previous similar messages
Oct 30 20:30:41 bmds1 kernel: LustreError: 19516:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Oct 30 20:30:41 bmds1 kernel: LustreError: 19516:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 74 previous similar messages
Oct 30 20:30:45 bmds1 kernel: LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Oct 30 20:30:45 bmds1 kernel: LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 233 previous similar messages
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
</description>
                <environment>CentOS 7.6.1810</environment>
        <key id="61464">LU-14098</key>
            <summary>LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bzzz">Alex Zhuravlev</assignee>
                                    <reporter username="cmcl">Campbell Mcleay</reporter>
                        <labels>
                            <label>LTS12</label>
                    </labels>
                <created>Sat, 31 Oct 2020 12:43:20 +0000</created>
                <updated>Thu, 27 Oct 2022 03:15:50 +0000</updated>
                            <resolved>Fri, 26 Feb 2021 14:42:35 +0000</resolved>
                                    <version>Lustre 2.12.5</version>
                                    <fixVersion>Lustre 2.12.8</fixVersion>
                    <fixVersion>Lustre 2.15.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="284017" author="adilger" created="Mon, 2 Nov 2020 18:56:31 +0000"  >&lt;p&gt;Alex, can you please comment - does the &quot;&lt;tt&gt;osp_sync_declare_add()) logging isn&apos;t available&lt;/tt&gt;&quot; message mean that OST objects will not be destroyed?  Is this affecting only a single OST, or all OSTs?  Improving the error message to include the source and target device names would be very helpful.  &lt;/p&gt;

&lt;p&gt;What would be needed to repair the llog file here?  I think the LFSCK message is regarding cleaning up orphans?&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=cmcl&quot; class=&quot;user-hover&quot; rel=&quot;cmcl&quot;&gt;cmcl&lt;/a&gt;, could you check if creating and deleting OST objects on each OST is consuming and releasing space as expected?  e.g. create a file on each OST with &quot;&lt;tt&gt;lfs setstripe -i $ost_idx /path/to/file&lt;/tt&gt;&quot; and write 1GB there, then delete it and verify with &quot;&lt;tt&gt;lfs df&lt;/tt&gt;&quot; before and after to confirm space is being released.&lt;/p&gt;</comment>
                            <comment id="284038" author="cmcl" created="Mon, 2 Nov 2020 21:04:43 +0000"  >&lt;p&gt;Hi Andreas, &lt;/p&gt;

&lt;p&gt;Just want to make sure I&apos;m doing the right thing: I can get the $ost_idx from e.g., &apos;lctl dl&apos; (number in the left hand column) or &apos;lfs df&apos; (number in the right hand column), and I should run this command on the client, correct?&lt;/p&gt;

&lt;p&gt;Thanks,&lt;/p&gt;

&lt;p&gt;Campbell&lt;/p&gt;</comment>
                            <comment id="284107" author="adilger" created="Tue, 3 Nov 2020 06:49:27 +0000"  >&lt;p&gt;Campbell, the OST index is the number on the right for &quot;&lt;tt&gt;lfs df&lt;/tt&gt;&quot; (ie. &lt;tt&gt;bravo-OST0016&lt;/tt&gt; is OST index 0x16, or 22.  It is possible to use either the hex or decimal values with &quot;&lt;tt&gt;lfs setstripe -i&lt;/tt&gt;&quot;, which should be run on the client. &lt;/p&gt;</comment>
                            <comment id="284161" author="bzzz" created="Tue, 3 Nov 2020 17:19:52 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=adilger&quot; class=&quot;user-hover&quot; rel=&quot;adilger&quot;&gt;adilger&lt;/a&gt; yes, that message from osp_sync_declare_add() means that no llog record will be made and orphan will be left for that object. all OSTs are independed in this regard.&lt;br/&gt;
I&apos;ll make a patch improving the message.&lt;br/&gt;
and yes, LFSCK is intended to clean the resulting orphans.&lt;br/&gt;
AFAIU (to be verified) MDT should be restarted to use llog again.&lt;/p&gt;
</comment>
                            <comment id="284209" author="adilger" created="Wed, 4 Nov 2020 08:25:55 +0000"  >&lt;p&gt;Alex, in the case that the llog is not working, is it possible for the MDS to continue to destroy the OST objects &lt;b&gt;without&lt;/b&gt; the llog for recovery?  Clearly this would not handle the case when the MDS is rebooted, but it can&apos;t be worse than not doing &lt;b&gt;any&lt;/b&gt; object destroy operations?&lt;/p&gt;</comment>
                            <comment id="284217" author="bzzz" created="Wed, 4 Nov 2020 11:13:03 +0000"  >&lt;p&gt;the original idea was to destroy OST object when corresponding unlink is committed. then to save on commit callbacks OSP tracks last committed logid and then, getting records back the llog (they are still in cache usually) OSP issues OST_DESTROY RPC.&lt;br/&gt;
I think it&apos;s possible to implement commit-callback-per-object schema either as a primary one or as a fallack method.&lt;/p&gt;</comment>
                            <comment id="284219" author="cmcl" created="Wed, 4 Nov 2020 11:39:58 +0000"  >&lt;p&gt;Currently, there&apos;s quite a lot of writes and deletions happening as this cluster catches up on backups. Am waiting for a quiet time when the results will be clear to determine whether space in OSTs is consumed/released as per writes/deletes as expected&lt;/p&gt;</comment>
                            <comment id="284259" author="adilger" created="Wed, 4 Nov 2020 18:30:02 +0000"  >&lt;p&gt;Alex, can you please explain why the llog context is not being initialized properly?  I&apos;d think there is something wrong/missing with the llog itself, but I&apos;m not sure how to debug it. &lt;/p&gt;</comment>
                            <comment id="284303" author="bzzz" created="Thu, 5 Nov 2020 05:22:43 +0000"  >&lt;p&gt;iirc, the most frequent reason - -EINPROGRESS when something went wrong with OI and OSP couldn&apos;t open the llog at mounting.&lt;br/&gt;
it would be helpful to see the logs since boot time.&lt;/p&gt;</comment>
                            <comment id="284326" author="cmcl" created="Thu, 5 Nov 2020 10:57:28 +0000"  >&lt;p&gt;Hi Alex,&lt;/p&gt;

&lt;p&gt;Attached are the logs from the mds &lt;span class=&quot;nobr&quot;&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/attachment/36558/36558_bmds1-msgs.txt.gz&quot; title=&quot;bmds1-msgs.txt.gz attached to LU-14098&quot;&gt;bmds1-msgs.txt.gz&lt;sup&gt;&lt;img class=&quot;rendericon&quot; src=&quot;https://jira.whamcloud.com/images/icons/link_attachment_7.gif&quot; height=&quot;7&quot; width=&quot;7&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/span&gt; .&lt;/p&gt;

&lt;p&gt;Kind regards,&lt;/p&gt;

&lt;p&gt;Campbell&lt;/p&gt;</comment>
                            <comment id="284329" author="bzzz" created="Thu, 5 Nov 2020 11:06:17 +0000"  >&lt;p&gt;thanks, the llog got corrupted:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
Oct 30 20:29:06 bmds1 kernel: LustreError: 19089:0:(llog_osd.c:987:llog_osd_next_block()) bravo-MDT0000-osd: invalid llog tail at log id [0x1584b:0x1:0x0]:0 offset 4575232 last_rec idx 1 tail idx 0lrt len 20 read_size 4096
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;the easiest solution would be to stop that MDT, remove the llog manually and remount again.&lt;br/&gt;
would a detailed instruction help you?&lt;/p&gt;</comment>
                            <comment id="284330" author="cmcl" created="Thu, 5 Nov 2020 11:12:14 +0000"  >&lt;p&gt;Hi Alex, detailed instructions would be a great help, thank you.&lt;/p&gt;</comment>
                            <comment id="284643" author="bzzz" created="Sun, 8 Nov 2020 19:46:52 +0000"  >&lt;p&gt;your MDT should be unmounted, then please follow the example:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
# debugfs -c &amp;lt;mdt device&amp;gt; -R &lt;span class=&quot;code-quote&quot;&gt;&quot;stat /O/1/d11/88139&quot;&lt;/span&gt;
...
Inode: 159   Type: regular    Mode:  0644   Flags: 0x4000
Generation: 2694950587    Version: 0x00000000:00000000
User:     0   Group:     0   Project:     0   Size: 11520
File ACL: 0
....
Extended attributes:
  lma: fid=[0x1:0x1584b:0x0] compat=0 incompat=0
...
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;at this point please check that fid has 0x1584b in the line starting with lma: fid=...&lt;br/&gt;
if it is, then you can remove that file using direcly mounted ldiskfs&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
mount -t ldiskfs &amp;lt;mdt device&amp;gt; &amp;lt;mountpoint&amp;gt;
rm &amp;lt;mountpoint&amp;gt;/O/1/d11/88139
umount &amp;lt;mountpoint&amp;gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;now you can remount MDT. I&apos;d suggest to run LFSCK to clean orphans.&lt;/p&gt;</comment>
                            <comment id="285253" author="cmcl" created="Mon, 16 Nov 2020 18:22:58 +0000"  >&lt;p&gt;Thanks Alex, will let you know when I have done this. Still waiting for a window&lt;/p&gt;</comment>
                            <comment id="285971" author="gerrit" created="Wed, 25 Nov 2020 12:30:38 +0000"  >&lt;p&gt;Alex Zhuravlev (bzzz@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/40754&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40754&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14098&quot; title=&quot;LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&amp;#39;t available, run LFSCK&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14098&quot;&gt;&lt;del&gt;LU-14098&lt;/del&gt;&lt;/a&gt; obdclass: ignore and remove corrupted plain llogs&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b7bbd8993bfb3bcf49acc8b677a8986f8d785d65&lt;/p&gt;</comment>
                            <comment id="290331" author="cmcl" created="Mon, 25 Jan 2021 22:56:47 +0000"  >&lt;p&gt;Hi all.  We&apos;re seeing another occurrence of this on another Lustre cluster.  We have 6 OSTs that are now showing 100% full.  I&apos;ve used the recommendation on this page: &lt;a href=&quot;https://wiki.lustre.org/Handling_Full_OSTs&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://wiki.lustre.org/Handling_Full_OSTs&lt;/a&gt; to set max_create_count to 0 on those.  I&apos;ve also e2fsck&apos;d the MDT.  Here&apos;s the main snippet from the syslog:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[Jan25 10:18] LustreError: 40160:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
[  +0.001271] LustreError: 40160:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 16497 previous similar messages
[Jan25 10:21] LustreError: 4753:0:(osp_precreate.c:686:osp_precreate_send()) echo-OST0003-osc-MDT0000: can&apos;t precreate: rc = -28
[  +0.002111] LustreError: 4753:0:(osp_precreate.c:686:osp_precreate_send()) Skipped 76 previous similar messages&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Given the processes followed above, I&apos;ve grepped through our syslog for the past while, looking for the invalid llog tail info and see the following:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;2020-11-26-syslog.log:Nov 26 09:25:31 emds1 kernel: LustreError: 4811:0:(llog_osd.c:1000:llog_osd_next_block()) echo-MDT0000-osd: invalid llog tail at log id [0x5919:0x1:0x0]:0 offset 1708032 bytes 4096
2020-11-26-syslog.log:Nov 26 09:25:31 emds1 kernel: LustreError: 4907:0:(llog_osd.c:988:llog_osd_next_block()) echo-MDT0000-osd: invalid llog tail at log id [0x590f:0x1:0x0]:0 offset 4747264 last_rec idx 1811939328 tail idx 0lrt len 8925011 read_size 4096
2020-11-26-syslog.log:Nov 26 09:25:31 emds1 kernel: LustreError: 4973:0:(llog_osd.c:1000:llog_osd_next_block()) echo-MDT0000-osd: invalid llog tail at log id [0x592c:0x1:0x0]:0 offset 2904064 bytes 4096
2020-12-19-syslog.log:Dec 19 00:25:21 emds1 kernel: LustreError: 4752:0:(llog_osd.c:988:llog_osd_next_block()) echo-MDT0000-osd: invalid llog tail at log id [0x5919:0x1:0x0]:0 offset 1708032 last_rec idx 2 tail idx 0lrt len 8590435 read_size 4096
2020-12-19-syslog.log:Dec 19 00:25:21 emds1 kernel: LustreError: 4823:0:(llog_osd.c:1000:llog_osd_next_block()) echo-MDT0000-osd: invalid llog tail at log id [0x5954:0x1:0x0]:0 offset 626688 bytes 4096
2020-12-19-syslog.log:Dec 19 00:25:22 emds1 kernel: LustreError: 4987:0:(llog_osd.c:988:llog_osd_next_block()) echo-MDT0000-osd: invalid llog tail at log id [0x5934:0x1:0x0]:0 offset 2920448 last_rec idx 0 tail idx 33065lrt len 88 read_size 4096
2021-01-25-syslog.log:Jan 25 13:41:45 emds1 kernel: LustreError: 4492:0:(llog_osd.c:1000:llog_osd_next_block()) echo-MDT0000-osd: invalid llog tail at log id [0x5919:0x1:0x0]:0 offset 1708032 bytes 4096
2021-01-25-syslog.log:Jan 25 13:41:45 emds1 kernel: LustreError: 4589:0:(llog_osd.c:988:llog_osd_next_block()) echo-MDT0000-osd: invalid llog tail at log id [0x590f:0x1:0x0]:0 offset 4747264 last_rec idx 9787 tail idx 0lrt len 8925011 read_size 4096&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Given those entries, would I be right in thinking I should do the following:&lt;/p&gt;

&lt;p&gt;Convert those 0x59XX values to decimal so:&lt;/p&gt;

&lt;p&gt;0x590f is 22799&lt;br/&gt;
0x5919 is 22809&lt;br/&gt;
0x5934 is 22836&lt;br/&gt;
0x5954 is 22868&lt;br/&gt;
0x592c is 22828&lt;/p&gt;

&lt;p&gt;I&apos;d then run: debugfs -c &amp;lt;mdt device&amp;gt; -R &quot;stat /O/1/d11/&amp;lt;insert decimal number here&amp;gt;&quot;&lt;/p&gt;

&lt;p&gt;and check the lma fid matches, then delete those files and remount?&lt;/p&gt;

&lt;p&gt;Please let me know if that&apos;s correct or if you require any more info to get a full idea of what&apos;s going on.  As in the past, we&apos;re trying to get a set of good backups through so I&apos;ll be trying to wait for those to finish happily before taking the MDT down again.&lt;/p&gt;</comment>
                            <comment id="290332" author="cmcl" created="Mon, 25 Jan 2021 22:59:39 +0000"  >&lt;p&gt;(I should note that following the fsck of the MDT, I have an lfsck running as suggested by the syslog.)&lt;/p&gt;</comment>
                            <comment id="290347" author="adilger" created="Tue, 26 Jan 2021 03:04:42 +0000"  >&lt;p&gt;Campbell, you &lt;em&gt;could&lt;/em&gt; try Alex&apos;s patch: &lt;a href=&quot;https://review.whamcloud.com/40754&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40754&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14098&quot; title=&quot;LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&amp;#39;t available, run LFSCK&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14098&quot;&gt;&lt;del&gt;LU-14098&lt;/del&gt;&lt;/a&gt; obdclass: ignore and remove corrupted plain llogs&lt;/tt&gt;&quot; on the MDS.  This should clean up the broken llogs automatically when they are encountered rather than doing it by hand.&lt;/p&gt;

&lt;p&gt;However, the process that you describe in your previous comment also looks correct, and may be more expedient for the short term.&lt;/p&gt;</comment>
                            <comment id="290505" author="cmcl" created="Wed, 27 Jan 2021 22:48:28 +0000"  >&lt;p&gt;Thanks Andreas.  For now I&apos;m probably gonna go with the simple deletions once the current run of backups are done but we definitely look forward to seeing that patch in an upcoming release.&lt;/p&gt;</comment>
                            <comment id="290732" author="cmcl" created="Fri, 29 Jan 2021 20:41:17 +0000"  >&lt;p&gt;I&apos;ve removed those log files - they weren&apos;t in d11 but neighbour directories - I checked the lma fids on them and they all matched so seemed correct.&lt;/p&gt;

&lt;p&gt;fsck run on the MDT (which made some changes) and now remounted.  Running the lfsck --all -M echo-MDT00010 now.&lt;/p&gt;

&lt;p&gt;Should we expect to see the space on the OSTs gradually free as the lfsck works its way through things over the next few days?  Does it happen in a particular scanning-phase?&lt;/p&gt;</comment>
                            <comment id="290784" author="adilger" created="Sat, 30 Jan 2021 20:05:14 +0000"  >&lt;p&gt;If you don&apos;t add an lfsck option to clean up the orphans then it will just report that they exist.  If you add &quot;&lt;tt&gt;-o&lt;/tt&gt;&quot; it will link orphan objects under &lt;tt&gt;$MOUNT/.lustre/lost+found/MDTxxxx&lt;/tt&gt; for review and cleanup (i.e. &quot;&lt;tt&gt;rm&lt;/tt&gt;&quot;). See the &lt;tt&gt;lctl-lfsck-start(8)&lt;/tt&gt; man page for details.&lt;/p&gt;</comment>
                            <comment id="290903" author="cmcl" created="Mon, 1 Feb 2021 22:21:59 +0000"  >&lt;p&gt;Would these errors in the MDS syslog be down to the lfsck sorting things out, or is this a separate issue:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[  +4.946556] LDISKFS-fs warning: 844 callbacks suppressed
[  +0.000008] LDISKFS-fs warning (device md0): ldiskfs_dx_add_entry:2624: inode 769803009: comm lfsck_layout: index 2: reach max htree level 2
[  +0.000004] LDISKFS-fs warning (device md0): ldiskfs_dx_add_entry:2628: Large directory feature is not enabled on this filesystem
[  +0.003452] LDISKFS-fs warning (device md0): ldiskfs_dx_add_entry:2624: inode 769803009: comm lfsck_layout: index 2: reach max htree level 2
[  +0.000005] LDISKFS-fs warning (device md0): ldiskfs_dx_add_entry:2628: Large directory feature is not enabled on this filesystem
[  +0.002557] LDISKFS-fs warning (device md0): ldiskfs_dx_add_entry:2624: inode 769803009: comm lfsck_layout: index 2: reach max htree level 2
[  +0.000004] LDISKFS-fs warning (device md0): ldiskfs_dx_add_entry:2628: Large directory feature is not enabled on this filesystem
[  +0.017404] LDISKFS-fs warning (device md0): ldiskfs_dx_add_entry:2624: inode 769803009: comm lfsck_layout: index 2: reach max htree level 2
[  +0.000004] LDISKFS-fs warning (device md0): ldiskfs_dx_add_entry:2628: Large directory feature is not enabled on this filesystem
[  +0.006475] LDISKFS-fs warning (device md0): ldiskfs_dx_add_entry:2624: inode 769803009: comm lfsck_layout: index 2: reach max htree level 2
[  +0.000004] LDISKFS-fs warning (device md0): ldiskfs_dx_add_entry:2628: Large directory feature is not enabled on this filesystem
[  +0.550317] Lustre: 46464:0:(osd_handler.c:501:osd_ldiskfs_add_entry()) echo-MDT0000: directory (inode: 769803009, FID: [0x2000207a3:0x1:0x0]) has reached maximum entry limit
[  +0.000008] Lustre: 46464:0:(osd_handler.c:501:osd_ldiskfs_add_entry()) Skipped 7907 previous similar messages&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="290904" author="cmcl" created="Mon, 1 Feb 2021 22:31:40 +0000"  >&lt;p&gt;Ah - my guess is that this is putting all the ophans in .lustre/lost+found/MDT0000 and after repairing 12M layouts (so far) it can&apos;t put any more files in the unstructured lost+found dir.&lt;/p&gt;

&lt;p&gt;Not sure what effect that&apos;s gonna have on the ongoing lfsck.  An ls or a find in the lost+found dir hangs so I think my guess is correct.&lt;/p&gt;

&lt;p&gt;I&apos;m gonna run a find . -exec /bin/rm -f to try and clear those files&lt;/p&gt;</comment>
                            <comment id="290926" author="adilger" created="Tue, 2 Feb 2021 04:02:35 +0000"  >&lt;p&gt;If you are fairly confident that these objects under &lt;tt&gt;.lustre/lost+found/MDT0000/&lt;/tt&gt; are orphans, then deleting them is the right way forward. &lt;/p&gt;</comment>
                            <comment id="290992" author="cmcl" created="Tue, 2 Feb 2021 17:39:23 +0000"  >&lt;p&gt;With the lfsck finished and the find rm finished, 5 of the 6 100% full OSTs are now near empty so I&apos;ve restored their max_create_count values.  One OST is still however 100% full and we&apos;re still seeing the following in dmesg:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[Feb 2 06:25] LustreError: 20636:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
[  +0.001714] LustreError: 20636:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 34991 previous similar messages&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I&apos;ve double checked all the syslogs we have going back a couple of months for occurrences of &apos;llog tail&apos; and can only find the ones I listed above.  Do you have any recommendations for how to go about finding the bad log file?&lt;/p&gt;</comment>
                            <comment id="291011" author="adilger" created="Tue, 2 Feb 2021 20:53:30 +0000"  >&lt;p&gt;Campbell, it would be most useful to check for &quot;&lt;tt&gt;LustreError:&lt;/tt&gt;&quot; messages reported after MDT0000 was mounted when you deleted the llog objects.  If you could attach the MDS &lt;tt&gt;/var/log/messages&lt;/tt&gt; file from that time it would be helpful, along with knowing which specific OST is still having the problem.  It &lt;em&gt;might&lt;/em&gt; have printed something like &quot;&lt;tt&gt;echo-OST0003-osc-MDT0000: ctxt 2 lop_setup=0xffff001234567890 failed: rc = -22&lt;/tt&gt;&quot;.&lt;/p&gt;

&lt;p&gt;Unfortunately, the &lt;tt&gt;osp_sync_declare_add()&lt;/tt&gt; message doesn&apos;t report which OST is having the problem.  Even so, this function is only trying to get a reference on an existing data structure in memory, so even if it included the OST name it wouldn&apos;t be able to print anything about the specific llog file that was having the problem.  That would have been loaded at startup time.  If the logs from the previous mount don&apos;t show anything, it might be possible to re-trigger this by only unmounting and remounting the OST, and capturing the Lustre debug logs on the MDS:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;oss# umount /mnt/&amp;lt;ost_mount&amp;gt; (or whatever HA command is used to stop the OST)
mds# lctl set_param debug=+info+config debug_mb=1024
mds# lctl clear
oss# mount /mnt/&amp;lt;ost_mount&amp;gt; (or as appropriate)
[wait a few seconds for OST to connect to the MDS]
mds# lctl dk /tmp/debug
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;That would be more useful than having to remount the MDT and having the logs from all of the OSTs reconnecting mixed together.&lt;/p&gt;

&lt;p&gt;Alex, it would be useful to make a small patch (separate from patch &lt;a href=&quot;https://review.whamcloud.com/40754&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40754&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14098&quot; title=&quot;LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&amp;#39;t available, run LFSCK&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14098&quot;&gt;&lt;del&gt;LU-14098&lt;/del&gt;&lt;/a&gt; obdclass: try to skip corrupted llog records&lt;/tt&gt;&quot;) that improves the error message in &lt;tt&gt;osp_sync_declare_add()&lt;/tt&gt; - add in the MDT name (at the start) and OST device name (in the message).  That would at least allow debugging which OST (and which MDT in DNE mode) the problem is related to.    It looks like the 40754 patch does not touch this function, so there wouldn&apos;t be any conflicts.&lt;/p&gt;</comment>
                            <comment id="291028" author="cmcl" created="Tue, 2 Feb 2021 22:31:31 +0000"  >&lt;p&gt;The OST in question is: echo-OST0028_UUID&lt;/p&gt;

&lt;p&gt;I&apos;ll paste below the LustreError lines from that day&apos;s syslog and I&apos;ll attach the whole thing anyway.  You&apos;ll see an unmount at about 9:50am when I deleted the logs, then a reboot followed by an e4fsck, one more reboot then a &apos;normal&apos; mount.&lt;/p&gt;

&lt;p&gt;I&apos;ll try the sequence you suggest shortly.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Jan 29 00:17:42 emds1 kernel: LustreError: 21600:0:(upcall_cache.c:233:upcall_cache_get_entry()) acquire for key 463: error -110
Jan 29 02:51:00 emds1 kernel: LustreError: 4436:0:(ldlm_lockd.c:256:expired_lock_main()) ### lock callback timer expired after 99s: evicting client at 10.23.22.113@tcp  ns: mdt-echo-MDT0000_UUID lock: ffff9b73b28f33c0/0x3dc1d8a5f466cd2c lrc: 3/0,0 mode: PR/PR res: [0x20002bd3e:0x1728b:0x0].0x0 bits 0x20/0x0 rrc: 4 type: IBT flags: 0x60200400000020 nid: 10.23.22.113@tcp remote: 0x2f5cecde3bdac8df expref: 440542 pid: 12166 timeout: 306605 lvb_type: 0
Jan 29 02:51:02 emds1 kernel: LustreError: 12229:0:(ldlm_lockd.c:2324:ldlm_cancel_handler()) ldlm_cancel from 10.23.22.113@tcp arrived at 1611917462 with bad export cookie 4450076092389702753
Jan 29 08:32:36 emds1 kernel: LustreError: 21599:0:(upcall_cache.c:233:upcall_cache_get_entry()) acquire for key 463: error -110
Jan 29 09:46:31 emds1 kernel: LustreError: 137-5: echo-MDT0000_UUID: not available for connect from 10.23.22.103@tcp (no target). If you are running an HA pair check that the target is mounted on the other server.
Jan 29 09:46:31 emds1 kernel: LustreError: Skipped 15 previous similar messages
Jan 29 12:00:37 emds1 kernel: LustreError: 4491:0:(llog_cat.c:395:llog_cat_id2handle()) echo-OST0003-osc-MDT0000: error opening log id [0x5919:0x1:0x0]:0: rc = -2
Jan 29 12:00:37 emds1 kernel: LustreError: 4566:0:(llog_osd.c:1000:llog_osd_next_block()) echo-MDT0000-osd: invalid llog tail at log id [0x5938:0x1:0x0]:0 offset 2625536 bytes 4096
Jan 29 12:00:37 emds1 kernel: LustreError: 4566:0:(osp_sync.c:1273:osp_sync_thread()) echo-OST0028-osc-MDT0000: llog process with osp_sync_process_queues failed: -22
Jan 29 12:00:37 emds1 kernel: LustreError: 3562:0:(mdt_handler.c:6696:mdt_iocontrol()) echo-MDT0000: Aborting recovery for device
Jan 29 12:00:37 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) echo-OST0003-osc-MDT0000: can&apos;t precreate: rc = -28
Jan 29 12:00:48 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) echo-OST0003-osc-MDT0000: can&apos;t precreate: rc = -28
Jan 29 12:00:58 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) echo-OST0003-osc-MDT0000: can&apos;t precreate: rc = -28
Jan 29 12:01:08 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) echo-OST0003-osc-MDT0000: can&apos;t precreate: rc = -28
Jan 29 12:01:18 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) echo-OST0003-osc-MDT0000: can&apos;t precreate: rc = -28
Jan 29 12:01:28 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) echo-OST0003-osc-MDT0000: can&apos;t precreate: rc = -28
Jan 29 12:01:48 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) echo-OST0003-osc-MDT0000: can&apos;t precreate: rc = -28
Jan 29 12:01:48 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) Skipped 1 previous similar message
Jan 29 12:02:28 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) echo-OST0003-osc-MDT0000: can&apos;t precreate: rc = -28
Jan 29 12:02:28 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) Skipped 3 previous similar messages
Jan 29 12:03:38 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) echo-OST0003-osc-MDT0000: can&apos;t precreate: rc = -28
Jan 29 12:03:38 emds1 kernel: LustreError: 4492:0:(osp_precreate.c:686:osp_precreate_send()) Skipped 8 previous similar messages
Jan 29 14:17:50 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0x6977010002000000: rc = -2
Jan 29 14:17:51 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0x6977010002000000: rc = -2
Jan 29 14:17:51 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) Skipped 32 previous similar messages
Jan 29 15:40:44 emds1 kernel: LustreError: 20674:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 15:40:45 emds1 kernel: LustreError: 37541:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 15:40:45 emds1 kernel: LustreError: 37541:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 4 previous similar messages
Jan 29 15:40:46 emds1 kernel: LustreError: 20674:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 15:40:46 emds1 kernel: LustreError: 20674:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 2 previous similar messages
Jan 29 15:40:49 emds1 kernel: LustreError: 17802:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 15:40:49 emds1 kernel: LustreError: 17802:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 6 previous similar messages
Jan 29 15:40:53 emds1 kernel: LustreError: 14628:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 15:40:53 emds1 kernel: LustreError: 14628:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 13 previous similar messages
Jan 29 15:41:02 emds1 kernel: LustreError: 11297:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 15:41:02 emds1 kernel: LustreError: 11297:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 22 previous similar messages
Jan 29 15:41:18 emds1 kernel: LustreError: 11297:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 15:41:18 emds1 kernel: LustreError: 11297:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 33 previous similar messages
Jan 29 15:42:19 emds1 kernel: LustreError: 5958:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 15:42:19 emds1 kernel: LustreError: 5958:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 42 previous similar messages
Jan 29 15:46:51 emds1 kernel: LustreError: 13249:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 15:46:51 emds1 kernel: LustreError: 13249:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 6 previous similar messages
Jan 29 15:48:59 emds1 kernel: LustreError: 19955:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 15:48:59 emds1 kernel: LustreError: 19955:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 1051 previous similar messages
Jan 29 15:59:21 emds1 kernel: LustreError: 19951:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 15:59:21 emds1 kernel: LustreError: 19951:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 289 previous similar messages
Jan 29 16:13:55 emds1 kernel: LustreError: 19940:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 16:13:55 emds1 kernel: LustreError: 19940:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 1392 previous similar messages
Jan 29 16:30:33 emds1 kernel: LustreError: 37541:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 16:30:33 emds1 kernel: LustreError: 37541:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 2621 previous similar messages
Jan 29 16:41:48 emds1 kernel: LustreError: 34704:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 16:41:48 emds1 kernel: LustreError: 34704:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 776 previous similar messages
Jan 29 16:54:22 emds1 kernel: LustreError: 34500:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 16:54:22 emds1 kernel: LustreError: 34500:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 4142 previous similar messages
Jan 29 17:05:40 emds1 kernel: LustreError: 37541:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&apos;t available, run LFSCK
Jan 29 17:05:40 emds1 kernel: LustreError: 37541:0:(osp_sync.c:350:osp_sync_declare_add()) Skipped 3114 previous similar messages
Jan 29 17:22:30 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0x2852010002000000: rc = -2
Jan 29 17:22:30 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) Skipped 46 previous similar messages
Jan 29 17:22:47 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0xfa31010002000000: rc = -2
Jan 29 17:23:00 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0x6977010002000000: rc = -2
Jan 29 17:23:13 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0xb331010002000000: rc = -2
Jan 29 17:23:28 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0x6977010002000000: rc = -2
Jan 29 17:24:25 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0x6977010002000000: rc = -2
Jan 29 17:26:21 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0x6977010002000000: rc = -2
Jan 29 17:26:21 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) Skipped 1 previous similar message
Jan 29 17:29:52 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0x7441010002000000: rc = -2
Jan 29 17:32:15 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0x6977010002000000: rc = -2
Jan 29 17:35:50 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0x6977010002000000: rc = -2
Jan 29 17:35:50 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) Skipped 2 previous similar messages
Jan 29 17:40:28 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) srv-echo-MDT0000: Cannot find sequence 0x6977010002000000: rc = -2
Jan 29 17:40:28 emds1 kernel: LustreError: 15777:0:(fld_handler.c:264:fld_server_lookup()) Skipped 2 previous similar messages&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="291030" author="cmcl" created="Tue, 2 Feb 2021 22:41:40 +0000"  >&lt;p&gt;Pretty sure I should know the answer to this but I&apos;m having a moment here: how do I tell which OSS and mount point a particular OST refers to?  I need to know which mountpoint to unmount for echo-OST0028_UUID - thanks&lt;/p&gt;</comment>
                            <comment id="291033" author="cmcl" created="Tue, 2 Feb 2021 22:53:52 +0000"  >&lt;p&gt;Ah looks like the log did have the invalid llog tail and I somehow missed it:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# grep -B1 OST0028 2021-01-29-syslog.log
Jan 29 12:00:37 emds1 kernel: LustreError: 4566:0:(llog_osd.c:1000:llog_osd_next_block()) echo-MDT0000-osd: invalid llog tail at log id [0x5938:0x1:0x0]:0 offset 2625536 bytes 4096
Jan 29 12:00:37 emds1 kernel: LustreError: 4566:0:(osp_sync.c:1273:osp_sync_thread()) echo-OST0028-osc-MDT0000: llog process with osp_sync_process_queues failed: -22&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;That one wasn&apos;t on my previous list so guessing removing that should do the trick.  I&apos;ll wait for the next available downtime and remove that one.&lt;/p&gt;</comment>
                            <comment id="293146" author="gerrit" created="Fri, 26 Feb 2021 08:22:44 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/40754/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40754/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14098&quot; title=&quot;LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&amp;#39;t available, run LFSCK&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14098&quot;&gt;&lt;del&gt;LU-14098&lt;/del&gt;&lt;/a&gt; obdclass: try to skip corrupted llog records&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 910eb97c1b43a44a9da2ae14c3b83e28ca6342fc&lt;/p&gt;</comment>
                            <comment id="293193" author="pjones" created="Fri, 26 Feb 2021 14:42:35 +0000"  >&lt;p&gt;Landed for 2.15&lt;/p&gt;</comment>
                            <comment id="308397" author="gerrit" created="Mon, 26 Jul 2021 06:20:05 +0000"  >&lt;p&gt;Alex Zhuravlev (bzzz@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/44396&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/44396&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14098&quot; title=&quot;LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&amp;#39;t available, run LFSCK&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14098&quot;&gt;&lt;del&gt;LU-14098&lt;/del&gt;&lt;/a&gt; obdclass: try to skip corrupted llog records&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: a761c796b9aecbe90b9692c073e3548ca407efd8&lt;/p&gt;</comment>
                            <comment id="312644" author="gerrit" created="Mon, 13 Sep 2021 19:06:29 +0000"  >&lt;p&gt;&quot;Oleg Drokin &amp;lt;green@whamcloud.com&amp;gt;&quot; merged in patch &lt;a href=&quot;https://review.whamcloud.com/44396/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/44396/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14098&quot; title=&quot;LustreError: 19003:0:(osp_sync.c:350:osp_sync_declare_add()) logging isn&amp;#39;t available, run LFSCK&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14098&quot;&gt;&lt;del&gt;LU-14098&lt;/del&gt;&lt;/a&gt; obdclass: try to skip corrupted llog records&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 3bedd69dbef38f5a3166131831b8c5279df4b0dd&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10322">
                    <name>Gantt End to Start</name>
                                            <outwardlinks description="has to be done before">
                                                        </outwardlinks>
                                                        </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="61290">LU-14056</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="61819">LU-14165</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="37364" name="2021-01-29-syslog.log.gz" size="213496" author="cmcl" created="Tue, 2 Feb 2021 22:33:30 +0000"/>
                            <attachment id="36558" name="bmds1-msgs.txt.gz" size="215695" author="cmcl" created="Thu, 5 Nov 2020 10:57:27 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i01dx3:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>