<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:24:12 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2318] Remove &quot;Found index ..., updating log&quot; message</title>
                <link>https://jira.whamcloud.com/browse/LU-2318</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;What is the purpose of this message?&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;2012-11-13 09:54:48 Lustre: Found index 0 for lstest-MDT0000, updating log
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I believe it&apos;s generated here:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;3009         if (rc == EALREADY) {                                                   
3010                 LCONSOLE_WARN(&quot;Found index %d for %s, updating log\n&quot;,          
3011                               mti-&amp;gt;mti_stripe_index, mti-&amp;gt;mti_svname);                    
3012                 /* We would like to mark old log sections as invalid            
3013                    and add new log sections in the client and mdt logs.         
3014                    But if we add new sections, then live clients will           
3015                    get repeat setup instructions for already running            
3016                    osc&apos;s. So don&apos;t update the client/mdt logs. */               
3017                 mti-&amp;gt;mti_flags &amp;amp;= ~LDD_F_UPDATE;                                
3018         }  
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I don&apos;t see it serving a useful purpose to an administrator, so let&apos;s remove it.&lt;/p&gt;</description>
                <environment></environment>
        <key id="16665">LU-2318</key>
            <summary>Remove &quot;Found index ..., updating log&quot; message</summary>
                <type id="4" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11310&amp;avatarType=issuetype">Improvement</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="bzzz">Alex Zhuravlev</assignee>
                                    <reporter username="prakash">Prakash Surya</reporter>
                        <labels>
                            <label>shh</label>
                    </labels>
                <created>Tue, 13 Nov 2012 19:24:08 +0000</created>
                <updated>Tue, 20 Nov 2012 15:17:39 +0000</updated>
                            <resolved>Tue, 20 Nov 2012 15:17:39 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="47767" author="pjones" created="Tue, 13 Nov 2012 22:21:18 +0000"  >&lt;p&gt;Alex &lt;/p&gt;

&lt;p&gt;Can you please triage and assign this one?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="47841" author="bzzz" created="Thu, 15 Nov 2012 10:23:37 +0000"  >&lt;p&gt;hm, can you describe the case with more details please? I think we fixed the major source of these messages. basically a regular mount should not cause this.&lt;br/&gt;
I&apos;m not saying the message should stay as is, but I&apos;d like to make sure this is not a side effect of some misbehavior.&lt;/p&gt;</comment>
                            <comment id="47890" author="morrone" created="Thu, 15 Nov 2012 19:26:44 +0000"  >&lt;p&gt;Console messages after boot on our test filesystem yesterday:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;2012-11-14 17:20:03 Mounting grove-mds1/mgs on /mnt/lustre/local/ls1-MGS0000
2012-11-14 17:20:03 Lustre: Lustre: Build Version: 2.3.54-6chaos-6chaos--PRISTINE-2.6.32-220.23.1.2chaos.ch5.x86_64
2012-11-14 17:20:05 Mounting grove-mds1/mdt0 on /mnt/lustre/local/ls1-MDT0000
2012-11-14 17:20:05 Lustre: Found index 0 for ls1-MDT0000, updating log
2012-11-14 17:20:05 LustreError: 31856:0:(mgc_request.c:248:do_config_log_add()) failed processing sptlrpc log: -2
2012-11-14 17:20:05 LustreError: 31859:0:(sec_config.c:1024:sptlrpc_target_local_copy_conf()) missing llog context
2012-11-14 17:20:09 Lustre: ls1-MDT0000: Temporarily refusing client connection from 0@lo
2012-11-14 17:20:09 LustreError: 11-0: ls1-MDT0000-osp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11
2012-11-14 17:20:09 LustreError: 31856:0:(mdd_lfsck.c:323:mdd_lfsck_setup()) ls1-MDD0000: Lustre LFSCK unsupported on this device.
2012-11-14 17:20:09 Lustre: 31856:0:(fld_index.c:319:fld_index_init()) srv-ls1-MDT0000: File &quot;fld&quot; doesn&apos;t support range lookup, using stub. DNE and FIDs on OST will not work with this backend
2012-11-14 17:20:10 grove-mds1 login: Lustre: ls1-MDT0000: Will be in recovery for at least 5:00, or until 3 clients reconnect.
2012-11-14 17:20:34 LustreError: 11-0: ls1-MDT0000-osp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11
2012-11-14 17:20:59 LustreError: 11-0: ls1-MDT0000-osp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11
2012-11-14 17:21:49 LustreError: 11-0: ls1-MDT0000-osp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11
2012-11-14 17:22:14 LustreError: 11-0: ls1-MDT0000-osp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11
2012-11-14 17:22:58 grove-mds1 login: LustreError: 11-0: ls1-MDT0000-osp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11
2012-11-14 17:23:29 LustreError: 11-0: ls1-MDT0000-osp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11
2012-11-14 17:23:42 INFO: task tgt_recov:656 blocked for more than 120 seconds.
2012-11-14 17:23:42 &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
2012-11-14 17:23:42 tgt_recov     D 000000000000000d     0   656      2 0x00000000
2012-11-14 17:23:42  ffff882f89179e10 0000000000000046 0000000000000000 ffffffff81052026
2012-11-14 17:23:42  ffff882f89179da0 ffff882fcdd12ae0 ffff882f89179da0 ffffffff8104da6d
2012-11-14 17:23:42  ffff882f89177af8 ffff882f89179fd8 000000000000f4e8 ffff882f89177af8
2012-11-14 17:23:42 Call Trace:
2012-11-14 17:23:42  [&amp;lt;ffffffff81052026&amp;gt;] ? enqueue_task+0x66/0x80
2012-11-14 17:23:42  [&amp;lt;ffffffff8104da6d&amp;gt;] ? check_preempt_curr+0x6d/0x90
2012-11-14 17:23:42  [&amp;lt;ffffffffa087b330&amp;gt;] ? check_for_clients+0x0/0x90 [ptlrpc]
2012-11-14 17:23:42  [&amp;lt;ffffffffa087cd25&amp;gt;] target_recovery_overseer+0x95/0x250 [ptlrpc]
2012-11-14 17:23:42  [&amp;lt;ffffffffa087b130&amp;gt;] ? exp_connect_healthy+0x0/0x20 [ptlrpc]
2012-11-14 17:23:42  [&amp;lt;ffffffff81091090&amp;gt;] ? autoremove_wake_function+0x0/0x40
2012-11-14 17:23:42  [&amp;lt;ffffffffa0883f8e&amp;gt;] target_recovery_thread+0x58e/0x19d0 [ptlrpc]
2012-11-14 17:23:42  [&amp;lt;ffffffff8106ed2f&amp;gt;] ? do_exit+0x5af/0x870
2012-11-14 17:23:42  [&amp;lt;ffffffffa0883a00&amp;gt;] ? target_recovery_thread+0x0/0x19d0 [ptlrpc]
2012-11-14 17:23:42  [&amp;lt;ffffffff8100c14a&amp;gt;] child_rip+0xa/0x20
2012-11-14 17:23:42  [&amp;lt;ffffffffa0883a00&amp;gt;] ? target_recovery_thread+0x0/0x19d0 [ptlrpc]
2012-11-14 17:23:42  [&amp;lt;ffffffffa0883a00&amp;gt;] ? target_recovery_thread+0x0/0x19d0 [ptlrpc]
2012-11-14 17:23:42  [&amp;lt;ffffffff8100c140&amp;gt;] ? child_rip+0x0/0x20
2012-11-14 17:24:01 Lustre: Found index 130 for ls1-OST0082, updating log
2012-11-14 17:24:01 Lustre: Found index 81 for ls1-OST0051, updating log
2012-11-14 17:24:01 Lustre: Skipped 3 previous similar messages
2012-11-14 17:24:02 Lustre: Found index 33 for ls1-OST0021, updating log
2012-11-14 17:24:02 Lustre: Skipped 4 previous similar messages
2012-11-14 17:24:05 Lustre: Found index 354 for ls1-OST0162, updating log
2012-11-14 17:24:05 Lustre: Skipped 17 previous similar messages
2012-11-14 17:24:09 Lustre: Found index 357 for ls1-OST0165, updating log
2012-11-14 17:24:09 Lustre: Skipped 50 previous similar messages
2012-11-14 17:24:17 Lustre: Found index 150 for ls1-OST0096, updating log
2012-11-14 17:24:17 Lustre: Skipped 126 previous similar messages
2012-11-14 17:24:19 LustreError: 11-0: ls1-MDT0000-osp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11
2012-11-14 17:24:33 Lustre: Found index 280 for ls1-OST0118, updating log
2012-11-14 17:24:33 Lustre: Skipped 31 previous similar messages
2012-11-14 17:24:51 grove-mds1 login: LustreError: 11-0: ls1-MDT0000-osp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11
2012-11-14 17:25:14 LustreError: Skipped 1 previous similar message
2012-11-14 17:25:18 Lustre: ls1-MDT0000: Recovery over after 5:00, of 3 clients 2 recovered and 1 was evicted.
2012-11-14 17:26:29 LustreError: 11-0: ls1-MDT0000-osp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11
2012-11-14 17:26:29 LustreError: Skipped 2 previous similar messages
2012-11-14 17:28:59 LustreError: 11-0: ls1-MDT0000-osp-MDT0000: Communicating with 0@lo, operation mds_connect failed with -11
2012-11-14 17:28:59 LustreError: Skipped 5 previous similar messages
2012-11-14 17:29:03 Lustre: ls1-OST0031-osc-MDT0000: Connection restored to ls1-OST0031 (at 172.20.1.49@o2ib500)
2012-11-14 17:29:03 LustreError: 32077:0:(osp_sync.c:584:osp_sync_process_record()) processed all old entries: 0xcef:1
2012-11-14 17:29:03 Lustre: ls1-OST0032-osc-MDT0000: Connection restored to ls1-OST0032 (at 172.20.1.50@o2ib500)
2012-11-14 17:29:03 LustreError: 32080:0:(osp_sync.c:584:osp_sync_process_record()) processed all old entries: 0xcf0:1
2012-11-14 17:29:06 Lustre: ls1-OST0092-osc-MDT0000: Connection restored to ls1-OST0092 (at 172.20.1.146@o2ib500)
2012-11-14 17:29:06 LustreError: 32368:0:(osp_sync.c:584:osp_sync_process_record()) processed all old entries: 0xd50:1
2012-11-14 17:29:06 LustreError: 32368:0:(osp_sync.c:584:osp_sync_process_record()) Skipped 6 previous similar messages
2012-11-14 17:29:06 Lustre: Skipped 6 previous similar messages
2012-11-14 17:29:24 Lustre: ls1-OST002e-osc-MDT0000: Connection restored to ls1-OST002e (at 172.20.1.46@o2ib500)
2012-11-14 17:29:24 LustreError: 32068:0:(osp_sync.c:584:osp_sync_process_record()) processed all old entries: 0xcec:1
2012-11-14 17:29:24 LustreError: 32068:0:(osp_sync.c:584:osp_sync_process_record()) Skipped 7 previous similar messages
2012-11-14 17:29:24 Lustre: Skipped 7 previous similar messages
2012-11-14 17:29:53 Lustre: ls1-OST00b4-osc-MDT0000: Connection restored to ls1-OST00b4 (at 172.20.1.180@o2ib500)
2012-11-14 17:29:53 LustreError: 32470:0:(osp_sync.c:584:osp_sync_process_record()) processed all old entries: 0xd72:1
2012-11-14 17:29:53 LustreError: 32470:0:(osp_sync.c:584:osp_sync_process_record()) Skipped 212 previous similar messages
2012-11-14 17:29:53 Lustre: Skipped 213 previous similar messages
2012-11-14 17:30:18 Lustre: ls1-OST010e-osc-MDT0000: Connection restored to ls1-OST010e (at 172.20.2.70@o2ib500)
2012-11-14 17:30:18 LustreError: 32740:0:(osp_sync.c:584:osp_sync_process_record()) processed all old entries: 0xdcc:1
2012-11-14 17:30:18 LustreError: 32740:0:(osp_sync.c:584:osp_sync_process_record()) Skipped 70 previous similar messages
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
</comment>
                            <comment id="48071" author="bzzz" created="Tue, 20 Nov 2012 10:02:33 +0000"  >&lt;p&gt; &lt;a href=&quot;http://review.whamcloud.com/4631&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/4631&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="48085" author="prakash" created="Tue, 20 Nov 2012 12:58:11 +0000"  >&lt;p&gt;Thanks Alex! I&apos;m curious.. What is this log for? Is it part of the MGS and only needs to be updated when a filesystem configuration change happens, i.e. &lt;tt&gt;LDD_F_VIRGIN&lt;/tt&gt; or &lt;tt&gt;LDD_F_WRITECONF&lt;/tt&gt;?&lt;/p&gt;</comment>
                            <comment id="48086" author="bzzz" created="Tue, 20 Nov 2012 13:01:32 +0000"  >&lt;p&gt;that&apos;s correct Prakash. literally there is no need to update the log every time.&lt;/p&gt;</comment>
                            <comment id="48126" author="bzzz" created="Tue, 20 Nov 2012 15:17:39 +0000"  >&lt;p&gt;I&apos;m closing this as it&apos;s covered in the different patch under &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2156&quot; title=&quot;master is very noisy (after latest massive landings)&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2156&quot;&gt;&lt;del&gt;LU-2156&lt;/del&gt;&lt;/a&gt;. sorry for confusion.&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvc5z:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>5542</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                </customfields>
    </item>
</channel>
</rss>