<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:04:11 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
-->
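<!--
A minimal sketch of such a restricted request, assuming this document was fetched via the
standard JIRA XML issue view (the exact request path is an assumption, not taken from this file):

    curl 'https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-6893/LU-6893.xml?field=key&field=summary'

This should return only the <key> and <summary> elements for LU-6893.
-->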
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
    <title>[LU-6893] recovery-double-scale test_pairwise_fail: mount failed</title>
    <link>https://jira.whamcloud.com/browse/LU-6893</link>
    <project id="10000" key="LU">Lustre</project>
    <description>&lt;p&gt;This issue was created by maloo for sarah_lw &amp;lt;wei3.liu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/816ceeca-2623-11e5-92e6-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/816ceeca-2623-11e5-92e6-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_pairwise_fail failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;mount /mnt/lustre on onyx-34vm6 failed
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;OST dmesg&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 2212.173927] Lustre: DEBUG MARKER: ==== Checking the clients loads BEFORE failover -- failure NOT OK
[ 2212.998458] Lustre: DEBUG MARKER: /usr/sbin/lctl mark Done checking client loads. Failing type1=clients item1=onyx-34vm5,onyx-34vm6 ... 
[ 2213.336608] Lustre: DEBUG MARKER: Done checking client loads. Failing type1=clients item1=onyx-34vm5,onyx-34vm6 ...
[ 2253.919179] LNet: Service thread pid 4319 was inactive for 40.03s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[ 2253.926365] Pid: 4319, comm: ll_ost_io00_067
[ 2253.928076] 
Call Trace:
[ 2253.931101]  [&amp;lt;ffffffff8160a409&amp;gt;] schedule+0x29/0x70
[ 2253.933951]  [&amp;lt;ffffffff816082b5&amp;gt;] schedule_timeout+0x175/0x2d0
[ 2253.936095]  [&amp;lt;ffffffffa081e3aa&amp;gt;] ? ptlrpc_start_bulk_transfer+0x16a/0x710 [ptlrpc]
[ 2253.938049]  [&amp;lt;ffffffff8107ee80&amp;gt;] ? process_timeout+0x0/0x10
[ 2253.940092]  [&amp;lt;ffffffffa07e2cae&amp;gt;] target_bulk_io+0x4de/0xb00 [ptlrpc]
[ 2253.941956]  [&amp;lt;ffffffff810a9650&amp;gt;] ? default_wake_function+0x0/0x20
[ 2253.944070]  [&amp;lt;ffffffffa088f941&amp;gt;] tgt_brw_write+0x10b1/0x1650 [ptlrpc]
[ 2253.945900]  [&amp;lt;ffffffff812dfbab&amp;gt;] ? string.isra.6+0x3b/0xf0
[ 2253.947956]  [&amp;lt;ffffffffa07e01f0&amp;gt;] ? target_bulk_timeout+0x0/0xb0 [ptlrpc]
[ 2253.949879]  [&amp;lt;ffffffffa088b29b&amp;gt;] tgt_request_handle+0x88b/0x1100 [ptlrpc]
[ 2253.952132]  [&amp;lt;ffffffffa0832fbb&amp;gt;] ptlrpc_server_handle_request+0x21b/0xa90 [ptlrpc]
[ 2253.954241]  [&amp;lt;ffffffffa0830078&amp;gt;] ? ptlrpc_wait_event+0x98/0x340 [ptlrpc]
[ 2253.956362]  [&amp;lt;ffffffffa0836900&amp;gt;] ptlrpc_main+0xc00/0x1f60 [ptlrpc]
[ 2253.958180]  [&amp;lt;ffffffff810ad8b6&amp;gt;] ? __dequeue_entity+0x26/0x40
[ 2253.960260]  [&amp;lt;ffffffffa0835d00&amp;gt;] ? ptlrpc_main+0x0/0x1f60 [ptlrpc]
[ 2253.962104]  [&amp;lt;ffffffff8109739f&amp;gt;] kthread+0xcf/0xe0
[ 2253.965627]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0
[ 2253.967444]  [&amp;lt;ffffffff81614f7c&amp;gt;] ret_from_fork+0x7c/0xb0
[ 2253.969266]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0

[ 2253.972575] LustreError: dumping log to /tmp/lustre-log.1436386922.4319
[ 2254.275273] Pid: 4320, comm: ll_ost_io00_068
[ 2254.279470] 
Call Trace:
[ 2254.285875]  [&amp;lt;ffffffff8160a409&amp;gt;] schedule+0x29/0x70
[ 2254.287722]  [&amp;lt;ffffffff816082b5&amp;gt;] schedule_timeout+0x175/0x2d0
[ 2254.289620]  [&amp;lt;ffffffffa081e3aa&amp;gt;] ? ptlrpc_start_bulk_transfer+0x16a/0x710 [ptlrpc]
[ 2254.291530]  [&amp;lt;ffffffff8107ee80&amp;gt;] ? process_timeout+0x0/0x10
[ 2254.293318]  [&amp;lt;ffffffffa07e2cae&amp;gt;] target_bulk_io+0x4de/0xb00 [ptlrpc]
[ 2254.295041]  [&amp;lt;ffffffff810a9650&amp;gt;] ? default_wake_function+0x0/0x20
[ 2254.296794]  [&amp;lt;ffffffffa088f941&amp;gt;] tgt_brw_write+0x10b1/0x1650 [ptlrpc]
[ 2254.298482]  [&amp;lt;ffffffff812dfbab&amp;gt;] ? string.isra.6+0x3b/0xf0
[ 2254.300158]  [&amp;lt;ffffffffa07e01f0&amp;gt;] ? target_bulk_timeout+0x0/0xb0 [ptlrpc]
[ 2254.301808]  [&amp;lt;ffffffffa088b29b&amp;gt;] tgt_request_handle+0x88b/0x1100 [ptlrpc]
[ 2254.303486]  [&amp;lt;ffffffffa0832fbb&amp;gt;] ptlrpc_server_handle_request+0x21b/0xa90 [ptlrpc]
[ 2254.305306]  [&amp;lt;ffffffffa0830078&amp;gt;] ? ptlrpc_wait_event+0x98/0x340 [ptlrpc]
[ 2254.306949]  [&amp;lt;ffffffffa0836900&amp;gt;] ptlrpc_main+0xc00/0x1f60 [ptlrpc]
[ 2254.308571]  [&amp;lt;ffffffff810ad8b6&amp;gt;] ? __dequeue_entity+0x26/0x40
[ 2254.310115]  [&amp;lt;ffffffff810125f6&amp;gt;] ? __switch_to+0x136/0x4a0
[ 2254.311663]  [&amp;lt;ffffffffa0835d00&amp;gt;] ? ptlrpc_main+0x0/0x1f60 [ptlrpc]
[ 2254.313179]  [&amp;lt;ffffffff8109739f&amp;gt;] kthread+0xcf/0xe0
[ 2254.314588]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0
[ 2254.315973]  [&amp;lt;ffffffff81614f7c&amp;gt;] ret_from_fork+0x7c/0xb0
[ 2254.317357]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0

[ 2254.319694] Pid: 4330, comm: ll_ost_io00_071
[ 2254.320938] 
Call Trace:
[ 2254.323020]  [&amp;lt;ffffffff8160a409&amp;gt;] schedule+0x29/0x70
[ 2254.324327]  [&amp;lt;ffffffff816082b5&amp;gt;] schedule_timeout+0x175/0x2d0
[ 2254.325717]  [&amp;lt;ffffffffa081e3aa&amp;gt;] ? ptlrpc_start_bulk_transfer+0x16a/0x710 [ptlrpc]
[ 2254.327192]  [&amp;lt;ffffffff8107ee80&amp;gt;] ? process_timeout+0x0/0x10
[ 2254.328564]  [&amp;lt;ffffffffa07e2cae&amp;gt;] target_bulk_io+0x4de/0xb00 [ptlrpc]
[ 2254.329942]  [&amp;lt;ffffffff810a9650&amp;gt;] ? default_wake_function+0x0/0x20
[ 2254.331377]  [&amp;lt;ffffffffa088f941&amp;gt;] tgt_brw_write+0x10b1/0x1650 [ptlrpc]
[ 2254.332768]  [&amp;lt;ffffffff812dfbab&amp;gt;] ? string.isra.6+0x3b/0xf0
[ 2254.334173]  [&amp;lt;ffffffffa07e01f0&amp;gt;] ? target_bulk_timeout+0x0/0xb0 [ptlrpc]
[ 2254.335613]  [&amp;lt;ffffffffa088b29b&amp;gt;] tgt_request_handle+0x88b/0x1100 [ptlrpc]
[ 2254.337071]  [&amp;lt;ffffffffa0832fbb&amp;gt;] ptlrpc_server_handle_request+0x21b/0xa90 [ptlrpc]
[ 2254.338680]  [&amp;lt;ffffffffa0830078&amp;gt;] ? ptlrpc_wait_event+0x98/0x340 [ptlrpc]
[ 2254.340149]  [&amp;lt;ffffffffa0836900&amp;gt;] ptlrpc_main+0xc00/0x1f60 [ptlrpc]
[ 2254.341516]  [&amp;lt;ffffffff810ad8b6&amp;gt;] ? __dequeue_entity+0x26/0x40
[ 2254.342882]  [&amp;lt;ffffffff810125f6&amp;gt;] ? __switch_to+0x136/0x4a0
[ 2254.344241]  [&amp;lt;ffffffffa0835d00&amp;gt;] ? ptlrpc_main+0x0/0x1f60 [ptlrpc]
[ 2254.345619]  [&amp;lt;ffffffff8109739f&amp;gt;] kthread+0xcf/0xe0
[ 2254.346921]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0
[ 2254.348223]  [&amp;lt;ffffffff81614f7c&amp;gt;] ret_from_fork+0x7c/0xb0
[ 2254.349523]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0

[ 2254.351850] Pid: 4257, comm: ll_ost_io00_039
[ 2254.353101] 
Call Trace:
[ 2254.355139]  [&amp;lt;ffffffff8160a409&amp;gt;] schedule+0x29/0x70
[ 2254.356389]  [&amp;lt;ffffffff816082b5&amp;gt;] schedule_timeout+0x175/0x2d0
[ 2254.357738]  [&amp;lt;ffffffffa081e3aa&amp;gt;] ? ptlrpc_start_bulk_transfer+0x16a/0x710 [ptlrpc]
[ 2254.359179]  [&amp;lt;ffffffff8107ee80&amp;gt;] ? process_timeout+0x0/0x10
[ 2254.360681]  [&amp;lt;ffffffffa07e2cae&amp;gt;] target_bulk_io+0x4de/0xb00 [ptlrpc]
[ 2254.362039]  [&amp;lt;ffffffff810a9650&amp;gt;] ? default_wake_function+0x0/0x20
[ 2254.363468]  [&amp;lt;ffffffffa088f941&amp;gt;] tgt_brw_write+0x10b1/0x1650 [ptlrpc]
[ 2254.364833]  [&amp;lt;ffffffff812dfbab&amp;gt;] ? string.isra.6+0x3b/0xf0
[ 2254.366180]  [&amp;lt;ffffffffa07e01f0&amp;gt;] ? target_bulk_timeout+0x0/0xb0 [ptlrpc]
[ 2254.367586]  [&amp;lt;ffffffffa088b29b&amp;gt;] tgt_request_handle+0x88b/0x1100 [ptlrpc]
[ 2254.369024]  [&amp;lt;ffffffffa0832fbb&amp;gt;] ptlrpc_server_handle_request+0x21b/0xa90 [ptlrpc]
[ 2254.370488]  [&amp;lt;ffffffffa0830078&amp;gt;] ? ptlrpc_wait_event+0x98/0x340 [ptlrpc]
[ 2254.371916]  [&amp;lt;ffffffffa0836900&amp;gt;] ptlrpc_main+0xc00/0x1f60 [ptlrpc]
[ 2254.373266]  [&amp;lt;ffffffff810ad8b6&amp;gt;] ? __dequeue_entity+0x26/0x40
[ 2254.374639]  [&amp;lt;ffffffffa0835d00&amp;gt;] ? ptlrpc_main+0x0/0x1f60 [ptlrpc]
[ 2254.375995]  [&amp;lt;ffffffff8109739f&amp;gt;] kthread+0xcf/0xe0
[ 2254.377272]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0
[ 2254.378521]  [&amp;lt;ffffffff81614f7c&amp;gt;] ret_from_fork+0x7c/0xb0
[ 2254.379843]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0

[ 2254.382140] LNet: Service thread pid 2840 was inactive for 40.17s. Watchdog stack traces are limited to 3 per 300 seconds, skipping this one.
[ 2255.071164] LNet: Service thread pid 4267 was inactive for 40.03s. Watchdog stack traces are limited to 3 per 300 seconds, skipping this one.
[ 2255.076671] LNet: Skipped 7 previous similar messages
[ 2255.078304] LustreError: dumping log to /tmp/lustre-log.1436386923.4267
[ 2264.610805] Lustre: lustre-OST0001: haven&apos;t heard from client 2d4be017-9a1c-7408-76c8-bc0239710d98 (at 10.2.4.133@tcp) in 49 seconds. I think it&apos;s dead, and I am evicting it. exp ffff88004084b000, cur 1436386933 expire 1436386903 last 1436386884
[ 2264.617306] Lustre: Skipped 6 previous similar messages
[ 2266.890151] LustreError: 4269:0:(ldlm_lib.c:3077:target_bulk_io()) @@@ Eviction on bulk WRITE  req@ffff880051723000 x1506160196432764/t0(0) o4-&amp;gt;2d4be017-9a1c-7408-76c8-bc0239710d98@10.2.4.133@tcp:199/0 lens 608/448 e 2 to 0 dl 1436386944 ref 1 fl Interpret:/0/0 rc 0/0
[ 2266.905189] Lustre: lustre-OST0003: Bulk IO write error with 2d4be017-9a1c-7408-76c8-bc0239710d98 (at 10.2.4.133@tcp), client will retry: rc -107
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
    <environment>server: lustre-master build # 3093 RHEL7.1&lt;br/&gt;
client: SLES11 SP3</environment>
    <key id="31174">LU-6893</key>
    <summary>recovery-double-scale test_pairwise_fail: mount failed</summary>
    <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
    <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
    <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
    <statusCategory id="3" key="done" colorName="success"/>
    <resolution id="5">Cannot Reproduce</resolution>
    <assignee username="jay">Jinshan Xiong</assignee>
    <reporter username="maloo">Maloo</reporter>
    <labels></labels>
    <created>Tue, 21 Jul 2015 21:13:15 +0000</created>
    <updated>Thu, 8 Feb 2018 18:31:26 +0000</updated>
    <resolved>Thu, 8 Feb 2018 18:31:26 +0000</resolved>
    <version>Lustre 2.8.0</version>
    <due></due>
    <votes>0</votes>
    <watches>2</watches>
    <issuelinks>
        <issuelinktype id="10011">
            <name>Related</name>
            <inwardlinks description="is related to"></inwardlinks>
        </issuelinktype>
    </issuelinks>
    <attachments></attachments>
    <subtasks></subtasks>
    <customfields>
        <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
            <customfieldname>Development</customfieldname>
            <customfieldvalues></customfieldvalues>
        </customfield>
        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
            <customfieldname>Rank</customfieldname>
            <customfieldvalues>
                <customfieldvalue>1|hzxiq7:</customfieldvalue>
            </customfieldvalues>
        </customfield>
        <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
            <customfieldname>Rank (Obsolete)</customfieldname>
            <customfieldvalues>
                <customfieldvalue>9223372036854775807</customfieldvalue>
            </customfieldvalues>
        </customfield>
        <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
            <customfieldname>Severity</customfieldname>
            <customfieldvalues>
                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>
            </customfieldvalues>
        </customfield>
    </customfields>
</item>
</channel>
</rss>