<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:49:31 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
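
For instance, a field-restricted request for this issue might look like the following
(hypothetical URL, assuming JIRA's standard XML issue view path):
https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-5214/LU-5214.xml?field=key&field=summary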
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5214] Failure on test suite replay-ost-single test_5</title>
                <link>https://jira.whamcloud.com/browse/LU-5214</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;http://maloo.whamcloud.com/test_sets/221291fa-f523-11e3-b29e-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://maloo.whamcloud.com/test_sets/221291fa-f523-11e3-b29e-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_5 failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;test failed to respond and timed out&lt;/p&gt;&lt;/blockquote&gt;

</description>
                <environment>server and client: lustre-master build # 2091 DNE</environment>
        <key id="25190">LU-5214</key>
            <summary>Failure on test suite replay-ost-single test_5</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="ys">Yang Sheng</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Tue, 17 Jun 2014 20:30:26 +0000</created>
                <updated>Wed, 16 Jan 2019 05:56:05 +0000</updated>
                <resolved>Wed, 16 Jan 2019 05:56:05 +0000</resolved>
                <version>Lustre 2.6.0</version>
                <version>Lustre 2.7.0</version>
                <version>Lustre 2.8.0</version>
                <version>Lustre 2.10.0</version>
                <version>Lustre 2.11.0</version>
                <due></due>
                <votes>0</votes>
                <watches>6</watches>
                <comments>
                            <comment id="86968" author="green" created="Wed, 18 Jun 2014 19:45:44 +0000"  >&lt;p&gt;shadow-49vm4 has these locked up threads in the dmesg logs:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LNet: Service thread pid 13815 was inactive for 40.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
Pid: 13815, comm: ll_ost_io00_062

Call Trace:
 [&amp;lt;ffffffff815287f3&amp;gt;] io_schedule+0x73/0xc0
 [&amp;lt;ffffffff81267cc8&amp;gt;] get_request_wait+0x108/0x1d0
 [&amp;lt;ffffffff8109af00&amp;gt;] ? autoremove_wake_function+0x0/0x40
 [&amp;lt;ffffffff8126160e&amp;gt;] ? elv_merge+0x17e/0x1c0
 [&amp;lt;ffffffff81267e29&amp;gt;] blk_queue_bio+0x99/0x620
 [&amp;lt;ffffffff8116e900&amp;gt;] ? cache_alloc_refill+0x1c0/0x240
 [&amp;lt;ffffffff81266ebf&amp;gt;] generic_make_request+0x29f/0x5f0
 [&amp;lt;ffffffffa0002f97&amp;gt;] ? dm_merge_bvec+0xc7/0x100 [dm_mod]
 [&amp;lt;ffffffff81267280&amp;gt;] submit_bio+0x70/0x120
 [&amp;lt;ffffffffa05bf80e&amp;gt;] ? lprocfs_oh_tally+0x2e/0x50 [obdclass]
 [&amp;lt;ffffffffa0d3f80c&amp;gt;] osd_submit_bio+0x1c/0x60 [osd_ldiskfs]
 [&amp;lt;ffffffffa0d3fc3c&amp;gt;] osd_do_bio+0x3ec/0x820 [osd_ldiskfs]
 [&amp;lt;ffffffffa0436878&amp;gt;] ? __ldiskfs_journal_stop+0x68/0xa0 [ldiskfs]
 [&amp;lt;ffffffffa0d4317c&amp;gt;] osd_write_commit+0x31c/0x610 [osd_ldiskfs]
 [&amp;lt;ffffffffa0e63d04&amp;gt;] ofd_commitrw_write+0x604/0xfd0 [ofd]
 [&amp;lt;ffffffffa0e64bfa&amp;gt;] ofd_commitrw+0x52a/0x8c0 [ofd]
 [&amp;lt;ffffffffa05cac31&amp;gt;] ? lprocfs_counter_add+0x151/0x1c0 [obdclass]
 [&amp;lt;ffffffffa088358d&amp;gt;] obd_commitrw.clone.0+0x11d/0x390 [ptlrpc]
 [&amp;lt;ffffffffa088a7ce&amp;gt;] tgt_brw_write+0xc7e/0x1530 [ptlrpc]
 [&amp;lt;ffffffffa07e6750&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
 [&amp;lt;ffffffffa08892cc&amp;gt;] tgt_request_handle+0x23c/0xac0 [ptlrpc]
 [&amp;lt;ffffffffa0838d3a&amp;gt;] ptlrpc_main+0xd1a/0x1980 [ptlrpc]
 [&amp;lt;ffffffffa0838020&amp;gt;] ? ptlrpc_main+0x0/0x1980 [ptlrpc]
 [&amp;lt;ffffffff8109ab56&amp;gt;] kthread+0x96/0xa0
 [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
 [&amp;lt;ffffffff8109aac0&amp;gt;] ? kthread+0x0/0xa0
 [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20

LustreError: dumping log to /tmp/lustre-log.1402825080.13815
LNet: Service thread pid 13815 completed after 50.47s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
LNet: Service thread pid 12699 completed after 62.13s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
LNet: Service thread pid 12663 was inactive for 62.16s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
Pid: 12663, comm: ll_ost_io00_016

Call Trace:
 [&amp;lt;ffffffff815287f3&amp;gt;] io_schedule+0x73/0xc0
 [&amp;lt;ffffffff81267cc8&amp;gt;] get_request_wait+0x108/0x1d0
 [&amp;lt;ffffffff8109af00&amp;gt;] ? autoremove_wake_function+0x0/0x40
 [&amp;lt;ffffffff8126160e&amp;gt;] ? elv_merge+0x17e/0x1c0
 [&amp;lt;ffffffff81267e29&amp;gt;] blk_queue_bio+0x99/0x620
 [&amp;lt;ffffffff8116e637&amp;gt;] ? cache_grow+0x217/0x320
 [&amp;lt;ffffffff81266ebf&amp;gt;] generic_make_request+0x29f/0x5f0
 [&amp;lt;ffffffffa0002f97&amp;gt;] ? dm_merge_bvec+0xc7/0x100 [dm_mod]
 [&amp;lt;ffffffff81267280&amp;gt;] submit_bio+0x70/0x120
 [&amp;lt;ffffffffa05bf80e&amp;gt;] ? lprocfs_oh_tally+0x2e/0x50 [obdclass]
 [&amp;lt;ffffffffa0d3f80c&amp;gt;] osd_submit_bio+0x1c/0x60 [osd_ldiskfs]
 [&amp;lt;ffffffffa0d3fc3c&amp;gt;] osd_do_bio+0x3ec/0x820 [osd_ldiskfs]
 [&amp;lt;ffffffffa0436878&amp;gt;] ? __ldiskfs_journal_stop+0x68/0xa0 [ldiskfs]
 [&amp;lt;ffffffffa0d4317c&amp;gt;] osd_write_commit+0x31c/0x610 [osd_ldiskfs]
 [&amp;lt;ffffffffa0e63d04&amp;gt;] ofd_commitrw_write+0x604/0xfd0 [ofd]
 [&amp;lt;ffffffffa0e64bfa&amp;gt;] ofd_commitrw+0x52a/0x8c0 [ofd]
 [&amp;lt;ffffffffa05cac31&amp;gt;] ? lprocfs_counter_add+0x151/0x1c0 [obdclass]
 [&amp;lt;ffffffffa088358d&amp;gt;] obd_commitrw.clone.0+0x11d/0x390 [ptlrpc]
 [&amp;lt;ffffffffa088a7ce&amp;gt;] tgt_brw_write+0xc7e/0x1530 [ptlrpc]
 [&amp;lt;ffffffffa07e6750&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
 [&amp;lt;ffffffffa08892cc&amp;gt;] tgt_request_handle+0x23c/0xac0 [ptlrpc]
 [&amp;lt;ffffffffa0838d3a&amp;gt;] ptlrpc_main+0xd1a/0x1980 [ptlrpc]
 [&amp;lt;ffffffffa0838020&amp;gt;] ? ptlrpc_main+0x0/0x1980 [ptlrpc]
 [&amp;lt;ffffffff8109ab56&amp;gt;] kthread+0x96/0xa0
 [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
 [&amp;lt;ffffffff8109aac0&amp;gt;] ? kthread+0x0/0xa0
 [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20

Pid: 12682, comm: ll_ost_io00_035

Call Trace:
 [&amp;lt;ffffffffa03dd9b7&amp;gt;] ? jbd2_journal_stop+0x1e7/0x2b0 [jbd2]
 [&amp;lt;ffffffff8109b22e&amp;gt;] ? prepare_to_wait+0x4e/0x80
 [&amp;lt;ffffffffa0d203f5&amp;gt;] osd_trans_stop+0x195/0x550 [osd_ldiskfs]
 [&amp;lt;ffffffff8109af00&amp;gt;] ? autoremove_wake_function+0x0/0x40
 [&amp;lt;ffffffffa0e5c5ff&amp;gt;] ofd_trans_stop+0x1f/0x60 [ofd]
 [&amp;lt;ffffffffa0e63aa2&amp;gt;] ofd_commitrw_write+0x3a2/0xfd0 [ofd]
 [&amp;lt;ffffffffa0e64bfa&amp;gt;] ofd_commitrw+0x52a/0x8c0 [ofd]
 [&amp;lt;ffffffffa05cac31&amp;gt;] ? lprocfs_counter_add+0x151/0x1c0 [obdclass]
 [&amp;lt;ffffffffa088358d&amp;gt;] obd_commitrw.clone.0+0x11d/0x390 [ptlrpc]
 [&amp;lt;ffffffffa088a7ce&amp;gt;] tgt_brw_write+0xc7e/0x1530 [ptlrpc]
 [&amp;lt;ffffffffa07e6750&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
 [&amp;lt;ffffffffa08892cc&amp;gt;] tgt_request_handle+0x23c/0xac0 [ptlrpc]
 [&amp;lt;ffffffffa0838d3a&amp;gt;] ptlrpc_main+0xd1a/0x1980 [ptlrpc]
 [&amp;lt;ffffffffa0838020&amp;gt;] ? ptlrpc_main+0x0/0x1980 [ptlrpc]
 [&amp;lt;ffffffff8109ab56&amp;gt;] kthread+0x96/0xa0
 [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
 [&amp;lt;ffffffff8109aac0&amp;gt;] ? kthread+0x0/0xa0
 [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20

Pid: 12668, comm: ll_ost_io00_021

Call Trace:
 [&amp;lt;ffffffffa03de08a&amp;gt;] start_this_handle+0x25a/0x480 [jbd2]
 [&amp;lt;ffffffff8109af00&amp;gt;] ? autoremove_wake_function+0x0/0x40
 [&amp;lt;ffffffffa03de495&amp;gt;] jbd2_journal_start+0xb5/0x100 [jbd2]
 [&amp;lt;ffffffffa0436906&amp;gt;] ldiskfs_journal_start_sb+0x56/0xe0 [ldiskfs]
 [&amp;lt;ffffffffa0d21fdf&amp;gt;] osd_trans_start+0x1df/0x660 [osd_ldiskfs]
 [&amp;lt;ffffffffa0d3182a&amp;gt;] ? osd_declare_attr_set+0x13a/0x7b0 [osd_ldiskfs]
 [&amp;lt;ffffffffa0e5c6bc&amp;gt;] ofd_trans_start+0x7c/0x100 [ofd]
 [&amp;lt;ffffffffa0e63c23&amp;gt;] ofd_commitrw_write+0x523/0xfd0 [ofd]
 [&amp;lt;ffffffffa0e64bfa&amp;gt;] ofd_commitrw+0x52a/0x8c0 [ofd]
 [&amp;lt;ffffffffa05cac31&amp;gt;] ? lprocfs_counter_add+0x151/0x1c0 [obdclass]
 [&amp;lt;ffffffffa088358d&amp;gt;] obd_commitrw.clone.0+0x11d/0x390 [ptlrpc]
 [&amp;lt;ffffffffa088a7ce&amp;gt;] tgt_brw_write+0xc7e/0x1530 [ptlrpc]
 [&amp;lt;ffffffffa07e6750&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
 [&amp;lt;ffffffffa08892cc&amp;gt;] tgt_request_handle+0x23c/0xac0 [ptlrpc]
 [&amp;lt;ffffffffa0838d3a&amp;gt;] ptlrpc_main+0xd1a/0x1980 [ptlrpc]
 [&amp;lt;ffffffffa0838020&amp;gt;] ? ptlrpc_main+0x0/0x1980 [ptlrpc]
 [&amp;lt;ffffffff8109ab56&amp;gt;] kthread+0x96/0xa0
 [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
 [&amp;lt;ffffffff8109aac0&amp;gt;] ? kthread+0x0/0xa0
 [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="135937" author="standan" created="Thu, 10 Dec 2015 20:51:26 +0000"  >&lt;p&gt;master, build# 3264, 2.7.64 tag&lt;br/&gt;
Hard Failover: EL6.7 Server/Client - ZFS&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/2dc08784-9ebc-11e5-98a4-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/2dc08784-9ebc-11e5-98a4-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="139390" author="standan" created="Wed, 20 Jan 2016 01:42:02 +0000"  >&lt;p&gt;Another instance found for hardfailover: EL6.7 Server/Client - ZFS&lt;br/&gt;
build# 3305&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/e3cfd3b2-bbd7-11e5-8506-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/e3cfd3b2-bbd7-11e5-8506-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="139400" author="standan" created="Wed, 20 Jan 2016 02:17:37 +0000"  >&lt;p&gt;Another instance found for hardfailover: EL7 Server/Client - ZFS&lt;br/&gt;
build# 3305&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/febe1384-bbc6-11e5-8506-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/febe1384-bbc6-11e5-8506-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="139405" author="sarah" created="Wed, 20 Jan 2016 04:10:21 +0000"  >&lt;p&gt;instance on master build # 3305 RHEL6.7&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/8b9aed50-bc84-11e5-b3b7-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/8b9aed50-bc84-11e5-b3b7-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;It looks like this issue affects multiple branches, could this be considered a higher priority?&lt;/p&gt;</comment>
                            <comment id="139772" author="pjones" created="Fri, 22 Jan 2016 18:39:23 +0000"  >&lt;p&gt;YangSheng&lt;/p&gt;

&lt;p&gt;Could you please look into this issue?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="141226" author="standan" created="Thu, 4 Feb 2016 19:00:09 +0000"  >&lt;p&gt;Another instance occurred for FULL - EL6.7 Server/EL6.7 Client - ZFS  , master , build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/98eb99ce-cb47-11e5-a59a-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/98eb99ce-cb47-11e5-a59a-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance on master for FULL - EL7.1 Server/EL7.1 Client - ZFS, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/ddc75dc6-cb88-11e5-b49e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/ddc75dc6-cb88-11e5-b49e-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="240068" author="ys" created="Wed, 16 Jan 2019 05:56:05 +0000"  >&lt;p&gt;Please reopen it if hit again.&lt;/p&gt;</comment>
                </comments>
                <issuelinks>
                    <issuelinktype id="10010">
                        <name>Duplicate</name>
                        <inwardlinks description="is duplicated by">
                        </inwardlinks>
                    </issuelinktype>
                    <issuelinktype id="10011">
                        <name>Related</name>
                        <outwardlinks description="is related to ">
                        </outwardlinks>
                        <inwardlinks description="is related to">
                            <issuelink>
                                <issuekey id="45109">LU-9273</issuekey>
                            </issuelink>
                            <issuelink>
                                <issuekey id="24369">LU-4950</issuekey>
                            </issuelink>
                            <issuelink>
                                <issuekey id="26283">LU-5575</issuekey>
                            </issuelink>
                        </inwardlinks>
                    </issuelinktype>
                </issuelinks>
                <attachments>
                </attachments>
                <subtasks>
                </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwp9j:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>14548</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>