<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:42:45 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
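An illustrative full request (hypothetical; the 'issue-xml' view path below follows JIRA's usual convention and is shown only as an example):
    https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-4439/LU-4439.xml?field=key&field=summary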
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


    <item>
        <title>[LU-4439] Test failure on replay-single test_70b: rundbench load failed</title>
        <link>https://jira.whamcloud.com/browse/LU-4439</link>
        <project id="10000" key="LU">Lustre</project>
        <description>&lt;p&gt;This issue was created by maloo for Nathaniel Clark &amp;lt;nathaniel.l.clark@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;http://maloo.whamcloud.com/test_sets/80971760-7539-11e3-936d-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://maloo.whamcloud.com/test_sets/80971760-7539-11e3-936d-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_70b failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;rundbench load on .* failed!&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Info required for matching: replay-single 70b&lt;/p&gt;</description>
        <environment></environment>
        <key id="22635">LU-4439</key>
            <summary>Test failure on replay-single test_70b: rundbench load failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="utopiabound">Nathaniel Clark</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>performance</label>
                            <label>zfs</label>
                    </labels>
                <created>Mon, 6 Jan 2014 14:46:09 +0000</created>
                <updated>Wed, 14 Oct 2015 17:42:25 +0000</updated>
                            <resolved>Tue, 13 Oct 2015 05:25:17 +0000</resolved>
                                    <version>Lustre 2.6.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="74660" author="utopiabound" created="Thu, 9 Jan 2014 16:57:27 +0000"  >&lt;p&gt;For review-zfs failures, the errors are thus:&lt;/p&gt;

&lt;ul&gt;
	&lt;li&gt;dbench on client gets a &quot;No space left on device&quot; error&lt;/li&gt;
	&lt;li&gt;MDT:
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;17:25:20:LustreError: 8125:0:(osp_precreate.c:481:osp_precreate_send()) lustre-OST0001-osc-MDT0000: can&apos;t precreate: rc = -5
17:25:20:LustreError: 8125:0:(osp_precreate.c:984:osp_precreate_thread()) lustre-OST0001-osc-MDT0000: cannot precreate objects: rc = -5
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;&lt;/li&gt;
	&lt;li&gt;OST:
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;17:24:36:Lustre: DEBUG MARKER: /usr/sbin/lctl mark test_70b fail mds1 1 times
17:24:36:Lustre: DEBUG MARKER: test_70b fail mds1 1 times
17:24:36:LNet: Service thread pid 9687 was inactive for 62.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
17:24:36:LNet: Skipped 3 previous similar messages
17:24:36:Pid: 9687, comm: ll_ost_io00_019
17:24:36:
17:24:36:Call Trace:
17:24:36: [&amp;lt;ffffffff81096fae&amp;gt;] ? prepare_to_wait_exclusive+0x4e/0x80
17:24:36: [&amp;lt;ffffffffa0135edd&amp;gt;] cv_wait_common+0xed/0x100 [spl]
17:24:36: [&amp;lt;ffffffff81096da0&amp;gt;] ? autoremove_wake_function+0x0/0x40
17:24:36: [&amp;lt;ffffffffa0135f45&amp;gt;] __cv_wait+0x15/0x20 [spl]
17:24:36: [&amp;lt;ffffffffa0234e9b&amp;gt;] txg_wait_open+0x7b/0xa0 [zfs]
17:24:36: [&amp;lt;ffffffffa01f9a5d&amp;gt;] dmu_tx_wait+0xed/0xf0 [zfs]
17:24:36: [&amp;lt;ffffffffa01f9aee&amp;gt;] dmu_tx_assign+0x8e/0x4e0 [zfs]
17:24:36: [&amp;lt;ffffffffa0e9c56c&amp;gt;] osd_trans_start+0x9c/0x410 [osd_zfs]
17:24:36: [&amp;lt;ffffffffa0f4947c&amp;gt;] ofd_trans_start+0x7c/0x100 [ofd]
17:24:36: [&amp;lt;ffffffffa0f4e5c3&amp;gt;] ofd_commitrw_write+0x523/0xfd0 [ofd]
17:24:36: [&amp;lt;ffffffffa0f4fe13&amp;gt;] ofd_commitrw+0x5a3/0xad0 [ofd]
17:24:36: [&amp;lt;ffffffffa0722271&amp;gt;] ? lprocfs_counter_add+0x151/0x1c0 [obdclass]
17:24:36: [&amp;lt;ffffffffa09ccb7d&amp;gt;] obd_commitrw.clone.0+0x11d/0x390 [ptlrpc]
17:24:36: [&amp;lt;ffffffffa09d1318&amp;gt;] tgt_brw_write+0xd18/0x1550 [ptlrpc]
17:24:36: [&amp;lt;ffffffffa05d927b&amp;gt;] ? cfs_set_ptldebug_header+0x2b/0xc0 [libcfs]
17:24:36: [&amp;lt;ffffffffa09357c0&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
17:24:36: [&amp;lt;ffffffffa09d2fea&amp;gt;] tgt_handle_request0+0x2ea/0x1490 [ptlrpc]
17:24:36: [&amp;lt;ffffffffa09d45ca&amp;gt;] tgt_request_handle+0x43a/0x980 [ptlrpc]
17:24:36: [&amp;lt;ffffffffa0987725&amp;gt;] ptlrpc_main+0xd25/0x1970 [ptlrpc]
17:24:36: [&amp;lt;ffffffffa0986a00&amp;gt;] ? ptlrpc_main+0x0/0x1970 [ptlrpc]
17:24:36: [&amp;lt;ffffffff81096a36&amp;gt;] kthread+0x96/0xa0
17:24:36: [&amp;lt;ffffffff8100c0ca&amp;gt;] child_rip+0xa/0x20
17:24:37: [&amp;lt;ffffffff810969a0&amp;gt;] ? kthread+0x0/0xa0
17:24:37: [&amp;lt;ffffffff8100c0c0&amp;gt;] ? child_rip+0x0/0x20
...
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;&lt;/li&gt;
&lt;/ul&gt;
</comment>
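<!--
The OST stack above shows an ll_ost_io thread inside osd_trans_start() blocking in
txg_wait_open() via dmu_tx_wait(): the ZFS transaction it is trying to assign cannot fit
into the currently open transaction group, so the thread sleeps until the next txg opens.
A minimal sketch of the standard OpenZFS DMU transaction pattern behind that stack (the
function names are the public DMU API; the exact holds that Lustre's osd-zfs layer
declares here are an assumption):

    #include <sys/dmu.h>    /* OpenZFS in-kernel DMU API */

    static int
    write_under_tx(objset_t *os, uint64_t object, uint64_t off, int len, const void *buf)
    {
            dmu_tx_t *tx = dmu_tx_create(os);
            int err;

            /* Declare up front what the tx will modify so ZFS can reserve space. */
            dmu_tx_hold_write(tx, object, off, len);

            /*
             * With TXG_WAIT, dmu_tx_assign() sleeps (dmu_tx_wait() then
             * txg_wait_open()) until a transaction group can accept the hold;
             * this is the frame sequence visible in the dump above.
             */
            err = dmu_tx_assign(tx, TXG_WAIT);
            if (err != 0) {
                    dmu_tx_abort(tx);
                    return (err);
            }

            dmu_write(os, object, off, len, buf, tx);
            dmu_tx_commit(tx);
            return (0);
    }

If no txg can ever take the hold, e.g. because the pool is genuinely out of space, the
assignment fails rather than blocking, which is consistent with the client-side
"No space left on device" and the MDT precreate failures (rc = -5, i.e. -EIO) noted above.
-->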
                            <comment id="75576" author="utopiabound" created="Fri, 24 Jan 2014 18:59:01 +0000"  >&lt;p&gt;Similar occurrence of blocked ll_ost_io thread in replay-single/10&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/6c773394-8445-11e3-a862-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/6c773394-8445-11e3-a862-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;OST log:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;20:59:40:Lustre: DEBUG MARKER: == replay-single test 10: create |X| rename unlink == 20:59:01 (1390453141)
20:59:40:LustreError: 0:0:(ldlm_lockd.c:344:waiting_locks_callback()) ### lock callback timer expired after 210s: evicting client at 10.10.16.209@tcp  ns: filter-lustre-OST0001_UUID lock: ffff88001a728540/0xfea81e9df99a13dc lrc: 3/0,0 mode: PW/PW res: [0x78c5:0x0:0x0].0 rrc: 2 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;4095) flags: 0x60000000010020 nid: 10.10.16.209@tcp remote: 0xa2e90a29d78a83f6 expref: 115 pid: 26117 timeout: 4308758774 lvb_type: 0
20:59:40:LustreError: 0:0:(ldlm_lockd.c:344:waiting_locks_callback()) Skipped 1 previous similar message
21:00:13:Lustre: lustre-OST0001: Export ffff88006ddd4800 already connecting from 10.10.16.209@tcp
21:00:13:Lustre: Skipped 10 previous similar messages
21:00:13:INFO: task ll_ost00_002:25056 blocked for more than 120 seconds.
21:00:13:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
21:00:13:ll_ost00_002  D 0000000000000000     0 25056      2 0x00000080
21:00:13: ffff8800735e9a10 0000000000000046 ffff88006f2716c0 ffffc900076dae40
21:00:13: ffff8800735e99f0 ffffffffa01fa09a ffff8800735e9a60 0000000000000000
21:00:13: ffff880079bc3098 ffff8800735e9fd8 000000000000fb88 ffff880079bc3098
21:00:13:Call Trace:
21:00:13: [&amp;lt;ffffffffa01fa09a&amp;gt;] ? dmu_tx_count_twig+0x10a/0x1a0 [zfs]
21:00:13: [&amp;lt;ffffffff81096fae&amp;gt;] ? prepare_to_wait_exclusive+0x4e/0x80
21:00:13: [&amp;lt;ffffffffa0135edd&amp;gt;] cv_wait_common+0xed/0x100 [spl]
21:00:13: [&amp;lt;ffffffff81096da0&amp;gt;] ? autoremove_wake_function+0x0/0x40
21:00:13: [&amp;lt;ffffffffa0135f45&amp;gt;] __cv_wait+0x15/0x20 [spl]
21:00:13: [&amp;lt;ffffffffa0234e9b&amp;gt;] txg_wait_open+0x7b/0xa0 [zfs]
21:00:13: [&amp;lt;ffffffffa01f9a5d&amp;gt;] dmu_tx_wait+0xed/0xf0 [zfs]
21:00:13: [&amp;lt;ffffffffa01f9aee&amp;gt;] dmu_tx_assign+0x8e/0x4e0 [zfs]
21:00:13: [&amp;lt;ffffffffa0e1c531&amp;gt;] ? osd_declare_quota+0x1c1/0x2d0 [osd_zfs]
21:00:13: [&amp;lt;ffffffffa0e1756c&amp;gt;] osd_trans_start+0x9c/0x410 [osd_zfs]
21:00:13: [&amp;lt;ffffffffa09d4cfe&amp;gt;] tgt_client_data_update+0x26e/0x5a0 [ptlrpc]
21:00:13: [&amp;lt;ffffffffa09d5280&amp;gt;] tgt_client_del+0x250/0x510 [ptlrpc]
21:00:13: [&amp;lt;ffffffffa0ece47e&amp;gt;] ? ofd_grant_discard+0x3e/0x1c0 [ofd]
21:00:14: [&amp;lt;ffffffffa0eba55b&amp;gt;] ofd_obd_disconnect+0x1bb/0x200 [ofd]
21:00:14: [&amp;lt;ffffffffa093c481&amp;gt;] target_handle_disconnect+0x1b1/0x480 [ptlrpc]
21:00:14: [&amp;lt;ffffffffa09dd6d9&amp;gt;] tgt_disconnect+0x39/0x160 [ptlrpc]
21:00:14: [&amp;lt;ffffffffa09ddf2c&amp;gt;] tgt_request_handle+0x23c/0xac0 [ptlrpc]
21:00:14: [&amp;lt;ffffffffa098d61a&amp;gt;] ptlrpc_main+0xd1a/0x1970 [ptlrpc]
21:00:14: [&amp;lt;ffffffffa098c900&amp;gt;] ? ptlrpc_main+0x0/0x1970 [ptlrpc]
21:00:14: [&amp;lt;ffffffff81096a36&amp;gt;] kthread+0x96/0xa0
21:00:14: [&amp;lt;ffffffff8100c0ca&amp;gt;] child_rip+0xa/0x20
21:00:14: [&amp;lt;ffffffff810969a0&amp;gt;] ? kthread+0x0/0xa0
21:00:14: [&amp;lt;ffffffff8100c0c0&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="75679" author="utopiabound" created="Mon, 27 Jan 2014 15:27:56 +0000"  >&lt;p&gt;review-zfs recovery-small/27&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/ffbbaa16-851e-11e3-ac35-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/ffbbaa16-851e-11e3-ac35-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Jan 23 20:54:34 wtm-14vm4 kernel: LNet: Service thread pid 18275 was inactive for 136.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
Jan 23 20:54:34 wtm-14vm4 kernel: Pid: 18275, comm: ll_ost_io00_016
Jan 23 20:54:34 wtm-14vm4 kernel: 
Jan 23 20:54:34 wtm-14vm4 kernel: Call Trace:
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffff81096fae&amp;gt;] ? prepare_to_wait_exclusive+0x4e/0x80
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa0135edd&amp;gt;] cv_wait_common+0xed/0x100 [spl]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffff81096da0&amp;gt;] ? autoremove_wake_function+0x0/0x40
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa0135f45&amp;gt;] __cv_wait+0x15/0x20 [spl]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa0234e9b&amp;gt;] txg_wait_open+0x7b/0xa0 [zfs]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa01f9a5d&amp;gt;] dmu_tx_wait+0xed/0xf0 [zfs]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa01f9aee&amp;gt;] dmu_tx_assign+0x8e/0x4e0 [zfs]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa0e1756c&amp;gt;] osd_trans_start+0x9c/0x410 [osd_zfs]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa0ec30ec&amp;gt;] ofd_trans_start+0x7c/0x100 [ofd]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa0ec7df3&amp;gt;] ofd_commitrw_write+0x523/0xfd0 [ofd]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa0ec8dca&amp;gt;] ofd_commitrw+0x52a/0x8c0 [ofd]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa09d81dd&amp;gt;] obd_commitrw.clone.0+0x11d/0x390 [ptlrpc]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa09df3fe&amp;gt;] tgt_brw_write+0xc7e/0x1530 [ptlrpc]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa05e9a81&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa05e9a81&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa093b7c0&amp;gt;] ? target_bulk_timeout+0x0/0xc0 [ptlrpc]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa09ddefc&amp;gt;] tgt_request_handle+0x23c/0xac0 [ptlrpc]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa098d61a&amp;gt;] ptlrpc_main+0xd1a/0x1970 [ptlrpc]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffffa098c900&amp;gt;] ? ptlrpc_main+0x0/0x1970 [ptlrpc]
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffff81096a36&amp;gt;] kthread+0x96/0xa0
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffff8100c0ca&amp;gt;] child_rip+0xa/0x20
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffff810969a0&amp;gt;] ? kthread+0x0/0xa0
Jan 23 20:54:34 wtm-14vm4 kernel: [&amp;lt;ffffffff8100c0c0&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
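<!--
The recovery-small/27 trace repeats the signature from the earlier comments:
dmu_tx_wait() is entered from inside dmu_tx_assign(), i.e. the blocking TXG_WAIT path
sketched after the first comment. For contrast, a sketch of the other canonical OpenZFS
assignment style, TXG_NOWAIT, in which the caller handles a full txg itself (again the
public DMU API; illustrative only, not code from osd-zfs):

    dmu_tx_t *tx;
    int err;

retry:
    tx = dmu_tx_create(os);
    dmu_tx_hold_write(tx, object, off, len);
    err = dmu_tx_assign(tx, TXG_NOWAIT);
    if (err == ERESTART) {
            /* The open txg is full: wait for the next txg, then rebuild the tx. */
            dmu_tx_wait(tx);
            dmu_tx_abort(tx);
            goto retry;
    } else if (err != 0) {
            dmu_tx_abort(tx);
            return (err);
    }
    /* ... modify objects under tx, then call dmu_tx_commit(tx) ... */

Either way, every thread that needs a transaction funnels through txg_wait_open() when a
txg is slow to close, which is why the traces show OST writes (ofd_commitrw_write) and
last_rcvd updates during disconnect (tgt_client_data_update) stalling on the same condition.
-->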
                            <comment id="130175" author="adilger" created="Tue, 13 Oct 2015 05:25:17 +0000"  >&lt;p&gt;Haven&apos;t seen this in a long time.&lt;/p&gt;</comment>
                            <comment id="130401" author="standan" created="Wed, 14 Oct 2015 17:42:25 +0000"  >&lt;p&gt;Encountered another instance for Hard Failover of EL6.7 Server/Client - ZFS for tag 2.7.61:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/4d8e8fc6-6d42-11e5-bf10-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/4d8e8fc6-6d42-11e5-bf10-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
        </comments>
        <issuelinks>
            <issuelinktype id="10011">
                <name>Related</name>
                <inwardlinks description="is related to">
                    <issuelink>
                        <issuekey id="31074">LU-6844</issuekey>
                    </issuelink>
                </inwardlinks>
            </issuelinktype>
        </issuelinks>
        <attachments>
        </attachments>
        <subtasks>
        </subtasks>
        <customfields>
            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                <customfieldname>Development</customfieldname>
                <customfieldvalues>
                </customfieldvalues>
            </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwc87:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>12183</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>