<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:59:08 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6312] soft lockup in sanity-lfsck test_18f</title>
                <link>https://jira.whamcloud.com/browse/LU-6312</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;I was running &lt;tt&gt;sanity-lfsck&lt;/tt&gt; on master (v2_7_50_0-3-g7ed514f) and hit a soft lockup in test_18f:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Feb 27 05:01:30 LustreError: 8252:0:(fail.c:132:__cfs_fail_timeout_set()) cfs_fail_timeout id 1602 sleeping for 10000ms
Feb 27 05:01:30 LustreError: 8252:0:(fail.c:132:__cfs_fail_timeout_set()) Skipped 1 previous similar message
Feb 27 05:01:40 LustreError: 8252:0:(fail.c:136:__cfs_fail_timeout_set()) cfs_fail_timeout id 1602 awake
Feb 27 05:01:40 LustreError: 8252:0:(fail.c:136:__cfs_fail_timeout_set()) Skipped 1 previous similar message
Feb 27 05:01:43 Lustre: DEBUG MARKER: == sanity-lfsck test 18f: Skip the failed OST(s) when handle orphan OST-objects ====================== 05:01:42 (1425038502)
Feb 27 05:01:43 Pid: 7574, comm: mdt01_002
Feb 27 05:01:43 Call Trace:
Feb 27 05:01:43 [&amp;lt;ffffffffa0f1e895&amp;gt;] libcfs_debug_dumpstack+0x55/0x80 [libcfs]
Feb 27 05:01:43 [&amp;lt;ffffffffa0ff462f&amp;gt;] osd_trans_start+0x63f/0x660 [osd_ldiskfs]
Feb 27 05:01:43 [&amp;lt;ffffffffa11eea1f&amp;gt;] lod_trans_start+0x9f/0x190 [lod]
Feb 27 05:01:43 [&amp;lt;ffffffffa10e0624&amp;gt;] mdd_trans_start+0x14/0x20 [mdd]
Feb 27 05:01:43 [&amp;lt;ffffffffa10cbd05&amp;gt;] mdd_create+0xbf5/0x1730 [mdd]
Feb 27 05:01:43 [&amp;lt;ffffffffa113ecb8&amp;gt;] mdo_create+0x18/0x50 [mdt]
Feb 27 05:01:43 [&amp;lt;ffffffffa1148e6f&amp;gt;] mdt_reint_open+0x1f8f/0x2c70 [mdt]
Feb 27 05:01:43 [&amp;lt;ffffffffa0f3b84c&amp;gt;] ? upcall_cache_get_entry+0x29c/0x880 [libcfs]
Feb 27 05:01:43 [&amp;lt;ffffffffa11300cd&amp;gt;] mdt_reint_rec+0x5d/0x200 [mdt]
Feb 27 05:01:43 [&amp;lt;ffffffffa111423b&amp;gt;] mdt_reint_internal+0x4cb/0x7a0 [mdt]
Feb 27 05:01:43 [&amp;lt;ffffffffa1114706&amp;gt;] mdt_intent_reint+0x1f6/0x430 [mdt]
Feb 27 05:01:43 [&amp;lt;ffffffffa1112cf4&amp;gt;] mdt_intent_policy+0x494/0xce0 [mdt]
Feb 27 05:01:43 [&amp;lt;ffffffffa0af94f9&amp;gt;] ldlm_lock_enqueue+0x129/0x9d0 [ptlrpc]
Feb 27 05:01:43 [&amp;lt;ffffffffa0b2566b&amp;gt;] ldlm_handle_enqueue0+0x51b/0x13f0 [ptlrpc]
Feb 27 05:01:43 [&amp;lt;ffffffffa0ba6331&amp;gt;] tgt_enqueue+0x61/0x230 [ptlrpc]
Feb 27 05:01:43 [&amp;lt;ffffffffa0ba6f7e&amp;gt;] tgt_request_handle+0x8be/0x1000 [ptlrpc]
Feb 27 05:01:43 [&amp;lt;ffffffffa0b569e1&amp;gt;] ptlrpc_main+0xe41/0x1960 [ptlrpc]
Feb 27 05:01:43 [&amp;lt;ffffffffa0b55ba0&amp;gt;] ? ptlrpc_main+0x0/0x1960 [ptlrpc]
Feb 27 05:01:43 [&amp;lt;ffffffff8109abf6&amp;gt;] kthread+0x96/0xa0
Feb 27 05:01:43 [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
Feb 27 05:01:43 [&amp;lt;ffffffff8109ab60&amp;gt;] ? kthread+0x0/0xa0
Feb 27 05:01:43 [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
Feb 27 05:01:43 
Feb 27 05:01:44 Lustre: DEBUG MARKER: cancel_lru_locks osc start
Feb 27 05:01:44 Lustre: DEBUG MARKER: cancel_lru_locks osc stop
Feb 27 05:01:44 Lustre: *** cfs_fail_loc=1616, val=0***
Feb 27 05:01:44 Lustre: Skipped 1 previous similar message
Feb 27 05:01:47 Lustre: DEBUG MARKER: cancel_lru_locks mdc stop
Feb 27 05:01:47 Lustre: DEBUG MARKER: cancel_lru_locks osc start
Feb 27 05:01:47 Lustre: DEBUG MARKER: cancel_lru_locks osc stop
Feb 27 05:01:47 Lustre: *** cfs_fail_loc=161c, val=0***
Feb 27 05:01:54 Lustre: testfs-OST0002: Client testfs-MDT0001-mdtlov_UUID (at 0@lo) reconnecting
Feb 27 05:01:54 Lustre: testfs-OST0002: deleting orphan objects from 0x280000400:102 to 0x280000400:193
Feb 27 05:01:54 Lustre: testfs-OST0000: deleting orphan objects from 0x0:901 to 0x0:929
Feb 27 05:02:01 Lustre: testfs-OST0000: Client testfs-MDT0000-mdtlov_UUID (at 0@lo) reconnecting
Feb 27 05:02:01 Lustre: Skipped 1 previous similar message
Feb 27 05:02:01 Lustre: testfs-OST0000: deleting orphan objects from 0x0:901 to 0x0:961
Feb 27 05:02:08 Lustre: testfs-OST0000: Client testfs-MDT0000-mdtlov_UUID (at 0@lo) reconnecting
Feb 27 05:02:08 Lustre: testfs-OST0000: deleting orphan objects from 0x0:901 to 0x0:993
Feb 27 05:02:15 Lustre: testfs-OST0000: Client testfs-MDT0000-mdtlov_UUID (at 0@lo) reconnecting
Feb 27 05:02:15 Lustre: testfs-OST0000: deleting orphan objects from 0x0:901 to 0x0:1025
Feb 27 05:02:22 Lustre: testfs-OST0000: Client testfs-MDT0000-mdtlov_UUID (at 0@lo) reconnecting
Feb 27 05:02:22 Lustre: testfs-OST0000: deleting orphan objects from 0x0:901 to 0x0:1057
Feb 27 05:02:29 Lustre: testfs-OST0000: deleting orphan objects from 0x0:901 to 0x0:1089
Feb 27 05:02:36 Lustre: testfs-OST0000: Client testfs-MDT0000-mdtlov_UUID (at 0@lo) reconnecting
Feb 27 05:02:36 Lustre: Skipped 1 previous similar message
Feb 27 05:02:36 Lustre: testfs-OST0000: deleting orphan objects from 0x0:901 to 0x0:1121
Feb 27 05:02:43 Lustre: testfs-OST0000: deleting orphan objects from 0x0:901 to 0x0:1153
Feb 27 05:02:50 Lustre: testfs-OST0000: deleting orphan objects from 0x0:901 to 0x0:1185
Feb 27 05:02:57 Lustre: testfs-OST0000: Client testfs-MDT0000-mdtlov_UUID (at 0@lo) reconnecting
Feb 27 05:02:57 Lustre: Skipped 2 previous similar messages
Feb 27 05:02:57 Lustre: testfs-OST0000: deleting orphan objects from 0x0:901 to 0x0:1217
Feb 27 05:03:00 BUG: soft lockup - CPU#0 stuck for 67s! [ll_ost00_015:7824]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;The stack trace is a bit strange since it lists all of the addresses on the stack as &quot;uncertain&quot; (leading &apos;?&apos;):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Pid: 7824, comm: ll_ost00_015 Tainted: P           ---------------    2.6.32-431.29.2.el6_lustre.g36cd22b.x86_64RIP: 0010:[&amp;lt;ffffffff8152c10e&amp;gt;]  [&amp;lt;ffffffff8152c10e&amp;gt;] _spin_lock+0x1e/0x30
Process ll_ost00_015 (pid: 7824, threadinfo ffff8800ca29a000, task ffff8800d91e0040)
Call Trace:
 ? lfsck_layout_slave_in_notify+0x210/0x15b0 [lfsck]
 ? null_alloc_rs+0xf3/0x390 [ptlrpc]
 ? sptlrpc_svc_alloc_rs+0x74/0x360 [ptlrpc]
 ? lustre_msg_buf+0x55/0x60 [ptlrpc]
 ? __req_capsule_get+0x162/0x6d0 [ptlrpc]
 ? lfsck_in_notify+0xef/0x330 [lfsck]
 ? tgt_handle_lfsck_notify+0x64/0x150 [ptlrpc]
 ? tgt_request_handle+0x8be/0x1000 [ptlrpc]
 ? ptlrpc_main+0xe41/0x1960 [ptlrpc]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The test did not progress beyond this, and the same process has been stuck in the watchdog for many hours.&lt;/p&gt;</description>
                <environment>Single node test system (MGT, 2x MDT, 3x OST)&lt;br/&gt;
RHEL 6.5 kernel 2.6.32-431.29.2.el6_lustre.g36cd22b.x86_64&lt;br/&gt;
Lustre v2_7_50_0-3-g7ed514f&lt;br/&gt;
{noformat}&lt;br/&gt;
7ed514fc1c51 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5420&quot; title=&quot;Failure on test suite sanity test_17m: mount MDS failed, Input/output error&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5420&quot;&gt;&lt;strike&gt;LU-5420&lt;/strike&gt;&lt;/a&gt; ptlrpc: revert ptlrpc_reconnect_import() changes&lt;br/&gt;
88a0262fdea8 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5912&quot; title=&quot;locking flaw generates logged errors&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5912&quot;&gt;&lt;strike&gt;LU-5912&lt;/strike&gt;&lt;/a&gt; build: Fix XeonPhi build&lt;br/&gt;
b851f20baba1 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6216&quot; title=&quot;Compilation error libtool on ppc64&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6216&quot;&gt;&lt;strike&gt;LU-6216&lt;/strike&gt;&lt;/a&gt; tests: compile fixes for PPC64, and for clang&lt;br/&gt;
f23e085ac937 Move master branch to 2.8 development&lt;br/&gt;
{noformat}</environment>
        <key id="28901">LU-6312</key>
            <summary>soft lockup in sanity-lfsck test_18f</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="yong.fan">nasf</assignee>
                                    <reporter username="adilger">Andreas Dilger</reporter>
                        <labels>
                    </labels>
                <created>Sun, 1 Mar 2015 23:36:39 +0000</created>
                <updated>Tue, 3 Mar 2015 18:09:35 +0000</updated>
                            <resolved>Tue, 3 Mar 2015 18:09:35 +0000</resolved>
                                    <version>Lustre 2.7.0</version>
                    <version>Lustre 2.8.0</version>
                                    <fixVersion>Lustre 2.7.0</fixVersion>
                    <fixVersion>Lustre 2.8.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                                                                            <comments>
                            <comment id="108349" author="adilger" created="Sun, 1 Mar 2015 23:37:20 +0000"  >&lt;p&gt;PS: I don&apos;t know if this is a new failure, or has been around for some time, as I haven&apos;t tested sanity-lfsck in some time.&lt;/p&gt;</comment>
                            <comment id="108359" author="yong.fan" created="Mon, 2 Mar 2015 03:40:38 +0000"  >&lt;p&gt;There is spin_lock leak in layout LFSCK, that may cause modifying the lfsck_layout_slave_data::llsd_master_list without spin_lock when others traverses such list with spin_lock.&lt;/p&gt;</comment>
                            <comment id="108360" author="gerrit" created="Mon, 2 Mar 2015 03:49:31 +0000"  >&lt;p&gt;Fan Yong (fan.yong@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/13921&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13921&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6312&quot; title=&quot;soft lockup in sanity-lfsck test_18f&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6312&quot;&gt;&lt;del&gt;LU-6312&lt;/del&gt;&lt;/a&gt; lfsck: modify llsd_master_list with spin_lock&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: a9a3bc335ccd5af33cfcbf2fc2a3f425727a1a6a&lt;/p&gt;</comment>
                            <comment id="108452" author="gerrit" created="Mon, 2 Mar 2015 18:41:42 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/13921/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13921/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6312&quot; title=&quot;soft lockup in sanity-lfsck test_18f&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6312&quot;&gt;&lt;del&gt;LU-6312&lt;/del&gt;&lt;/a&gt; lfsck: modify llsd_master_list with spin_lock&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 094030bab406b0ea5d45f711549327829b68c9cd&lt;/p&gt;</comment>
                            <comment id="108506" author="gerrit" created="Tue, 3 Mar 2015 00:14:11 +0000"  >&lt;p&gt;Fan Yong (fan.yong@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/13944&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13944&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6312&quot; title=&quot;soft lockup in sanity-lfsck test_18f&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6312&quot;&gt;&lt;del&gt;LU-6312&lt;/del&gt;&lt;/a&gt; lfsck: modify llsd_master_list with spin_lock&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_7&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b9fa93bb5cad33f9526a7bbb488ef359c176f1dd&lt;/p&gt;</comment>
                            <comment id="108608" author="gerrit" created="Tue, 3 Mar 2015 18:03:13 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/13944/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13944/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6312&quot; title=&quot;soft lockup in sanity-lfsck test_18f&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6312&quot;&gt;&lt;del&gt;LU-6312&lt;/del&gt;&lt;/a&gt; lfsck: modify llsd_master_list with spin_lock&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_7&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 081a71a121f0bb42d73a7a90812fc3b2a1c02091&lt;/p&gt;</comment>
                            <comment id="108611" author="pjones" created="Tue, 3 Mar 2015 18:09:35 +0000"  >&lt;p&gt;Landed for 2.7 and 2.8&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzx7db:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>17667</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>