<?xml version="1.0" encoding="UTF-8"?>
<!--
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:57:00 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6075] Interop 2.6.0&lt;-&gt;2.7 sanity-scrub test_7: D process in MDS</title>
                <link>https://jira.whamcloud.com/browse/LU-6075</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/a1aa0448-7e7b-11e4-8b8b-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/a1aa0448-7e7b-11e4-8b8b-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_7 failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;test failed to respond and timed out
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;01:03:59:LustreError: 17421:0:(qmt_handler.c:427:qmt_dqacq0()) $$$ Release too much! uuid:lustre-MDT0000-lwp-OST0004_UUID release:262188 granted:0, total:0 qmt:lustre-QMT0000 pool:0-dt id:500 enforced:1 hard:394749 soft:375952 granted:0 time:0 qunit:16384 edquot:0 may_rel:0 revoke:0
01:03:59:LustreError: 17421:0:(qmt_handler.c:427:qmt_dqacq0()) Skipped 1 previous similar message
01:03:59:Lustre: *** cfs_fail_loc=191, val=0***
01:03:59:Lustre: lustre-MDT0000: trigger OI scrub by RPC for [0x200000bd0:0x100:0x0], rc = 0 [2]
01:03:59:INFO: task jbd2/dm-0-8:17400 blocked for more than 120 seconds.
01:03:59:      Not tainted 2.6.32-431.29.2.el6_lustre.x86_64 #1
01:03:59:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
01:03:59:jbd2/dm-0-8   D 0000000000000001     0 17400      2 0x00000080
01:03:59: ffff88005b709d20 0000000000000046 ffff880065334090 ffff880065334080
01:03:59: ffff8800606a7680 ffff8800023168e8 0000000000017997 ffff88006d06d540
01:03:59: ffff88006d06daf8 ffff88005b709fd8 000000000000fbc8 ffff88006d06daf8
01:03:59:Call Trace:
01:03:59: [&amp;lt;ffffffff8109b2ce&amp;gt;] ? prepare_to_wait+0x4e/0x80
01:03:59: [&amp;lt;ffffffffa03df80f&amp;gt;] jbd2_journal_commit_transaction+0x19f/0x1500 [jbd2]
01:03:59: [&amp;lt;ffffffff810096f0&amp;gt;] ? __switch_to+0xd0/0x320
01:03:59: [&amp;lt;ffffffff81083e1c&amp;gt;] ? lock_timer_base+0x3c/0x70
01:03:59: [&amp;lt;ffffffff8109afa0&amp;gt;] ? autoremove_wake_function+0x0/0x40
01:03:59: [&amp;lt;ffffffffa03e5a58&amp;gt;] kjournald2+0xb8/0x220 [jbd2]
01:03:59: [&amp;lt;ffffffff8109afa0&amp;gt;] ? autoremove_wake_function+0x0/0x40
01:03:59: [&amp;lt;ffffffffa03e59a0&amp;gt;] ? kjournald2+0x0/0x220 [jbd2]
01:03:59: [&amp;lt;ffffffff8109abf6&amp;gt;] kthread+0x96/0xa0
01:03:59: [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
01:03:59: [&amp;lt;ffffffff8109ab60&amp;gt;] ? kthread+0x0/0xa0
01:03:59: [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
01:03:59:INFO: task osp-syn-0-0:17440 blocked for more than 120 seconds.
01:03:59:      Not tainted 2.6.32-431.29.2.el6_lustre.x86_64 #1
01:03:59:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
01:03:59:osp-syn-0-0   D 0000000000000000     0 17440      2 0x00000080
01:03:59: ffff8800605c1700 0000000000000046 0000000000000000 ffffffff8128d756
01:03:59: 0000000000000008 ffff88006a811a80 ffff8800605c2f68 ffff88005e305600
01:03:59: ffff88006aeb9ab8 ffff8800605c1fd8 000000000000fbc8 ffff88006aeb9ab8
01:03:59:Call Trace:
01:03:59: [&amp;lt;ffffffff8128d756&amp;gt;] ? vsnprintf+0x336/0x5e0
01:03:59: [&amp;lt;ffffffffa03de08a&amp;gt;] start_this_handle+0x25a/0x480 [jbd2]
01:03:59: [&amp;lt;ffffffff8109afa0&amp;gt;] ? autoremove_wake_function+0x0/0x40
01:03:59: [&amp;lt;ffffffffa03de495&amp;gt;] jbd2_journal_start+0xb5/0x100 [jbd2]
01:03:59: [&amp;lt;ffffffffa05b55d4&amp;gt;] ? llog_osd_declare_write_rec+0xd4/0x3f0 [obdclass]
01:03:59: [&amp;lt;ffffffffa04367b6&amp;gt;] ldiskfs_journal_start_sb+0x56/0xe0 [ldiskfs]
01:03:59: [&amp;lt;ffffffffa0d0c7cf&amp;gt;] osd_trans_start+0x1df/0x660 [osd_ldiskfs]
01:03:59: [&amp;lt;ffffffffa05a4b7c&amp;gt;] llog_write+0x22c/0x420 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a4e3f&amp;gt;] llog_cancel_rec+0xaf/0x690 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aadf3&amp;gt;] llog_cat_cancel_records+0x133/0x2f0 [obdclass]
01:03:59: [&amp;lt;ffffffffa10b13f2&amp;gt;] osp_sync_process_committed+0x2b2/0x820 [osp]
01:03:59: [&amp;lt;ffffffffa10b19d3&amp;gt;] osp_sync_process_queues+0x73/0x700 [osp]
01:03:59: [&amp;lt;ffffffff81061d00&amp;gt;] ? default_wake_function+0x0/0x20
01:03:59: [&amp;lt;ffffffffa05a5ec9&amp;gt;] llog_process_thread+0xaa9/0xe80 [obdclass]
01:03:59: [&amp;lt;ffffffffa10b1960&amp;gt;] ? osp_sync_process_queues+0x0/0x700 [osp]
01:03:59: [&amp;lt;ffffffffa05a7abf&amp;gt;] llog_process_or_fork+0x13f/0x540 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aab28&amp;gt;] llog_cat_process_cb+0x448/0x5e0 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a5ec9&amp;gt;] llog_process_thread+0xaa9/0xe80 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aa6e0&amp;gt;] ? llog_cat_process_cb+0x0/0x5e0 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a7abf&amp;gt;] llog_process_or_fork+0x13f/0x540 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a8dfd&amp;gt;] llog_cat_process_or_fork+0x1ad/0x300 [obdclass]
01:03:59: [&amp;lt;ffffffff81061d12&amp;gt;] ? default_wake_function+0x12/0x20
01:03:59: [&amp;lt;ffffffff810546b9&amp;gt;] ? __wake_up_common+0x59/0x90
01:03:59: [&amp;lt;ffffffffa10b1960&amp;gt;] ? osp_sync_process_queues+0x0/0x700 [osp]
01:03:59: [&amp;lt;ffffffffa05a8f69&amp;gt;] llog_cat_process+0x19/0x20 [obdclass]
01:03:59: [&amp;lt;ffffffffa10b2673&amp;gt;] osp_sync_thread+0x243/0x7d0 [osp]
01:03:59: [&amp;lt;ffffffff815296ee&amp;gt;] ? thread_return+0x4e/0x770
01:03:59: [&amp;lt;ffffffffa10b2430&amp;gt;] ? osp_sync_thread+0x0/0x7d0 [osp]
01:03:59: [&amp;lt;ffffffff8109abf6&amp;gt;] kthread+0x96/0xa0
01:03:59: [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
01:03:59: [&amp;lt;ffffffff8109ab60&amp;gt;] ? kthread+0x0/0xa0
01:03:59: [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
01:03:59:INFO: task osp-syn-1-0:17442 blocked for more than 120 seconds.
01:03:59:      Not tainted 2.6.32-431.29.2.el6_lustre.x86_64 #1
01:03:59:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
01:03:59:osp-syn-1-0   D 0000000000000000     0 17442      2 0x00000080
01:03:59: ffff880057c99700 0000000000000046 0000000000000000 ffffffff8128d756
01:03:59: 0000000000000008 ffff88006a811b40 ffff88005d7d0f68 ffff88005e305600
01:03:59: ffff88006cf95098 ffff880057c99fd8 000000000000fbc8 ffff88006cf95098
01:03:59:Call Trace:
01:03:59: [&amp;lt;ffffffff8128d756&amp;gt;] ? vsnprintf+0x336/0x5e0
01:03:59: [&amp;lt;ffffffffa03de08a&amp;gt;] start_this_handle+0x25a/0x480 [jbd2]
01:03:59: [&amp;lt;ffffffff8109afa0&amp;gt;] ? autoremove_wake_function+0x0/0x40
01:03:59: [&amp;lt;ffffffffa03de495&amp;gt;] jbd2_journal_start+0xb5/0x100 [jbd2]
01:03:59: [&amp;lt;ffffffffa05b55d4&amp;gt;] ? llog_osd_declare_write_rec+0xd4/0x3f0 [obdclass]
01:03:59: [&amp;lt;ffffffffa04367b6&amp;gt;] ldiskfs_journal_start_sb+0x56/0xe0 [ldiskfs]
01:03:59: [&amp;lt;ffffffffa0d0c7cf&amp;gt;] osd_trans_start+0x1df/0x660 [osd_ldiskfs]
01:03:59: [&amp;lt;ffffffffa05a4b7c&amp;gt;] llog_write+0x22c/0x420 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a4e3f&amp;gt;] llog_cancel_rec+0xaf/0x690 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aadf3&amp;gt;] llog_cat_cancel_records+0x133/0x2f0 [obdclass]
01:03:59: [&amp;lt;ffffffffa10b13f2&amp;gt;] osp_sync_process_committed+0x2b2/0x820 [osp]
01:03:59: [&amp;lt;ffffffffa10b19d3&amp;gt;] osp_sync_process_queues+0x73/0x700 [osp]
01:03:59: [&amp;lt;ffffffff81061d00&amp;gt;] ? default_wake_function+0x0/0x20
01:03:59: [&amp;lt;ffffffffa05a5ec9&amp;gt;] llog_process_thread+0xaa9/0xe80 [obdclass]
01:03:59: [&amp;lt;ffffffffa10b1960&amp;gt;] ? osp_sync_process_queues+0x0/0x700 [osp]
01:03:59: [&amp;lt;ffffffffa05a7abf&amp;gt;] llog_process_or_fork+0x13f/0x540 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aab28&amp;gt;] llog_cat_process_cb+0x448/0x5e0 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a5ec9&amp;gt;] llog_process_thread+0xaa9/0xe80 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aa6e0&amp;gt;] ? llog_cat_process_cb+0x0/0x5e0 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a7abf&amp;gt;] llog_process_or_fork+0x13f/0x540 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a8dfd&amp;gt;] llog_cat_process_or_fork+0x1ad/0x300 [obdclass]
01:03:59: [&amp;lt;ffffffff81061d12&amp;gt;] ? default_wake_function+0x12/0x20
01:03:59: [&amp;lt;ffffffff810546b9&amp;gt;] ? __wake_up_common+0x59/0x90
01:03:59: [&amp;lt;ffffffffa10b1960&amp;gt;] ? osp_sync_process_queues+0x0/0x700 [osp]
01:03:59: [&amp;lt;ffffffffa05a8f69&amp;gt;] llog_cat_process+0x19/0x20 [obdclass]
01:03:59: [&amp;lt;ffffffffa10b2673&amp;gt;] osp_sync_thread+0x243/0x7d0 [osp]
01:03:59: [&amp;lt;ffffffff815296ee&amp;gt;] ? thread_return+0x4e/0x770
01:03:59: [&amp;lt;ffffffffa10b2430&amp;gt;] ? osp_sync_thread+0x0/0x7d0 [osp]
01:03:59: [&amp;lt;ffffffff8109abf6&amp;gt;] kthread+0x96/0xa0
01:03:59: [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
01:03:59: [&amp;lt;ffffffff8109ab60&amp;gt;] ? kthread+0x0/0xa0
01:03:59: [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
01:03:59:INFO: task osp-syn-2-0:17444 blocked for more than 120 seconds.
01:03:59:      Not tainted 2.6.32-431.29.2.el6_lustre.x86_64 #1
01:03:59:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
01:03:59:osp-syn-2-0   D 0000000000000001     0 17444      2 0x00000080
01:03:59: ffff880079239700 0000000000000046 0000000000000000 ffffffff8128d756
01:03:59: 0000000000000008 ffff88006ca29d80 ffff88007923cf68 ffff88005e305600
01:03:59: ffff880057dbd098 ffff880079239fd8 000000000000fbc8 ffff880057dbd098
01:03:59:Call Trace:
01:03:59: [&amp;lt;ffffffff8128d756&amp;gt;] ? vsnprintf+0x336/0x5e0
01:03:59: [&amp;lt;ffffffffa03de08a&amp;gt;] start_this_handle+0x25a/0x480 [jbd2]
01:03:59: [&amp;lt;ffffffff8109afa0&amp;gt;] ? autoremove_wake_function+0x0/0x40
01:03:59: [&amp;lt;ffffffffa03de495&amp;gt;] jbd2_journal_start+0xb5/0x100 [jbd2]
01:03:59: [&amp;lt;ffffffffa05b55d4&amp;gt;] ? llog_osd_declare_write_rec+0xd4/0x3f0 [obdclass]
01:03:59: [&amp;lt;ffffffffa04367b6&amp;gt;] ldiskfs_journal_start_sb+0x56/0xe0 [ldiskfs]
01:03:59: [&amp;lt;ffffffffa0d0c7cf&amp;gt;] osd_trans_start+0x1df/0x660 [osd_ldiskfs]
01:03:59: [&amp;lt;ffffffffa05a4b7c&amp;gt;] llog_write+0x22c/0x420 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a4e3f&amp;gt;] llog_cancel_rec+0xaf/0x690 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aadf3&amp;gt;] llog_cat_cancel_records+0x133/0x2f0 [obdclass]
01:03:59: [&amp;lt;ffffffffa10b13f2&amp;gt;] osp_sync_process_committed+0x2b2/0x820 [osp]
01:03:59: [&amp;lt;ffffffffa10b19d3&amp;gt;] osp_sync_process_queues+0x73/0x700 [osp]
01:03:59: [&amp;lt;ffffffff81061d00&amp;gt;] ? default_wake_function+0x0/0x20
01:03:59: [&amp;lt;ffffffffa05a5ec9&amp;gt;] llog_process_thread+0xaa9/0xe80 [obdclass]
01:03:59: [&amp;lt;ffffffff81060aa3&amp;gt;] ? perf_event_task_sched_out+0x33/0x70
01:03:59: [&amp;lt;ffffffffa10b1960&amp;gt;] ? osp_sync_process_queues+0x0/0x700 [osp]
01:03:59: [&amp;lt;ffffffffa05a7abf&amp;gt;] llog_process_or_fork+0x13f/0x540 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aab28&amp;gt;] llog_cat_process_cb+0x448/0x5e0 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a5ec9&amp;gt;] llog_process_thread+0xaa9/0xe80 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aa6e0&amp;gt;] ? llog_cat_process_cb+0x0/0x5e0 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a7abf&amp;gt;] llog_process_or_fork+0x13f/0x540 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a8dfd&amp;gt;] llog_cat_process_or_fork+0x1ad/0x300 [obdclass]
01:03:59: [&amp;lt;ffffffff81061d12&amp;gt;] ? default_wake_function+0x12/0x20
01:03:59: [&amp;lt;ffffffff810546b9&amp;gt;] ? __wake_up_common+0x59/0x90
01:03:59: [&amp;lt;ffffffffa10b1960&amp;gt;] ? osp_sync_process_queues+0x0/0x700 [osp]
01:03:59: [&amp;lt;ffffffffa05a8f69&amp;gt;] llog_cat_process+0x19/0x20 [obdclass]
01:03:59: [&amp;lt;ffffffffa10b2673&amp;gt;] osp_sync_thread+0x243/0x7d0 [osp]
01:03:59: [&amp;lt;ffffffff815296ee&amp;gt;] ? thread_return+0x4e/0x770
01:03:59: [&amp;lt;ffffffffa10b2430&amp;gt;] ? osp_sync_thread+0x0/0x7d0 [osp]
01:03:59: [&amp;lt;ffffffff8109abf6&amp;gt;] kthread+0x96/0xa0
01:03:59: [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
01:03:59: [&amp;lt;ffffffff8109ab60&amp;gt;] ? kthread+0x0/0xa0
01:03:59: [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
01:03:59:INFO: task osp-syn-3-0:17446 blocked for more than 120 seconds.
01:03:59:      Not tainted 2.6.32-431.29.2.el6_lustre.x86_64 #1
01:03:59:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
01:03:59:osp-syn-3-0   D 0000000000000001     0 17446      2 0x00000080
01:03:59: ffff8800581b5700 0000000000000046 0000000000000000 ffffffff8128d756
01:03:59: 0000000000000008 ffff880065334680 ffff8800581b6f68 ffff88005e305600
01:03:59: ffff880066fde5f8 ffff8800581b5fd8 000000000000fbc8 ffff880066fde5f8
01:03:59:Call Trace:
01:03:59: [&amp;lt;ffffffff8128d756&amp;gt;] ? vsnprintf+0x336/0x5e0
01:03:59: [&amp;lt;ffffffffa03de08a&amp;gt;] start_this_handle+0x25a/0x480 [jbd2]
01:03:59: [&amp;lt;ffffffff8109afa0&amp;gt;] ? autoremove_wake_function+0x0/0x40
01:03:59: [&amp;lt;ffffffffa03de495&amp;gt;] jbd2_journal_start+0xb5/0x100 [jbd2]
01:03:59: [&amp;lt;ffffffffa05b55d4&amp;gt;] ? llog_osd_declare_write_rec+0xd4/0x3f0 [obdclass]
01:03:59: [&amp;lt;ffffffffa04367b6&amp;gt;] ldiskfs_journal_start_sb+0x56/0xe0 [ldiskfs]
01:03:59: [&amp;lt;ffffffffa0d0c7cf&amp;gt;] osd_trans_start+0x1df/0x660 [osd_ldiskfs]
01:03:59: [&amp;lt;ffffffffa05a4b7c&amp;gt;] llog_write+0x22c/0x420 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a4e3f&amp;gt;] llog_cancel_rec+0xaf/0x690 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aadf3&amp;gt;] llog_cat_cancel_records+0x133/0x2f0 [obdclass]
01:03:59: [&amp;lt;ffffffffa10b13f2&amp;gt;] osp_sync_process_committed+0x2b2/0x820 [osp]
01:03:59: [&amp;lt;ffffffffa10b19d3&amp;gt;] osp_sync_process_queues+0x73/0x700 [osp]
01:03:59: [&amp;lt;ffffffff81061d00&amp;gt;] ? default_wake_function+0x0/0x20
01:03:59: [&amp;lt;ffffffffa05a5ec9&amp;gt;] llog_process_thread+0xaa9/0xe80 [obdclass]
01:03:59: [&amp;lt;ffffffffa10b1960&amp;gt;] ? osp_sync_process_queues+0x0/0x700 [osp]
01:03:59: [&amp;lt;ffffffffa05a7abf&amp;gt;] llog_process_or_fork+0x13f/0x540 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aab28&amp;gt;] llog_cat_process_cb+0x448/0x5e0 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a5ec9&amp;gt;] llog_process_thread+0xaa9/0xe80 [obdclass]
01:03:59: [&amp;lt;ffffffffa05aa6e0&amp;gt;] ? llog_cat_process_cb+0x0/0x5e0 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a7abf&amp;gt;] llog_process_or_fork+0x13f/0x540 [obdclass]
01:03:59: [&amp;lt;ffffffffa05a8dfd&amp;gt;] llog_cat_process_or_fork+0x1ad/0x300 [obdclass]
01:03:59: [&amp;lt;ffffffff81061d12&amp;gt;] ? default_wake_function+0x12/0x20
01:03:59: [&amp;lt;ffffffff810546b9&amp;gt;] ? __wake_up_common+0x59/0x90
01:03:59: [&amp;lt;ffffffffa10b1960&amp;gt;] ? osp_sync_process_queues+0x0/0x700 [osp]
01:03:59: [&amp;lt;ffffffffa05a8f69&amp;gt;] llog_cat_process+0x19/0x20 [obdclass]
01:03:59: [&amp;lt;ffffffffa10b2673&amp;gt;] osp_sync_thread+0x243/0x7d0 [osp]
01:03:59: [&amp;lt;ffffffff815296ee&amp;gt;] ? thread_return+0x4e/0x770
01:03:59: [&amp;lt;ffffffffa10b2430&amp;gt;] ? osp_sync_thread+0x0/0x7d0 [osp]
01:03:59: [&amp;lt;ffffffff8109abf6&amp;gt;] kthread+0x96/0xa0
01:03:59: [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
01:03:59: [&amp;lt;ffffffff8109ab60&amp;gt;] ? kthread+0x0/0xa0
01:03:59: [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
01:03:59:INFO: task mdt00_004:18960 blocked for more than 120 seconds.
01:03:59:      Not tainted 2.6.32-431.29.2.el6_lustre.x86_64 #1
01:03:59:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
01:03:59:mdt00_004     D 0000000000000001     0 18960      2 0x00000080
01:03:59: ffff88007942f7b0 0000000000000046 0000000000000000 ffffffff8116f933
01:03:59: ffff88007942f760 ffffffff8116f933 ffff88007942f7d0 ffffffffa0cb5d0f
01:03:59: ffff88006c955ab8 ffff88007942ffd8 000000000000fbc8 ffff88006c955ab8
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>server: lustre-master build #2770&lt;br/&gt;
client: 2.6.0</environment>
        <key id="28023">LU-6075</key>
            <summary>Interop 2.6.0&lt;-&gt;2.7 sanity-scrub test_7: D process in MDS</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Mon, 29 Dec 2014 22:16:19 +0000</created>
                <updated>Fri, 20 Feb 2015 16:15:51 +0000</updated>
                            <resolved>Fri, 20 Feb 2015 16:15:51 +0000</resolved>
                                    <version>Lustre 2.7.0</version>
                                    <fixVersion>Lustre 2.7.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>8</watches>
                                                                            <comments>
                            <comment id="102403" author="yong.fan" created="Tue, 30 Dec 2014 07:57:04 +0000"  >&lt;p&gt;In the test:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/a1aa0448-7e7b-11e4-8b8b-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/a1aa0448-7e7b-11e4-8b8b-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;sanity-scrub test_6 failed for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6012&quot; title=&quot;sanity-scrub test_6 test_7 test_8 test_9 test_10a: expected &amp;#39;inconsistent&amp;#39; but got &amp;#39;inconsistent,auto&amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6012&quot;&gt;&lt;del&gt;LU-6012&lt;/del&gt;&lt;/a&gt;, which has been fixed by the patch: &lt;a href=&quot;http://review.whamcloud.com/#/c/13020/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/13020/&lt;/a&gt;. The build #2770 does not contains such patch. So please retry the latest master.&lt;/p&gt;</comment>
                            <comment id="102405" author="yong.fan" created="Tue, 30 Dec 2014 08:20:10 +0000"  >&lt;p&gt;About sanity-scrub test_7 failure: the OI scrub thread was blocked at jbd2_journal_start() because the thread jbd2/dm-0-8 was making journal flush at jbd2_journal_commit_transaction(); but thread jbd2/dm-0-8 was waiting the thread mdt_007 to stop its current transaction. Unfortunately, the thread mdt_007 was hung inside mdd_unlink for OSD PDO lock:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;01:03:59: [&amp;lt;ffffffff811bf647&amp;gt;] ? __find_get_block+0x97/0xe0
01:03:59: [&amp;lt;ffffffffa0441a8a&amp;gt;] htree_node_lock_try+0x5aa/0x720 [ldiskfs]
01:03:59: [&amp;lt;ffffffffa04234ae&amp;gt;] ldiskfs_htree_node_lock+0x9e/0xc0 [ldiskfs]
01:03:59: [&amp;lt;ffffffffa042524f&amp;gt;] dx_probe+0x3cf/0x6f0 [ldiskfs]
01:03:59: [&amp;lt;ffffffffa042580e&amp;gt;] ldiskfs_dx_find_entry+0x29e/0x2e0 [ldiskfs]
01:03:59: [&amp;lt;ffffffffa0cb5d0f&amp;gt;] ? qsd_op_begin+0x5f/0xb40 [lquota]
01:03:59: [&amp;lt;ffffffffa0425b6b&amp;gt;] ldiskfs_find_entry+0x31b/0x4a0 [ldiskfs]
01:03:59: [&amp;lt;ffffffffa0d1543a&amp;gt;] osd_index_ea_delete+0x2ba/0xc90 [osd_ldiskfs]
01:03:59: [&amp;lt;ffffffffa0fd0117&amp;gt;] lod_index_delete+0xa7/0x140 [lod]
01:03:59: [&amp;lt;ffffffffa103758c&amp;gt;] __mdd_index_delete_only+0xdc/0x280 [mdd]
01:03:59: [&amp;lt;ffffffffa1038500&amp;gt;] __mdd_index_delete+0x50/0x2c0 [mdd]
01:03:59: [&amp;lt;ffffffffa103df5c&amp;gt;] mdd_unlink+0x81c/0xea0 [mdd]
01:03:59: [&amp;lt;ffffffffa0efe008&amp;gt;] mdo_unlink+0x18/0x50 [mdt]
01:03:59: [&amp;lt;ffffffffa0f080b6&amp;gt;] mdt_reint_unlink+0x9c6/0x10c0 [mdt]
01:03:59: [&amp;lt;ffffffffa060e810&amp;gt;] ? lu_ucred+0x20/0x30 [obdclass]
01:03:59: [&amp;lt;ffffffffa0efe09d&amp;gt;] mdt_reint_rec+0x5d/0x200 [mdt]
01:03:59: [&amp;lt;ffffffffa0ee218b&amp;gt;] mdt_reint_internal+0x4cb/0x7a0 [mdt]
01:03:59: [&amp;lt;ffffffffa0ee29eb&amp;gt;] mdt_reint+0x6b/0x120 [mdt]
01:03:59: [&amp;lt;ffffffffa0873ade&amp;gt;] tgt_request_handle+0x6fe/0xaf0 [ptlrpc]
01:03:59: [&amp;lt;ffffffffa0823411&amp;gt;] ptlrpc_main+0xe41/0x1950 [ptlrpc]
01:03:59: [&amp;lt;ffffffffa08225d0&amp;gt;] ? ptlrpc_main+0x0/0x1950 [ptlrpc]
01:03:59: [&amp;lt;ffffffff8109abf6&amp;gt;] kthread+0x96/0xa0
01:03:59: [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
01:03:59: [&amp;lt;ffffffff8109ab60&amp;gt;] ? kthread+0x0/0xa0
01:03:59: [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;There were no more logs to indicate why mdt_007 was blocked by the PDO lock.&lt;/p&gt;</comment>
                            <comment id="102423" author="sarah" created="Tue, 30 Dec 2014 19:05:12 +0000"  >&lt;p&gt;this test passed on the latest master version&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/98b0df84-8fcd-11e4-9293-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/98b0df84-8fcd-11e4-9293-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="102440" author="yong.fan" created="Wed, 31 Dec 2014 04:04:05 +0000"  >&lt;blockquote&gt;
&lt;p&gt;this test passed on the latest master version&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/98b0df84-8fcd-11e4-9293-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/98b0df84-8fcd-11e4-9293-5254006e85c2&lt;/a&gt;&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;That&apos;s fine.&lt;/p&gt;

&lt;p&gt;The left issue is why sanity-scrub test_7 hung. According to the test scripts, there is no unlink operation during the test. So there should be some other non-test threads running during the sanity-scrub. What they were? It may be that another competitive thread held the PDO lock as to block the unlink operation. Unfortunately, there were no logs for that.&lt;/p&gt;</comment>
                            <comment id="102443" author="yong.fan" created="Wed, 31 Dec 2014 04:57:07 +0000"  >&lt;p&gt;Liang, do you have any idea about the PDO lock?&lt;/p&gt;</comment>
                            <comment id="102983" author="gerrit" created="Fri, 9 Jan 2015 10:22:07 +0000"  >&lt;p&gt;Fan Yong (fan.yong@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/13311&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13311&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6075&quot; title=&quot;Interop 2.6.0&amp;lt;-&amp;gt;2.7 sanity-scrub test_7: D process in MDS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6075&quot;&gt;&lt;del&gt;LU-6075&lt;/del&gt;&lt;/a&gt; osd: race for check/chance od_dirent_journal&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b559a828a4318882a5c619d84ec836d21ecbe987&lt;/p&gt;</comment>
                            <comment id="103937" author="gerrit" created="Mon, 19 Jan 2015 23:47:03 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/13311/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13311/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6075&quot; title=&quot;Interop 2.6.0&amp;lt;-&amp;gt;2.7 sanity-scrub test_7: D process in MDS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6075&quot;&gt;&lt;del&gt;LU-6075&lt;/del&gt;&lt;/a&gt; osd: race for check/chance od_dirent_journal&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: fe486c9894aa1d2275b66b3b3b0baee69a016afa&lt;/p&gt;</comment>
                            <comment id="107506" author="jlevi" created="Fri, 20 Feb 2015 16:15:51 +0000"  >&lt;p&gt;Patch landed to Master.&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzx33j:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>16902</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>